/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);

	/* This is taken to safely inject irq inside the guest. When
	 * the interrupt injection (or the ioapic code) uses a
	 * finer-grained lock, update this.
	 */
	mutex_lock(&assigned_dev->kvm->lock);
	kvm_set_irq(assigned_dev->kvm,
		    assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 1);
	mutex_unlock(&assigned_dev->kvm->lock);
	kvm_put_kvm(assigned_dev->kvm);
}

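/*
 * Host-side handler for an assigned device's interrupt: mask the host line
 * with disable_irq_nosync() and defer the actual guest injection to the
 * work handler above, since kvm_set_irq() is called under the (sleeping)
 * kvm->lock mutex.  The line is re-enabled once the guest acks the irq.
 */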
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	kvm_get_kvm(assigned_dev->kvm);
	schedule_work(&assigned_dev->interrupt_work);
	disable_irq_nosync(irq);
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);
	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
	enable_irq(dev->host_irq);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);

	if (cancel_work_sync(&assigned_dev->interrupt_work))
		/* We had pending work. That means we will have to take
		 * care of kvm_put_kvm.
		 */
		kvm_put_kvm(kvm);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq
				   *assigned_irq)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match) {
		mutex_unlock(&kvm->lock);
		return -EINVAL;
	}

	if (match->irq_requested) {
		match->guest_irq = assigned_irq->guest_irq;
		match->ack_notifier.gsi = assigned_irq->guest_irq;
		mutex_unlock(&kvm->lock);
		return 0;
	}

	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	if (irqchip_in_kernel(kvm)) {
		if (!capable(CAP_SYS_RAWIO)) {
			r = -EPERM;
			goto out_release;
		}

		if (assigned_irq->host_irq)
			match->host_irq = assigned_irq->host_irq;
		else
			match->host_irq = match->dev->irq;
		match->guest_irq = assigned_irq->guest_irq;
		match->ack_notifier.gsi = assigned_irq->guest_irq;
		match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
		kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
		r = kvm_request_irq_source_id(kvm);
		if (r < 0)
			goto out_release;
		else
			match->irq_source_id = r;

		/* Even though this is PCI, we don't want to use shared
		 * interrupts. Sharing host devices with guest-assigned devices
		 * on the same interrupt line is not a happy situation: there
		 * are going to be long delays in accepting, acking, etc.
		 */
		if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0,
				"kvm_assigned_device", (void *)match)) {
			r = -EIO;
			goto out_release;
		}
	}

	match->irq_requested = true;
	mutex_unlock(&kvm->lock);
	return r;
out_release:
	mutex_unlock(&kvm->lock);
	kvm_free_assigned_device(kvm, match);
	return r;
}

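/*
 * Assign a host PCI device to the guest: look the host device up by
 * bus/slot, enable it and claim its regions, track it on
 * kvm->arch.assigned_dev_head, and optionally map it through the IOMMU
 * when KVM_DEV_ASSIGN_ENABLE_IOMMU is requested.
 */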
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EINVAL;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}
	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->dev = dev;

	match->kvm = kvm;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		r = kvm_iommu_map_guest(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	return r;
}
#endif /* KVM_CAP_DEVICE_ASSIGNMENT */

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

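/*
 * Post a KVM_REQ_TLB_FLUSH request to every vcpu and kick the vcpus that
 * are currently running on another physical CPU with an empty IPI
 * (ack_flush); each vcpu consumes the request bit and flushes its own TLB
 * on its next entry.
 */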
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under the mmu_lock spinlock so we don't need to
	 * add a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

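/*
 * Create a new VM: allocate the arch-specific struct kvm, set up the
 * coalesced MMIO ring page and the MMU notifier (when configured), pin the
 * creating mm, initialize the PIO/MMIO buses and locks, and add the VM to
 * the global vm_list.
 */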
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (npages && !new.lpage_info) {
		int largepages = npages / KVM_PAGES_PER_HPAGE;
		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	if (!npages)
		*memslot = old;
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

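/*
 * Copy a memory slot's dirty bitmap out to userspace and report through
 * *is_dirty whether any bit in it is set.
 */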
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

*page
)
911 return page
== bad_page
;
913 EXPORT_SYMBOL_GPL(is_error_page
);
915 int is_error_pfn(pfn_t pfn
)
917 return pfn
== bad_pfn
;
919 EXPORT_SYMBOL_GPL(is_error_pfn
);
static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

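/*
 * Translate a guest frame number to a host pfn.  The common case is a
 * single get_user_pages_fast() on the slot's userspace mapping; if that
 * fails, fall back to walking the VMA so that VM_PFNMAP (MMIO) mappings
 * can still be resolved to a raw pfn.
 */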
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

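/*
 * The guest read/write/clear helpers below operate one page at a time:
 * next_segment() returns how many bytes of the request fit in the current
 * page, and the looping callers advance gfn/offset until the whole range
 * has been handled.
 */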
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_cpu_has_interrupt(vcpu) ||
		    kvm_cpu_has_pending_timer(vcpu) ||
		    kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
	if (fd < 0)
		kvm_put_kvm(vcpu->kvm);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

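/*
 * VM-level ioctls (vcpu creation, memory slots, dirty logging, coalesced
 * MMIO and device assignment); anything unrecognized is passed on to
 * kvm_arch_vm_ioctl().
 */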
static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

*vma
, struct vm_fault
*vmf
)
1675 struct page
*page
[1];
1678 gfn_t gfn
= vmf
->pgoff
;
1679 struct kvm
*kvm
= vma
->vm_file
->private_data
;
1681 addr
= gfn_to_hva(kvm
, gfn
);
1682 if (kvm_is_error_hva(addr
))
1683 return VM_FAULT_SIGBUS
;
1685 npages
= get_user_pages(current
, current
->mm
, addr
, 1, 1, 0, page
,
1687 if (unlikely(npages
!= 1))
1688 return VM_FAULT_SIGBUS
;
1690 vmf
->page
= page
[0];
static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
		return 1;
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

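/*
 * Enable/disable hardware virtualization on the current CPU, using the
 * cpus_hardware_enabled mask to keep the operations idempotent per CPU.
 */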
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

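/*
 * Used by the arch modules' exception fixups for VMX/SVM instructions: if
 * a reboot is in progress the CPU just spins, since hardware
 * virtualization has already been torn down; otherwise the fault is a
 * genuine bug and we want the trace.
 */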
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		kvm_rebooting = true;
		on_each_cpu(hardware_disable, NULL, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

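/*
 * The I/O bus is a small fixed-size array of devices: a lookup simply asks
 * each registered device whether the access falls in its range, and
 * registration appends to the array (bounded by NR_IOBUS_DEVS).
 */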
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr, len, is_write))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

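/*
 * debugfs statistics: each debugfs_entries item names an offset into
 * struct kvm or struct kvm_vcpu, and the getters below sum that counter
 * over all VMs (and all their vcpus) under kvm_lock.
 */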
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				*val += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

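/*
 * Module initialization, called by the arch module (kvm-intel/kvm-amd):
 * initialize arch state and the zeroed "bad page", enable hardware
 * virtualization on all online CPUs, register the cpu/reboot/sysdev hooks,
 * create the vcpu slab cache and finally the /dev/kvm misc device.
 * Failures unwind in reverse order.
 */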
int kvm_init(void *opaque, unsigned int vcpu_size,
		  struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	kvm_exit_debug();
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);