/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

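/*
 * Example (illustrative): callers are expected to bracket any access
 * to guest state with this pair, as the ioctl handlers below do:
 *
 *      vcpu_load(vcpu);
 *      r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
 *      vcpu_put(vcpu);
 */
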
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                return;
        ++kvm->stat.remote_tlb_flush;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

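/*
 * Example (illustrative, arch-specific): the request bit set above is
 * serviced in the vcpu run loop before the next guest entry, roughly:
 *
 *      if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *              kvm_x86_ops->tlb_flush(vcpu);
 *
 * The IPI only kicks vcpus currently in guest mode; it is the request
 * bit that guarantees the flush actually happens before re-entry.
 */
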
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

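/*
 * Example (illustrative): an arch vcpu constructor pairs these as
 *
 *      r = kvm_vcpu_init(vcpu, kvm, id);       (early in creation)
 *      ...
 *      kvm_vcpu_uninit(vcpu);                  (on the teardown path)
 *
 * so the kvm_run page is always released exactly once.
 */
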
static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();

        if (IS_ERR(kvm))
                goto out;

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
out:
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
        kvm_arch_destroy_vm(kvm);
        mmdrop(mm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_destroy_vm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                new.userspace_addr = mem->userspace_addr;
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;

        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
                *memslot = old;
                goto out_free;
        }

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

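/*
 * Example (illustrative userspace sketch, error handling elided): the
 * path into kvm_vm_ioctl_set_memory_region() starts with an ioctl on a
 * VM fd, where mem is a page-aligned mmap()ed buffer and vm_fd came
 * from KVM_CREATE_VM:
 *
 *      struct kvm_userspace_memory_region region = {
 *              .slot            = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size     = size,
 *              .userspace_addr  = (__u64)(unsigned long)mem,
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
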
int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}

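/*
 * Example (illustrative userspace sketch): a live-migration loop polls
 * the per-slot bitmap via KVM_GET_DIRTY_LOG and resends the pages
 * whose bits are set:
 *
 *      struct kvm_dirty_log log = {
 *              .slot         = 0,
 *              .dirty_bitmap = bitmap_buf,     (user buffer, n bytes)
 *      };
 *      ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */
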
int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

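/*
 * Worked example (illustrative numbers): for a slot with
 * base_gfn = 0x100 and userspace_addr = 0x2aaaaaaab000, gfn 0x103
 * yields 0x2aaaaaaab000 + (0x103 - 0x100) * PAGE_SIZE, i.e. three
 * pages past the start of the slot's userspace mapping.
 */
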
/*
 * Requires current->mm->mmap_sem to be held
 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return bad_page;
        }

        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                NULL);

        if (npages != 1) {
                get_page(bad_page);
                return bad_page;
        }

        return page[0];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_page_dirty(struct page *page)
{
        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

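/*
 * Worked example (illustrative): for a copy starting at offset 0xf00
 * within a 4K page with len = 0x300, next_segment() first returns
 * 0x100 (the bytes left in the current page); the caller then advances
 * to the next gfn with offset 0 and gets the remaining 0x200.
 */
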
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = __gfn_to_memslot(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&vcpu->wq, &wait);

        /*
         * We will block until either an interrupt or a signal wakes us up
         */
        while (!kvm_cpu_has_interrupt(vcpu)
               && !signal_pending(current)
               && !kvm_arch_vcpu_runnable(vcpu)) {
                set_current_state(TASK_INTERRUPTIBLE);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        fput(vcpu->kvm->filp);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd, r;
        struct inode *inode;
        struct file *file;

        r = anon_inode_getfd(&fd, &inode, &file,
                             "kvm-vcpu", &kvm_vcpu_fops, vcpu);
        if (r)
                return r;
        atomic_inc(&vcpu->kvm->filp->f_count);
        return fd;
}

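/*
 * Note (illustrative): the f_count bump above pins the VM's file for
 * the lifetime of the vcpu fd; kvm_vcpu_release() drops it with
 * fput(), so closing the vcpu fd cannot leave a dangling kvm pointer.
 */
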
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                goto vcpu_destroy;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);
vcpu_destroy:
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs kvm_regs;

                memset(&kvm_regs, 0, sizeof kvm_regs);
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs kvm_regs;

                r = -EFAULT;
                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                memset(&kvm_sregs, 0, sizeof kvm_sregs);
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                /* p is NULL when no mask was passed: clears the sigset */
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                struct kvm_fpu fpu;

                memset(&fpu, 0, sizeof fpu);
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &fpu, sizeof fpu))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                struct kvm_fpu fpu;

                r = -EFAULT;
                if (copy_from_user(&fpu, argp, sizeof fpu))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                   sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm *kvm = vma->vm_file->private_data;
        struct page *page;

        if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
                return VM_FAULT_SIGBUS;
        page = gfn_to_page(kvm, vmf->pgoff);
        if (is_error_page(page)) {
                kvm_release_page_clean(page);
                return VM_FAULT_SIGBUS;
        }
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
        .fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl   = kvm_vm_ioctl,
        .mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd, r;
        struct inode *inode;
        struct file *file;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
        if (r) {
                kvm_destroy_vm(kvm);
                return r;
        }
        kvm->filp = file;

        return fd;
}

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension((long)argp);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = 2 * PAGE_SIZE;
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

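/*
 * Example (illustrative userspace sketch, errors elided) of the
 * /dev/kvm ioctls handled above:
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *      if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *              exit(1);
 *      int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *      long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *      int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *      struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 */
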
static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
        kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                on_each_cpu(hardware_disable, NULL, 0, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                if (pos->in_range(pos, addr))
                        return pos;
        }
        return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
        BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

        bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (vcpu)
                                *val += *(u32 *)((void *)vcpu + offset);
                }
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

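/*
 * Note (illustrative): each debugfs file's private data is a byte
 * offset into struct kvm or struct kvm_vcpu; a read of, say,
 * /sys/kernel/debug/kvm/remote_tlb_flush ends up in vm_stat_get(),
 * which walks vm_list and sums the u32 counter at that offset.
 */
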
static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        set_kset_name("kvm"),
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        kvm_init_debug();

        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_1;
        }

        on_each_cpu(hardware_enable, NULL, 0, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_2;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_3;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_4;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_5;
        }

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
        sysdev_unregister(&kvm_sysdev);
out_free_4:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
        on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_1:
        kvm_arch_hardware_unsetup();
out_free_0:
        __free_page(bad_page);
out:
        kvm_arch_exit();
        kvm_exit_debug();
out_fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

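/*
 * Example (illustrative, assuming an arch module such as kvm-intel):
 * the arch module passes its opaque ops, vcpu container size and
 * module to kvm_init() from its own module_init hook, e.g.:
 *
 *      static int __init vmx_init(void)
 *      {
 *              return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *                              THIS_MODULE);
 *      }
 */
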
void kvm_exit(void)
{
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        kvm_exit_debug();
        __free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);