/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;
static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
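/*
 * Usage sketch (added for illustration; not part of the original source):
 * arch code brackets any access to a vcpu's loaded state with this pair,
 * roughly
 *
 *	vcpu_load(vcpu);
 *	... read or modify the vcpu's architectural state ...
 *	vcpu_put(vcpu);
 *
 * vcpu_load() takes the per-vcpu mutex and registers the preempt notifier,
 * so the arch state is saved and restored if the task is scheduled out
 * between the two calls.
 */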
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
void kvm_reload_remote_mmus(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}
void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}
static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}
void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_put_kvm(kvm);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (npages && !new.lpage_info) {
		int largepages = npages / KVM_PAGES_PER_HPAGE;
		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
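/*
 * Illustrative example (not in the original file): an in-kernel caller
 * registers a slot of guest memory roughly like this; the slot number,
 * addresses and size below are made-up values, and "hva" stands for a
 * page-aligned address in the caller's mm (for instance from mmap).
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 3,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = 16 * PAGE_SIZE,
 *		.userspace_addr  = hva,
 *	};
 *	r = kvm_set_memory_region(kvm, &mem, 1);
 *
 * memory_size and guest_phys_addr must be page aligned and the region may
 * not overlap an existing slot, otherwise __kvm_set_memory_region() rejects
 * it with -EINVAL or -EEXIST.
 */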
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
/*
 * Requires current->mm->mmap_sem to be held
 */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, addr);
		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		BUG_ON(pfn_valid(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (pfn_valid(pfn))
		return pfn_to_page(pfn);

	WARN_ON(!pfn_valid(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (pfn_valid(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (pfn_valid(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
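/*
 * Illustrative access pattern (added comment, not original code): callers
 * pair gfn_to_page() with one of the release helpers above, roughly
 *
 *	page = gfn_to_page(kvm, gfn);
 *	if (is_error_page(page)) {
 *		kvm_release_page_clean(page);
 *		return -EFAULT;
 *	}
 *	... read or modify the page contents ...
 *	kvm_release_page_dirty(page);	(or _clean if it was only read)
 *
 * The _dirty variants mark the struct page dirty before dropping the
 * reference that gfn_to_page() took.
 */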
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}
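/*
 * Worked example (illustrative): with 4096-byte pages, a 5000-byte access
 * starting at offset 3000 within its first page is split by next_segment()
 * into a 1096-byte segment (4096 - 3000) followed by a 3904-byte segment at
 * offset 0 of the next page.  The kvm_read_guest()/kvm_write_guest() loops
 * below advance gfn, data and len by one such segment per iteration.
 */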
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
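/*
 * Illustrative call (not from the original file): reading a guest-physical
 * structure that may straddle a page boundary; "hypothetical_desc" and
 * "gpa" are made-up names.
 *
 *	struct hypothetical_desc desc;
 *
 *	if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 *
 * kvm_read_guest() returns 0 on success or the negative errno from the
 * per-page kvm_read_guest_page() calls.
 */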
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_cpu_has_interrupt(vcpu))
			break;
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (kvm_arch_vcpu_runnable(vcpu))
			break;
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};
*file
, struct vm_area_struct
*vma
)
842 vma
->vm_ops
= &kvm_vcpu_vm_ops
;
846 static int kvm_vcpu_release(struct inode
*inode
, struct file
*filp
)
848 struct kvm_vcpu
*vcpu
= filp
->private_data
;
850 kvm_put_kvm(vcpu
->kvm
);
854 static const struct file_operations kvm_vcpu_fops
= {
855 .release
= kvm_vcpu_release
,
856 .unlocked_ioctl
= kvm_vcpu_ioctl
,
857 .compat_ioctl
= kvm_vcpu_ioctl
,
858 .mmap
= kvm_vcpu_mmap
,
/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (fd < 0)
		kvm_put_kvm(vcpu->kvm);
	return fd;
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm *kvm = vma->vm_file->private_data;
	struct page *page;

	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
		return VM_FAULT_SIGBUS;
	page = gfn_to_page(kvm, vmf->pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return VM_FAULT_SIGBUS;
	}
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static const struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};
static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
		r += PAGE_SIZE;    /* pio data page */
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
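/*
 * Illustrative userspace sequence (added for documentation; simplified and
 * without error handling) showing how the ioctls above are reached through
 * /dev/kvm:
 *
 *	kvm_fd   = open("/dev/kvm", O_RDWR);
 *	version  = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);   (KVM_API_VERSION)
 *	vm_fd    = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	vcpu_fd  = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	map_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run      = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, vcpu_fd, 0);            (struct kvm_run)
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */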
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		kvm_rebooting = true;
		on_each_cpu(hardware_disable, NULL, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}
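/*
 * Illustrative note (added comment): an in-kernel device model embeds a
 * struct kvm_io_device, fills in its read/write/in_range callbacks and
 * registers it once at setup time, roughly
 *
 *	kvm_io_bus_register_dev(&kvm->pio_bus, &dev->iodev);
 *
 * where "dev->iodev" is a hypothetical member name.  kvm_io_bus_find_dev()
 * is what the PIO/MMIO emulation paths use to route a guest access to the
 * matching device.
 */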
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				*val += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
struct page *bad_page;
pfn_t bad_pfn;
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
int kvm_init(void *opaque, unsigned int vcpu_size,
		  struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);