/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
/*
 * Ordering of locks:
 *
 *		kvm->slots_lock --> kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static int hardware_enable_all(void);
static void hardware_disable_all(void);
static bool kvm_rebooting;

static bool largepages_enabled = true;
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return 1;
}
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}
static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under the mmu_lock spinlock, so we don't need to
	 * add an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
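/*
 * Consumer-side sketch (added for clarity, assuming the mmu_notifier_retry()
 * helper from <linux/kvm_host.h>): an arch page fault snapshots
 * mmu_notifier_seq before resolving the gfn, then re-checks both counters
 * under mmu_lock before installing the spte:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	// an invalidate ran; retry the fault
 *	... establish the spte ...
 */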
static struct kvm *kvm_create_vm(void)
{
	int r = 0;
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto out_err;
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			goto out_err;
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	kfree(kvm);
	return ERR_PTR(r);
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);


	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}
void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}
static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}
void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);

	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};
/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
*filp
,
1250 unsigned int ioctl
, unsigned long arg
)
1252 struct kvm_vcpu
*vcpu
= filp
->private_data
;
1253 void __user
*argp
= (void __user
*)arg
;
1255 struct kvm_fpu
*fpu
= NULL
;
1256 struct kvm_sregs
*kvm_sregs
= NULL
;
1258 if (vcpu
->kvm
->mm
!= current
->mm
)
1265 r
= kvm_arch_vcpu_ioctl_run(vcpu
, vcpu
->run
);
1267 case KVM_GET_REGS
: {
1268 struct kvm_regs
*kvm_regs
;
1271 kvm_regs
= kzalloc(sizeof(struct kvm_regs
), GFP_KERNEL
);
1274 r
= kvm_arch_vcpu_ioctl_get_regs(vcpu
, kvm_regs
);
1278 if (copy_to_user(argp
, kvm_regs
, sizeof(struct kvm_regs
)))
1285 case KVM_SET_REGS
: {
1286 struct kvm_regs
*kvm_regs
;
1289 kvm_regs
= kzalloc(sizeof(struct kvm_regs
), GFP_KERNEL
);
1293 if (copy_from_user(kvm_regs
, argp
, sizeof(struct kvm_regs
)))
1295 r
= kvm_arch_vcpu_ioctl_set_regs(vcpu
, kvm_regs
);
1303 case KVM_GET_SREGS
: {
1304 kvm_sregs
= kzalloc(sizeof(struct kvm_sregs
), GFP_KERNEL
);
1308 r
= kvm_arch_vcpu_ioctl_get_sregs(vcpu
, kvm_sregs
);
1312 if (copy_to_user(argp
, kvm_sregs
, sizeof(struct kvm_sregs
)))
1317 case KVM_SET_SREGS
: {
1318 kvm_sregs
= kmalloc(sizeof(struct kvm_sregs
), GFP_KERNEL
);
1323 if (copy_from_user(kvm_sregs
, argp
, sizeof(struct kvm_sregs
)))
1325 r
= kvm_arch_vcpu_ioctl_set_sregs(vcpu
, kvm_sregs
);
1331 case KVM_GET_MP_STATE
: {
1332 struct kvm_mp_state mp_state
;
1334 r
= kvm_arch_vcpu_ioctl_get_mpstate(vcpu
, &mp_state
);
1338 if (copy_to_user(argp
, &mp_state
, sizeof mp_state
))
1343 case KVM_SET_MP_STATE
: {
1344 struct kvm_mp_state mp_state
;
1347 if (copy_from_user(&mp_state
, argp
, sizeof mp_state
))
1349 r
= kvm_arch_vcpu_ioctl_set_mpstate(vcpu
, &mp_state
);
1355 case KVM_TRANSLATE
: {
1356 struct kvm_translation tr
;
1359 if (copy_from_user(&tr
, argp
, sizeof tr
))
1361 r
= kvm_arch_vcpu_ioctl_translate(vcpu
, &tr
);
1365 if (copy_to_user(argp
, &tr
, sizeof tr
))
1370 case KVM_SET_GUEST_DEBUG
: {
1371 struct kvm_guest_debug dbg
;
1374 if (copy_from_user(&dbg
, argp
, sizeof dbg
))
1376 r
= kvm_arch_vcpu_ioctl_set_guest_debug(vcpu
, &dbg
);
1382 case KVM_SET_SIGNAL_MASK
: {
1383 struct kvm_signal_mask __user
*sigmask_arg
= argp
;
1384 struct kvm_signal_mask kvm_sigmask
;
1385 sigset_t sigset
, *p
;
1390 if (copy_from_user(&kvm_sigmask
, argp
,
1391 sizeof kvm_sigmask
))
1394 if (kvm_sigmask
.len
!= sizeof sigset
)
1397 if (copy_from_user(&sigset
, sigmask_arg
->sigset
,
1402 r
= kvm_vcpu_ioctl_set_sigmask(vcpu
, &sigset
);
1406 fpu
= kzalloc(sizeof(struct kvm_fpu
), GFP_KERNEL
);
1410 r
= kvm_arch_vcpu_ioctl_get_fpu(vcpu
, fpu
);
1414 if (copy_to_user(argp
, fpu
, sizeof(struct kvm_fpu
)))
1420 fpu
= kmalloc(sizeof(struct kvm_fpu
), GFP_KERNEL
);
1425 if (copy_from_user(fpu
, argp
, sizeof(struct kvm_fpu
)))
1427 r
= kvm_arch_vcpu_ioctl_set_fpu(vcpu
, fpu
);
1434 r
= kvm_arch_vcpu_ioctl(filp
, ioctl
, arg
);
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};
static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};
static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}
static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}
static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all cpus when the system
	 * shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}
/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
		     int len, const void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}
int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev)
{
	int ret;

	down_write(&kvm->slots_lock);
	ret = __kvm_io_bus_register_dev(bus, dev);
	up_write(&kvm->slots_lock);

	return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev)
{
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;

	return 0;
}
void kvm_io_bus_unregister_dev(struct kvm *kvm,
			       struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	down_write(&kvm->slots_lock);
	__kvm_io_bus_unregister_dev(bus, dev);
	up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
}
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);