/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
19 #include "x86_emulate.h"
20 #include "segment_descriptor.h"
23 #include <linux/kvm.h>
24 #include <linux/module.h>
25 #include <linux/errno.h>
26 #include <linux/percpu.h>
27 #include <linux/gfp.h>
29 #include <linux/miscdevice.h>
30 #include <linux/vmalloc.h>
31 #include <linux/reboot.h>
32 #include <linux/debugfs.h>
33 #include <linux/highmem.h>
34 #include <linux/file.h>
35 #include <linux/sysdev.h>
36 #include <linux/cpu.h>
37 #include <linux/sched.h>
38 #include <linux/cpumask.h>
39 #include <linux/smp.h>
40 #include <linux/anon_inodes.h>
41 #include <linux/profile.h>
42 #include <linux/kvm_para.h>
43 #include <linux/pagemap.h>
45 #include <asm/processor.h>
48 #include <asm/uaccess.h>
51 MODULE_AUTHOR("Qumranet");
52 MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kvm_x86_ops *kvm_x86_ops;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;
#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	struct dentry *dentry;
} debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "halt_wakeup", STAT_OFFSET(halt_wakeup) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};

static struct dentry *debugfs_dir;

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe
/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct segment_descriptor_64 {
	struct segment_descriptor s;
	u32 base_higher;
	u32 pad_zero;
};

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((unsigned long)d->base_mid << 16) |
		((unsigned long)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long) \
		      ((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(&vcpu->host_fx_image);
	fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	put_cpu();
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_x86_ops->vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->mmu.root_hpa = INVALID_PAGE;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	if (!irqchip_in_kernel(kvm) || id == 0)
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	else
		vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail_free_run;
	}
	vcpu->pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->pio_data);
fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	kvm_mmu_destroy(vcpu);
	free_page((unsigned long)vcpu->pio_data);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}
static void kvm_free_userspace_physmem(struct kvm_memory_slot *free)
{
	int i;

	for (i = 0; i < free->npages; ++i) {
		if (free->phys_mem[i]) {
			if (!PageReserved(free->phys_mem[i]))
				SetPageDirty(free->phys_mem[i]);
			page_cache_release(free->phys_mem[i]);
		}
	}
}

static void kvm_free_kernel_physmem(struct kvm_memory_slot *free)
{
	int i;

	for (i = 0; i < free->npages; ++i)
		if (free->phys_mem[i])
			__free_page(free->phys_mem[i]);
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->phys_mem != dont->phys_mem)
		if (free->phys_mem) {
			if (free->user_alloc)
				kvm_free_userspace_physmem(free);
			else
				kvm_free_kernel_physmem(free);
			vfree(free->phys_mem);
		}
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->phys_mem = NULL;
	free->npages = 0;
	free->dirty_bitmap = NULL;
}
static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
		if (vcpu->pio.guest_pages[i]) {
			__free_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}
static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kfree(kvm->vpic);
	kfree(kvm->vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}
static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->inject_gp(vcpu, 0);
}
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

	mutex_lock(&vcpu->kvm->lock);
	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr0);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->cr4 = cr4;
	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	mutex_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else {
		vcpu->cr3 = cr3;
		vcpu->mmu.new_cr3(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->apic_base;
	else
		return vcpu->apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	fx_save(&vcpu->host_fx_image);
	fpu_init();
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
	preempt_enable();

	vcpu->cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct
					  kvm_userspace_memory_region *mem,
					  int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	mutex_lock(&kvm->lock);

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;
	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}
	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));
		if (!new.phys_mem)
			goto out_unlock;

		new.rmap = vmalloc(npages * sizeof(struct page *));
		if (!new.rmap)
			goto out_unlock;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		memset(new.rmap, 0, npages * sizeof(*new.rmap));
		if (user_alloc) {
			unsigned long pages_num;

			new.user_alloc = 1;
			down_read(&current->mm->mmap_sem);
			pages_num = get_user_pages(current, current->mm,
						   mem->userspace_addr,
						   npages, 1, 1, new.phys_mem,
						   NULL);
			up_read(&current->mm->mmap_sem);
			if (pages_num != npages)
				goto out_unlock;
		} else {
			for (i = 0; i < npages; ++i) {
				new.phys_mem[i] = alloc_page(GFP_HIGHUSER
							     | __GFP_ZERO);
				if (!new.phys_mem[i])
					goto out_unlock;
			}
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_unlock;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	if (!kvm->n_requested_mmu_pages) {
		unsigned int n_pages;

		if (npages) {
			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
						 n_pages);
		} else {
			unsigned int nr_mmu_pages;

			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
			nr_mmu_pages = max(nr_mmu_pages,
					   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
		}
	}

	*memslot = new;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->lock);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	mutex_unlock(&kvm->lock);
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
*kvm
,
828 u32 kvm_nr_mmu_pages
)
830 if (kvm_nr_mmu_pages
< KVM_MIN_ALLOC_MMU_PAGES
)
833 mutex_lock(&kvm
->lock
);
835 kvm_mmu_change_mmu_pages(kvm
, kvm_nr_mmu_pages
);
836 kvm
->n_requested_mmu_pages
= kvm_nr_mmu_pages
;
838 mutex_unlock(&kvm
->lock
);
842 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm
*kvm
)
844 return kvm
->n_alloc_mmu_pages
;
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	mutex_lock(&kvm->lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (any) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;

out:
	mutex_unlock(&kvm->lock);
	return r;
}
/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	mutex_lock(&kvm->lock);

	p = &kvm->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->aliases[n - 1].npages)
			break;
	kvm->naliases = n;

	kvm_mmu_zap_all(kvm);

	mutex_unlock(&kvm->lock);

	return 0;

out:
	return r;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
*kvm
, struct kvm_irqchip
*chip
)
973 switch (chip
->chip_id
) {
974 case KVM_IRQCHIP_PIC_MASTER
:
975 memcpy(&pic_irqchip(kvm
)->pics
[0],
977 sizeof(struct kvm_pic_state
));
979 case KVM_IRQCHIP_PIC_SLAVE
:
980 memcpy(&pic_irqchip(kvm
)->pics
[1],
982 sizeof(struct kvm_pic_state
));
984 case KVM_IRQCHIP_IOAPIC
:
985 memcpy(ioapic_irqchip(kvm
),
987 sizeof(struct kvm_ioapic_state
));
993 kvm_pic_update_irq(pic_irqchip(kvm
));
997 gfn_t
unalias_gfn(struct kvm
*kvm
, gfn_t gfn
)
1000 struct kvm_mem_alias
*alias
;
1002 for (i
= 0; i
< kvm
->naliases
; ++i
) {
1003 alias
= &kvm
->aliases
[i
];
1004 if (gfn
>= alias
->base_gfn
1005 && gfn
< alias
->base_gfn
+ alias
->npages
)
1006 return alias
->target_gfn
+ gfn
- alias
->base_gfn
;
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
*gfn_to_memslot(struct kvm
*kvm
, gfn_t gfn
)
1027 gfn
= unalias_gfn(kvm
, gfn
);
1028 return __gfn_to_memslot(kvm
, gfn
);
1031 struct page
*gfn_to_page(struct kvm
*kvm
, gfn_t gfn
)
1033 struct kvm_memory_slot
*slot
;
1035 gfn
= unalias_gfn(kvm
, gfn
);
1036 slot
= __gfn_to_memslot(kvm
, gfn
);
1039 return slot
->phys_mem
[gfn
- slot
->base_gfn
];
1041 EXPORT_SYMBOL_GPL(gfn_to_page
);
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (!page)
		return -EFAULT;
	page_virt = kmap_atomic(page, KM_USER0);

	memcpy(data, page_virt + offset, len);

	kunmap_atomic(page_virt, KM_USER0);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (!page)
		return -EFAULT;
	page_virt = kmap_atomic(page, KM_USER0);

	memcpy(page_virt + offset, data, len);

	kunmap_atomic(page_virt, KM_USER0);
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (!page)
		return -EFAULT;
	page_virt = kmap_atomic(page, KM_USER0);

	memset(page_virt + offset, 0, len);

	kunmap_atomic(page_virt, KM_USER0);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
/* WARNING: Does not work on aliased pages. */
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0)
			return X86EMUL_UNHANDLEABLE;

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu)
{
	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}
/*
 * Only the apic needs an MMIO device hook, so shortcut now..
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->apic) {
		dev = &vcpu->apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, vcpu)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, vcpu);
}
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int no_decode)
{
	int r;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->pio.string = 0;

	if (!no_decode) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->emulate_ctxt.vcpu = vcpu;
		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->emulate_ctxt.cr2 = cr2;
		vcpu->emulate_ctxt.mode =
			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 :	cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->emulate_ctxt.cs_base = 0;
			vcpu->emulate_ctxt.ds_base = 0;
			vcpu->emulate_ctxt.es_base = 0;
			vcpu->emulate_ctxt.ss_base = 0;
		} else {
			vcpu->emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		vcpu->emulate_ctxt.gs_base =
				get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->emulate_ctxt.fs_base =
				get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
		if (r) {
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);

	if (vcpu->pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->mp_state = VCPU_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->regs[VCPU_REGS_RAX];
	a0 = vcpu->regs[VCPU_REGS_RBX];
	a1 = vcpu->regs[VCPU_REGS_RCX];
	a2 = vcpu->regs[VCPU_REGS_RDX];
	a3 = vcpu->regs[VCPU_REGS_RSI];

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;

	mutex_lock(&vcpu->kvm->lock);

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active mapping
	 * to ensure that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->cache_regs(vcpu);
	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
mk_cr_64(u64 curr_cr
, u32 new_val
)
1630 return (curr_cr
& ~((1ULL << 32) - 1)) | new_val
;
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}
#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}

#endif
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function;
	struct kvm_cpuid_entry *e, *best;

	kvm_x86_ops->cache_regs(vcpu);
	function = vcpu->regs[VCPU_REGS_RAX];
	vcpu->regs[VCPU_REGS_RAX] = 0;
	vcpu->regs[VCPU_REGS_RBX] = 0;
	vcpu->regs[VCPU_REGS_RCX] = 0;
	vcpu->regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == function) {
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->regs[VCPU_REGS_RAX] = best->eax;
		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	free_pio_guest_pages(vcpu);
	return 0;
}
*vcpu
)
1897 struct kvm_pio_request
*io
= &vcpu
->pio
;
1901 kvm_x86_ops
->cache_regs(vcpu
);
1905 memcpy(&vcpu
->regs
[VCPU_REGS_RAX
], vcpu
->pio_data
,
1909 r
= pio_copy_data(vcpu
);
1911 kvm_x86_ops
->cache_regs(vcpu
);
1918 delta
*= io
->cur_count
;
1920 * The size of the register should really depend on
1921 * current address size.
1923 vcpu
->regs
[VCPU_REGS_RCX
] -= delta
;
1929 vcpu
->regs
[VCPU_REGS_RDI
] += delta
;
1931 vcpu
->regs
[VCPU_REGS_RSI
] += delta
;
1934 kvm_x86_ops
->decache_regs(vcpu
);
1936 io
->count
-= io
->cur_count
;
static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	mutex_lock(&vcpu->kvm->lock);
	if (vcpu->pio.in)
		kvm_iodevice_read(pio_dev, vcpu->pio.port,
				  vcpu->pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->pio.port,
				   vcpu->pio.size,
				   pd);
	mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	void *pd = vcpu->pio_data;
	int i;

	mutex_lock(&vcpu->kvm->lock);
	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
	mutex_unlock(&vcpu->kvm->lock);
}
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port)
{
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 0;
	vcpu->pio.down = 0;
	vcpu->pio.guest_page_offset = 0;
	vcpu->pio.rep = 0;

	kvm_x86_ops->cache_regs(vcpu);
	memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
	kvm_x86_ops->decache_regs(vcpu);

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (pio_dev) {
		kernel_pio(pio_dev, vcpu, vcpu->pio_data);
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 1;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	if (vcpu->pio.cur_count == vcpu->pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	for (i = 0; i < nr_pages; ++i) {
		mutex_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		if (page)
			get_page(page);
		vcpu->pio.guest_pages[i] = page;
		mutex_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!vcpu->pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			  "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
					(vcpu->interrupt_window_open &&
					 vcpu->irq_summary == 0);
}
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->sipi_vector);
		kvm_lapic_reset(vcpu);
		kvm_x86_ops->vcpu_reset(vcpu);
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	}

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_x86_ops->guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;
	}

	if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else if (!vcpu->mmio_read_completed)
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
		kvm_x86_ops->tlb_flush(vcpu);

	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		kvm_x86_ops->cache_regs(vcpu);
		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
	}

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched()) {
			++vcpu->stat.light_exits;
			goto again;
		}
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);

	return r;
}
static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->mmio_fault_cr2, 0, 1);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}

	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_x86_ops->cache_regs(vcpu);
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_x86_ops->decache_regs(vcpu);
	}

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
*vcpu
,
2288 struct kvm_regs
*regs
)
2292 kvm_x86_ops
->cache_regs(vcpu
);
2294 regs
->rax
= vcpu
->regs
[VCPU_REGS_RAX
];
2295 regs
->rbx
= vcpu
->regs
[VCPU_REGS_RBX
];
2296 regs
->rcx
= vcpu
->regs
[VCPU_REGS_RCX
];
2297 regs
->rdx
= vcpu
->regs
[VCPU_REGS_RDX
];
2298 regs
->rsi
= vcpu
->regs
[VCPU_REGS_RSI
];
2299 regs
->rdi
= vcpu
->regs
[VCPU_REGS_RDI
];
2300 regs
->rsp
= vcpu
->regs
[VCPU_REGS_RSP
];
2301 regs
->rbp
= vcpu
->regs
[VCPU_REGS_RBP
];
2302 #ifdef CONFIG_X86_64
2303 regs
->r8
= vcpu
->regs
[VCPU_REGS_R8
];
2304 regs
->r9
= vcpu
->regs
[VCPU_REGS_R9
];
2305 regs
->r10
= vcpu
->regs
[VCPU_REGS_R10
];
2306 regs
->r11
= vcpu
->regs
[VCPU_REGS_R11
];
2307 regs
->r12
= vcpu
->regs
[VCPU_REGS_R12
];
2308 regs
->r13
= vcpu
->regs
[VCPU_REGS_R13
];
2309 regs
->r14
= vcpu
->regs
[VCPU_REGS_R14
];
2310 regs
->r15
= vcpu
->regs
[VCPU_REGS_R15
];
2313 regs
->rip
= vcpu
->rip
;
2314 regs
->rflags
= kvm_x86_ops
->get_rflags(vcpu
);
2317 * Don't leak debug flags in case they were set for guest debugging
2319 if (vcpu
->guest_debug
.enabled
&& vcpu
->guest_debug
.singlestep
)
2320 regs
->rflags
&= ~(X86_EFLAGS_TF
| X86_EFLAGS_RF
);
2327 static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu
*vcpu
,
2328 struct kvm_regs
*regs
)
2332 vcpu
->regs
[VCPU_REGS_RAX
] = regs
->rax
;
2333 vcpu
->regs
[VCPU_REGS_RBX
] = regs
->rbx
;
2334 vcpu
->regs
[VCPU_REGS_RCX
] = regs
->rcx
;
2335 vcpu
->regs
[VCPU_REGS_RDX
] = regs
->rdx
;
2336 vcpu
->regs
[VCPU_REGS_RSI
] = regs
->rsi
;
2337 vcpu
->regs
[VCPU_REGS_RDI
] = regs
->rdi
;
2338 vcpu
->regs
[VCPU_REGS_RSP
] = regs
->rsp
;
2339 vcpu
->regs
[VCPU_REGS_RBP
] = regs
->rbp
;
2340 #ifdef CONFIG_X86_64
2341 vcpu
->regs
[VCPU_REGS_R8
] = regs
->r8
;
2342 vcpu
->regs
[VCPU_REGS_R9
] = regs
->r9
;
2343 vcpu
->regs
[VCPU_REGS_R10
] = regs
->r10
;
2344 vcpu
->regs
[VCPU_REGS_R11
] = regs
->r11
;
2345 vcpu
->regs
[VCPU_REGS_R12
] = regs
->r12
;
2346 vcpu
->regs
[VCPU_REGS_R13
] = regs
->r13
;
2347 vcpu
->regs
[VCPU_REGS_R14
] = regs
->r14
;
2348 vcpu
->regs
[VCPU_REGS_R15
] = regs
->r15
;
2351 vcpu
->rip
= regs
->rip
;
2352 kvm_x86_ops
->set_rflags(vcpu
, regs
->rflags
);
2354 kvm_x86_ops
->decache_regs(vcpu
);
2361 static void get_segment(struct kvm_vcpu
*vcpu
,
2362 struct kvm_segment
*var
, int seg
)
2364 return kvm_x86_ops
->get_segment(vcpu
, var
, seg
);
static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				    struct kvm_sregs *sregs)
{
	struct descriptor_table dt;
	int pending_vec;

	vcpu_load(vcpu);

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->cr0;
	sregs->cr2 = vcpu->cr2;
	sregs->cr3 = vcpu->cr3;
	sregs->cr4 = vcpu->cr4;
	sregs->cr8 = get_cr8(vcpu);
	sregs->efer = vcpu->shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	if (irqchip_in_kernel(vcpu->kvm)) {
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
		pending_vec = kvm_x86_ops->get_irq(vcpu);
		if (pending_vec >= 0)
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
	} else
		memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
		       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_x86_ops->set_segment(vcpu, var, seg);
}
*vcpu
,
2424 struct kvm_sregs
*sregs
)
2426 int mmu_reset_needed
= 0;
2427 int i
, pending_vec
, max_bits
;
2428 struct descriptor_table dt
;
2432 dt
.limit
= sregs
->idt
.limit
;
2433 dt
.base
= sregs
->idt
.base
;
2434 kvm_x86_ops
->set_idt(vcpu
, &dt
);
2435 dt
.limit
= sregs
->gdt
.limit
;
2436 dt
.base
= sregs
->gdt
.base
;
2437 kvm_x86_ops
->set_gdt(vcpu
, &dt
);
2439 vcpu
->cr2
= sregs
->cr2
;
2440 mmu_reset_needed
|= vcpu
->cr3
!= sregs
->cr3
;
2441 vcpu
->cr3
= sregs
->cr3
;
2443 set_cr8(vcpu
, sregs
->cr8
);
2445 mmu_reset_needed
|= vcpu
->shadow_efer
!= sregs
->efer
;
2446 #ifdef CONFIG_X86_64
2447 kvm_x86_ops
->set_efer(vcpu
, sregs
->efer
);
2449 kvm_set_apic_base(vcpu
, sregs
->apic_base
);
2451 kvm_x86_ops
->decache_cr4_guest_bits(vcpu
);
2453 mmu_reset_needed
|= vcpu
->cr0
!= sregs
->cr0
;
2454 vcpu
->cr0
= sregs
->cr0
;
2455 kvm_x86_ops
->set_cr0(vcpu
, sregs
->cr0
);
2457 mmu_reset_needed
|= vcpu
->cr4
!= sregs
->cr4
;
2458 kvm_x86_ops
->set_cr4(vcpu
, sregs
->cr4
);
2459 if (!is_long_mode(vcpu
) && is_pae(vcpu
))
2460 load_pdptrs(vcpu
, vcpu
->cr3
);
2462 if (mmu_reset_needed
)
2463 kvm_mmu_reset_context(vcpu
);
2465 if (!irqchip_in_kernel(vcpu
->kvm
)) {
2466 memcpy(vcpu
->irq_pending
, sregs
->interrupt_bitmap
,
2467 sizeof vcpu
->irq_pending
);
2468 vcpu
->irq_summary
= 0;
2469 for (i
= 0; i
< ARRAY_SIZE(vcpu
->irq_pending
); ++i
)
2470 if (vcpu
->irq_pending
[i
])
2471 __set_bit(i
, &vcpu
->irq_summary
);
2473 max_bits
= (sizeof sregs
->interrupt_bitmap
) << 3;
2474 pending_vec
= find_first_bit(
2475 (const unsigned long *)sregs
->interrupt_bitmap
,
2477 /* Only pending external irq is handled here */
2478 if (pending_vec
< max_bits
) {
2479 kvm_x86_ops
->set_irq(vcpu
, pending_vec
);
2480 pr_debug("Set back pending irq %d\n",
2485 set_segment(vcpu
, &sregs
->cs
, VCPU_SREG_CS
);
2486 set_segment(vcpu
, &sregs
->ds
, VCPU_SREG_DS
);
2487 set_segment(vcpu
, &sregs
->es
, VCPU_SREG_ES
);
2488 set_segment(vcpu
, &sregs
->fs
, VCPU_SREG_FS
);
2489 set_segment(vcpu
, &sregs
->gs
, VCPU_SREG_GS
);
2490 set_segment(vcpu
, &sregs
->ss
, VCPU_SREG_SS
);
2492 set_segment(vcpu
, &sregs
->tr
, VCPU_SREG_TR
);
2493 set_segment(vcpu
, &sregs
->ldt
, VCPU_SREG_LDTR
);
2500 void kvm_get_cs_db_l_bits(struct kvm_vcpu
*vcpu
, int *db
, int *l
)
2502 struct kvm_segment cs
;
2504 get_segment(vcpu
, &cs
, VCPU_SREG_CS
);
2508 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits
);
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};
static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}
/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}
/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	mutex_lock(&vcpu->kvm->lock);
	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	mutex_unlock(&vcpu->kvm->lock);
	vcpu_put(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				      struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}
*kvm_vcpu_nopage(struct vm_area_struct
*vma
,
2681 unsigned long address
,
2684 struct kvm_vcpu
*vcpu
= vma
->vm_file
->private_data
;
2685 unsigned long pgoff
;
2688 pgoff
= ((address
- vma
->vm_start
) >> PAGE_SHIFT
) + vma
->vm_pgoff
;
2690 page
= virt_to_page(vcpu
->run
);
2691 else if (pgoff
== KVM_PIO_PAGE_OFFSET
)
2692 page
= virt_to_page(vcpu
->pio_data
);
2694 return NOPAGE_SIGBUS
;
2697 *type
= VM_FAULT_MINOR
;
static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}
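
/*
 * Userspace-side sketch (kvm_fd and vcpu_fd are assumptions): mapping the
 * shared kvm_run/pio area that kvm_vcpu_nopage() serves.
 * KVM_GET_VCPU_MMAP_SIZE (handled later in this file) reports how much
 * to map.
 */
#if 0
long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, vcpu_fd, 0);
#endif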
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}
static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};
/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_x86_ops->vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);

	vcpu_load(vcpu);
	r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto mmu_unload;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);

mmu_unload:
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}
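
/*
 * Userspace-side sketch (vm_fd is an assumption): the fd returned by
 * KVM_CREATE_VCPU is the handle produced by create_vcpu_fd() above; all
 * per-vcpu ioctls below go through it.
 */
#if 0
int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* vcpu id 0 */
#endif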
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	u64 efer;
	int i;
	struct kvm_cpuid_entry *e, *entry;

	rdmsrl(MSR_EFER, efer);
	entry = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	/* NX is bit 20 of the 0x80000001 edx leaf. */
	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out;
	vcpu->cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	return 0;

out:
	return r;
}
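
/*
 * Userspace-side sketch (vcpu_fd is an assumption): installing a single
 * 0x80000001 leaf with the NX bit (bit 20 of edx) set; if the host EFER
 * lacks NX, cpuid_fix_nx_cap() above clears the bit again.
 */
#if 0
struct {
	struct kvm_cpuid header;
	struct kvm_cpuid_entry entry;
} cpuid = {
	.header = { .nent = 1 },
	.entry  = { .function = 0x80000001, .edx = 1 << 20 },
};

ioctl(vcpu_fd, KVM_SET_CPUID, &cpuid);
#endif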
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* A NULL argp clears the signal mask. */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}
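
/*
 * Userspace-side sketch (vcpu_fd and the mmap'ed run pointer are
 * assumptions from earlier sketches): the canonical vcpu loop built on
 * the ioctls dispatched above.
 */
#if 0
for (;;) {
	ioctl(vcpu_fd, KVM_RUN, 0);
	switch (run->exit_reason) {
	case KVM_EXIT_HLT:
		return;
	case KVM_EXIT_IO:
		/* emulate the port access described in run->io */
		break;
	}
}
#endif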
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->vpic = kvm_create_pic(kvm);
		if (kvm->vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->vpic);
				kvm->vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
						irq_event.irq,
						irq_event.level);
			kvm_ioapic_set_irq(kvm->vioapic,
					   irq_event.irq,
					   irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}
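
/*
 * Userspace-side sketch (vm_fd and the region size are assumptions):
 * registering guest RAM with KVM_SET_USER_MEMORY_REGION, the newer
 * interface handled above (the older KVM_SET_MEMORY_REGION case merely
 * converts into it).
 */
#if 0
void *mem = mmap(NULL, 0x100000, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
struct kvm_userspace_memory_region region = {
	.slot            = 0,
	.guest_phys_addr = 0,
	.memory_size     = 0x100000,
	.userspace_addr  = (unsigned long)mem,
};

ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
#endif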
static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	page = gfn_to_page(kvm, pgoff);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}
static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};
static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}
static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION: {
		int ext = (long)argp;

		switch (ext) {
		case KVM_CAP_IRQCHIP:
		case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
		case KVM_CAP_USER_MEMORY:
			r = 1;
			break;
		default:
			r = 0;
			break;
		}
		break;
	}
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		/* kvm_run page plus the pio_data page */
		r = 2 * PAGE_SIZE;
		break;
	default:
		;
	}
out:
	return r;
}
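
/*
 * Userspace-side sketch (not from this file): the usual bring-up sequence
 * against the ioctls above -- open /dev/kvm, verify the API version,
 * create a VM.
 */
#if 0
int kvm_fd = open("/dev/kvm", O_RDWR);

if (kvm_fd < 0 ||
    ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
	exit(1);	/* headers and module disagree */
int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
#endif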
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = vm->vcpus[i];
			if (!vcpu)
				continue;
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_x86_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_x86_ops->hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_x86_ops->hardware_disable(NULL);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}
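
/*
 * In-kernel sketch (hypothetical device, not from this file): a minimal
 * kvm_io_device that claims a single port and hooks itself onto a bus
 * with kvm_io_bus_register_dev().  The dummy_* names and the choice of
 * the pio bus are assumptions for illustration only.
 */
#if 0
static int dummy_in_range(struct kvm_io_device *dev, gpa_t addr)
{
	return addr == 0xe9;	/* one byte-wide debug port */
}

static void dummy_register(struct kvm *kvm, struct kvm_io_device *dev)
{
	dev->in_range = dummy_in_range;
	kvm_io_bus_register_dev(&kvm->pio_bus, dev);
}
#endif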
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}
static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	set_kset_name("kvm"),
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

hpa_t bad_page_address;
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_x86_ops->vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_x86_ops->vcpu_put(vcpu);
}
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
		 struct module *module)
{
	int r;
	int cpu;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_x86_ops = ops;

	r = kvm_x86_ops->hardware_setup();
	if (r < 0)
		goto out;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_x86_ops->check_processor_compatibility,
				&r, 0, 1);
		if (r < 0)
			goto out_free_0;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu), 0, 0);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_4;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
	kvm_x86_ops->hardware_unsetup();
out:
	kvm_x86_ops = NULL;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init_x86);
void kvm_exit_x86(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_x86_ops->hardware_unsetup();
	kvm_x86_ops = NULL;
}
EXPORT_SYMBOL_GPL(kvm_exit_x86);
static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	kvm_init_debug();

	kvm_init_msr_list();

	bad_page = alloc_page(GFP_KERNEL);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return 0;

out:
	kvm_exit_debug();
	kvm_mmu_module_exit();
out4:
	return r;
}
static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
	kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)