 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
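/*
 * Illustrative sketch, not part of the original header: a request bit is
 * normally raised with set_bit() on vcpu->requests by whoever needs the
 * vcpu to act, and consumed with test_and_clear_bit() in the vcpu run
 * loop before the next guest entry.  flush_guest_tlb() below is a
 * hypothetical consumer, named only for the example:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_guest_tlb(vcpu);
 */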
extern struct kmem_cache *kvm_vcpu_cache;
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * in one place.
 */
struct kvm_io_bus {
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					   gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
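/*
 * Usage sketch, not part of the original header: a device model typically
 * embeds a struct kvm_io_device and registers it on the MMIO or PIO bus
 * during setup; dispatch paths then look it up by address.  "dev" and its
 * "iodev" member are hypothetical names for the example:
 *
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->iodev);
 *	...
 *	iodev = kvm_io_bus_find_dev(&kvm->pio_bus, addr, len, is_write);
 */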
struct kvm_vcpu {
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	unsigned long requests;
	struct kvm_guest_debug guest_debug;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_read_completed;
	unsigned char mmio_data[8];
#endif

	struct kvm_vcpu_arch arch;
};
struct kvm_memory_slot {
	unsigned long *dirty_bitmap;
	unsigned long rmap_pde;
	unsigned long userspace_addr;
};
struct kvm {
	struct mutex lock; /* protects the vcpus array and APIC accesses */
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__);	\
 } while (0)

#define kvm_printf(kvm, fmt...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
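/*
 * Usage sketch, not part of the original header: arch code calls
 * pr_unimpl() when the guest pokes something that is not emulated;
 * "msr" here is a caller-local variable used only for the example:
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 */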
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
extern struct page *bad_page;
extern pfn_t bad_pfn;
int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
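/*
 * Usage sketch, not part of the original header: copying a value out of
 * guest memory by guest physical address; "gpa" is assumed to come from
 * the caller:
 *
 *	u32 val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 */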
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
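/*
 * Usage sketch, not part of the original header: the arch vcpu run loop
 * brackets the actual guest entry with these helpers so that time spent
 * in the guest is accounted to the task as PF_VCPU time:
 *
 *	kvm_guest_enter();
 *	... enter the guest (arch specific) ...
 *	kvm_guest_exit();
 */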
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
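/*
 * Worked example, not part of the original header: with 4 KiB pages
 * (PAGE_SHIFT == 12), gfn_to_gpa(0x100) yields gpa 0x100000.
 */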
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}
struct kvm_stats_debugfs_item {
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
#else
static inline
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
#define kvm_trace_cleanup() ((void)0)
#endif
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under the mmu_lock and both values are
	 * modified under mmu_lock, so there's no need of smp_rmb()
	 * here in between; otherwise mmu_notifier_count should be
	 * read before mmu_notifier_seq, see
	 * mmu_notifier_invalidate_range_end write side.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
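/*
 * Usage sketch, not part of the original header: fault handlers sample
 * mmu_notifier_seq before translating the gfn and re-check it under the
 * kvm MMU spinlock (mmu_lock, not shown in this fragment) before
 * installing the mapping, so a concurrent invalidation forces a retry:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;
 */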