KVM: Export memory slot allocation mechanism
drivers/kvm/kvm.h
#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)

#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PIO_PAGE_OFFSET 1

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0
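
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * vcpu->requests is an ordinary bitmask, so the usual pattern is to raise a
 * request with the generic bit helpers and consume it on the vcpu entry path,
 * e.g.
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		kvm_x86_ops->tlb_flush(vcpu);
 */
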
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef unsigned long  gfn_t;

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef unsigned long  hfn_t;
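
/*
 * Illustrative helpers (an assumption, not part of the original header):
 * a gfn is simply a gpa with the page offset stripped, so the two convert
 * with plain shifts by PAGE_SHIFT.  The example_ prefix marks these as
 * hypothetical, for exposition only.
 */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline gpa_t example_gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
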
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};
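
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * shadow pages are keyed by (gfn, role), so a lookup typically clears
 * role.word and then fills in the individual bitfields before hashing.
 * The example_ prefix marks this helper as hypothetical.
 */
static inline union kvm_mmu_page_role example_make_role(unsigned glevels,
							unsigned level)
{
	union kvm_mmu_page_role role;

	role.word = 0;		/* zero the padding and unused bits */
	role.glevels = glevels;
	role.level = level;
	return role;
}
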
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;         /* More than one parent_pte? */
	int root_count;          /* Currently serving as active root */
	union {
		u64 *parent_pte;               /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};
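
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * callers translate through the callbacks of the currently installed mmu
 * context rather than hard-coding a particular paging mode.  The example_
 * prefix marks this helper as hypothetical.
 */
static inline gpa_t example_translate_gva(struct kvm_mmu *mmu,
					  struct kvm_vcpu *vcpu, gva_t gva)
{
	/* may return UNMAPPED_GVA if the guest mapping is not present */
	return mmu->gva_to_gpa(vcpu, gva);
}
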
#define KVM_NR_MEM_OBJS 40

struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
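
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * once the cache has been topped up outside the fault path, objects can be
 * taken from it with no possibility of allocation failure.  The example_
 * prefix marks this helper as hypothetical.
 */
static inline void *example_mmu_memory_cache_pop(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}
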
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include "x86_emulate.h"

struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	int in;
	int port;
	int size;
	int string;
	int down;
	int rep;
};

struct kvm_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 light_exits;
	u32 efer_reload;
};

struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we dont expect many devices to register (famous last words :),
 * so until then it will suffice.  At least its abstracted so we can change
 * in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
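
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * emulation code resolves the device behind an address on a bus and then
 * dispatches through the kvm_iodevice wrappers above.  The example_ prefix
 * and the -EOPNOTSUPP error choice are hypothetical.
 */
static inline int example_io_bus_read(struct kvm_io_bus *bus, gpa_t addr,
				      int len, void *val)
{
	struct kvm_io_device *dev = kvm_io_bus_find_dev(bus, addr);

	if (!dev)
		return -EOPNOTSUPP;	/* no device claims this address */
	kvm_iodevice_read(dev, addr, len, val);
	return 0;
}
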
#ifdef CONFIG_HAS_IOMEM
#define KVM_VCPU_MMIO			\
	int mmio_needed;		\
	int mmio_read_completed;	\
	int mmio_is_write;		\
	int mmio_size;			\
	unsigned char mmio_data[8];	\
	gpa_t mmio_phys_addr;

#else
#define KVM_VCPU_MMIO

#endif

#define KVM_VCPU_COMM					\
	struct kvm *kvm;				\
	struct preempt_notifier preempt_notifier;	\
	int vcpu_id;					\
	struct mutex mutex;				\
	int cpu;					\
	struct kvm_run *run;				\
	int guest_mode;					\
	unsigned long requests;				\
	struct kvm_guest_debug guest_debug;		\
	int fpu_active;					\
	int guest_fpu_loaded;				\
	wait_queue_head_t wq;				\
	int sigset_active;				\
	sigset_t sigset;				\
	struct kvm_stat stat;				\
	KVM_VCPU_MMIO

struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
	int user_alloc;
};

struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	unsigned long rmap_overflow;
	struct list_head vm_list;
	struct file *filp;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
};

static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
{
	return kvm->vpic;
}

static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
{
	return kvm->vioapic;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return pic_irqchip(kvm) != 0;
}

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);
};

extern struct kvm_x86_ops *kvm_x86_ops;

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
		 struct module *module);
void kvm_exit_x86(void);

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
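
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * translation failures are reported in-band through the HPA_ERR_MASK bit, so
 * callers test the returned value rather than a separate error code.  The
 * example_ prefix and the -EFAULT choice are hypothetical.
 */
static inline int example_gpa_to_hpa_checked(struct kvm *kvm, gpa_t gpa,
					     hpa_t *hpa)
{
	*hpa = gpa_to_hpa(kvm, gpa);
	return is_error_hpa(*hpa) ? -EFAULT : 0;
}
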
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
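
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * the accessors above work on guest-physical addresses and return 0 on
 * success, so copying a small guest value is a single call.  The example_
 * prefix marks this helper as hypothetical.
 */
static inline int example_read_guest_u32(struct kvm *kvm, gpa_t gpa, u32 *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
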
enum emulation_result {
	EMULATE_DONE,       /* no further processing */
	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
	EMULATE_FAIL,       /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int no_decode);
void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

__init void kvm_arch_init(void);

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)":: "r" (image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)":: "r" (image));
}

static inline void fpu_init(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif