/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
#include <asm/setup.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
	(((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
	(((VROMLONGFUNC *)(rom->func)) (arg))
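
/*
 * Note: the regparm attributes describe the ROM's calling convention;
 * arguments (up to the stated count) are passed in registers rather than
 * on the stack.  For illustration, a call such as
 * call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_IRET) roughly expands
 * to ((VROMLONGFUNC *)(vmi_rom->get_reloc))(VMI_CALL_IRET).
 */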
static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;
/* Cached VMI operations */
static struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
	void (*set_kernel_stack)(u32 selector, u32 sp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, void *, u32, u32);
	void (*_flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*set_lazy_mode)(int mode);
} vmi_ops;
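
/*
 * The members above cache raw entry points into the VMI ROM.  They are kept
 * separate from the paravirt-ops structures because the ROM ABI does not
 * always match the paravirt-ops prototypes; the vmi_* wrapper functions
 * below translate between the two where needed.
 */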
/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;
/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5
static inline void patch_offset(void *insnbuf,
				unsigned long ip, unsigned long dest)
{
	*(unsigned long *)(insnbuf+1) = dest-ip-5;
}
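
/*
 * Both MNEM_CALL (0xe8) and MNEM_JMP (0xe9) are followed by a 32-bit
 * displacement relative to the end of the 5-byte instruction, which is
 * why the stored offset is dest - ip - 5.
 */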
static unsigned patch_internal(int call, unsigned len, void *insnbuf,
			       unsigned long ip)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;

	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch (rel->type) {
	case VMI_RELOCATION_CALL_REL:
		BUG_ON(len < 5);
		*(char *)insnbuf = MNEM_CALL;
		patch_offset(insnbuf, ip, (unsigned long)rel->eip);
		return 5;

	case VMI_RELOCATION_JUMP_REL:
		BUG_ON(len < 5);
		*(char *)insnbuf = MNEM_JMP;
		patch_offset(insnbuf, ip, (unsigned long)rel->eip);
		return 5;

	case VMI_RELOCATION_NOP:
		/* obliterate the whole thing */
		return 0;

	case VMI_RELOCATION_NONE:
		/* leave native code in place */
		break;

	default:
		BUG();
	}
	return len;
}
/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
			  unsigned long ip, unsigned len)
{
	switch (type) {
	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
		return patch_internal(VMI_CALL_DisableInterrupts, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
		return patch_internal(VMI_CALL_EnableInterrupts, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
		return patch_internal(VMI_CALL_SetInterruptMask, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
		return patch_internal(VMI_CALL_GetInterruptMask, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_cpu_ops.iret):
		return patch_internal(VMI_CALL_IRET, len, insns, ip);
	case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
		return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
	default:
		break;
	}
	return len;
}
/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	int override = 0;

	if (*ax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*ax),
			"=b" (*bx),
			"=c" (*cx),
			"=d" (*dx)
		      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
	if (override) {
		if (disable_pse)
			*dx &= ~X86_FEATURE_PSE;
		if (disable_pge)
			*dx &= ~X86_FEATURE_PGE;
		if (disable_sep)
			*dx &= ~X86_FEATURE_SEP;
		if (disable_tsc)
			*dx &= ~X86_FEATURE_TSC;
		if (disable_mtrr)
			*dx &= ~X86_FEATURE_MTRR;
	}
}
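
/*
 * The ROM's CPUID entry point follows the hardware register convention
 * (%eax/%ecx in, %eax/%ebx/%ecx/%edx out), so it is invoked with inline
 * assembly rather than through a C prototype; the feature-bit masking
 * above only applies the disable_* command line overrides to leaf 1.
 */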
static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new, 0);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}
static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	struct desc_struct desc;

	pack_descriptor(&desc, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESC_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}

static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}
static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	u32 *idt_entry = (u32 *)g;
	vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]);
}

static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	u32 *gdt_entry = (u32 *)desc;
	vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]);
}

static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
				const void *desc)
{
	u32 *ldt_entry = (u32 *)desc;
	vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
}
static void vmi_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
}
static void vmi_flush_tlb_user(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}
static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * data on it.
	 */
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
{
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}
static void vmi_release_pte(unsigned long pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
}

static void vmi_release_pmd(unsigned long pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
}

/*
 * We use the pgd_free hook for releasing the pgd page:
 */
static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;

	vmi_ops.release_page(pfn, VMI_PAGE_L2);
}
/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||	\
				       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)				\
	((level) | (is_current_as(mm, user) ?				\
		(VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)			\
	((level) | (is_current_as(mm, user) ?				\
		(VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
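
/*
 * For illustration: vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0) yields
 * VMI_PAGE_PT alone for a foreign address space, or
 * VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK) when the
 * update targets the current (or kernel) address space; the _defer
 * variant additionally sets VMI_PAGE_DEFER so the hypervisor may batch
 * the update until the next flush or invalidation.
 */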
static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}
static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { .pte = pmdval.pmd };
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}
#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	const pte_t pte = { .pte = pudval.pgd.pgd };
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { .pte = 0 };
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { .pte = 0 };
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif	/* CONFIG_X86_PAE */
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PERCPU;
	ap.gs = __KERNEL_STACK_CANARY;

	ap.eflags = 0;

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
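
/*
 * SetInitialAPState hands the vmi_ap_state assembled above to the
 * hypervisor, which can then bring the AP up with that register and
 * descriptor state directly, so the AP starts in protected mode at
 * start_eip rather than going through the usual real-mode trampoline.
 */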
static void vmi_start_context_switch(struct task_struct *prev)
{
	paravirt_start_context_switch(prev);
	vmi_ops.set_lazy_mode(2);
}

static void vmi_end_context_switch(struct task_struct *next)
{
	vmi_ops.set_lazy_mode(0);
	paravirt_end_context_switch(next);
}

static void vmi_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
	vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy_mmu(void)
{
	vmi_ops.set_lazy_mode(0);
	paravirt_leave_lazy_mmu();
}
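
/*
 * The numeric arguments to set_lazy_mode mirror how the wrappers above
 * use them: 0 leaves lazy mode, 1 enters lazy MMU mode, and 2 enters
 * lazy CPU (context switch) mode.
 */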
static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
		       rom->api_version_maj,
		       rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run... anyway, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
	       manufacturer, product,
	       rom->api_version_maj, rom->api_version_min,
	       pci->rom_version_maj, pci->rom_version_min);

	/* Don't allow BSD/MIT here for now because we don't want to end up
	   with any binary only shim layers */
	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
		printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
		       license);
		return 0;
	}

	return 1;
}
/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;

		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}
/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
}
/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type == VMI_RELOCATION_CALL_REL)		\
		opname = (void *)rel->eip;			\
	else if (rel->type == VMI_RELOCATION_NOP)		\
		opname = (void *)vmi_nop;			\
	else if (rel->type != VMI_RELOCATION_NONE)		\
		printk(KERN_WARNING "VMI: Unknown relocation "	\
				    "type %d for " #vmicall"\n",\
				    rel->type);			\
} while (0)
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		opname = wrapper;				\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)
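
/*
 * For illustration, para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr,
 * set_tr, SetTR) installs the vmi_set_tr wrapper as the paravirt-op and
 * caches the ROM's SetTR entry point in vmi_ops.set_tr for the wrapper
 * to call with translated arguments.
 */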
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel =
				(struct vmi_relocation_info *)&reloc;

	/*
	 * Prevent page tables from being allocated in highmem, even if
	 * CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);
	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
	pv_info.name = "vmi [deprecated]";

	pv_init_ops.patch = vmi_patch;

	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);
	para_fill(pv_cpu_ops.clts, CLTS);
	para_fill(pv_cpu_ops.get_debugreg, GetDR);
	para_fill(pv_cpu_ops.set_debugreg, SetDR);
	para_fill(pv_cpu_ops.read_cr0, GetCR0);
	para_fill(pv_mmu_ops.read_cr2, GetCR2);
	para_fill(pv_mmu_ops.read_cr3, GetCR3);
	para_fill(pv_cpu_ops.read_cr4, GetCR4);
	para_fill(pv_cpu_ops.write_cr0, SetCR0);
	para_fill(pv_mmu_ops.write_cr2, SetCR2);
	para_fill(pv_mmu_ops.write_cr3, SetCR3);
	para_fill(pv_cpu_ops.write_cr4, SetCR4);

	para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
	para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
	para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
	para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);

	para_fill(pv_cpu_ops.wbinvd, WBINVD);
	para_fill(pv_cpu_ops.read_tsc, RDTSC);

	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value, wrap */
	para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
	para_fill(pv_cpu_ops.load_gdt, SetGDT);
	para_fill(pv_cpu_ops.load_idt, SetIDT);
	para_fill(pv_cpu_ops.store_gdt, GetGDT);
	para_fill(pv_cpu_ops.store_idt, GetIDT);
	para_fill(pv_cpu_ops.store_tr, GetTR);
	pv_cpu_ops.load_tls = vmi_load_tls;
	para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
		  write_ldt_entry, WriteLDTEntry);
	para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
		  write_gdt_entry, WriteGDTEntry);
	para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
		  write_idt_entry, WriteIDTEntry);
	para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
	para_fill(pv_cpu_ops.io_delay, IODelay);
	para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
		  set_lazy_mode, SetLazyMode);

	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
		  set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
	para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
	para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);
	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
	if (vmi_ops.set_pte) {
		pv_mmu_ops.set_pte = vmi_set_pte;
		pv_mmu_ops.set_pte_at = vmi_set_pte_at;
		pv_mmu_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
		pv_mmu_ops.set_pud = vmi_set_pud;
		pv_mmu_ops.pte_clear = vmi_pte_clear;
		pv_mmu_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		pv_mmu_ops.pte_update = vmi_update_pte;
		pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
	}
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		pv_mmu_ops.alloc_pte = vmi_allocate_pte;
		pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
		pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		pv_mmu_ops.release_pte = vmi_release_pte;
		pv_mmu_ops.release_pmd = vmi_release_pmd;
		pv_mmu_ops.pgd_free = vmi_pgd_free;
	}

	/* Set linear is needed in all cases */
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	pv_cpu_ops.iret = (void *)0xbadbab0;
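
	/*
	 * 0xfeedbab0 and 0xbadbab0 are deliberately bogus addresses: these
	 * two entries are never meant to be called through, since every
	 * call site is rewritten by vmi_patch(); an unpatched site would
	 * fault visibly instead of silently misbehaving.
	 */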
	para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook,
		  set_initial_ap_state, SetInitialAPState);

#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(apic->read, APICRead);
	para_fill(apic->write, APICWrite);
#endif
	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		x86_init.timers.timer_init = vmi_time_init;
#ifdef CONFIG_X86_LOCAL_APIC
		x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
		x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
#endif
		pv_time_ops.sched_clock = vmi_sched_clock;
		x86_platform.calibrate_tsc = vmi_tsc_khz;
		x86_platform.get_wallclock = vmi_get_wallclock;
		x86_platform.set_wallclock = vmi_set_wallclock;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_noidle = 1;
		disable_vmi_timer = 1;
	}
	para_fill(pv_irq_ops.safe_halt, Halt);

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	vmi_bringup();

	return 1;
}
void __init vmi_init(void)
{
	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
}
void __init vmi_activate(void)
{
	unsigned long flags;

	if (!vmi_rom)
		return;

	local_irq_save(flags);
	activate_vmi();
	local_irq_restore(flags & X86_EFLAGS_IF);
}
static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);