/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <asm/vmi.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
   (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
   (((VROMLONGFUNC *)(rom->func)) (arg))
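
/*
 * For example, call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_IRET)
 * invokes the ROM's get_reloc entry (a regparm(2) call taking one int
 * argument) and returns the 64-bit packed relocation info for the
 * IRET call site; patch_internal() below decodes that result.
 */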
static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;
/* Cached VMI operations */
static struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*set_kernel_stack)(u32 selector, u32 esp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, void *, u32, u32);
	void (*_flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*set_lazy_mode)(int mode);
} vmi_ops;
/* XXX move this to alternative.h */
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;
/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5

static inline void patch_offset(unsigned char *eip, unsigned char *dest)
{
	*(unsigned long *)(eip+1) = dest-eip-5;
}

static unsigned patch_internal(int call, unsigned len, void *insns)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch (rel->type) {
		case VMI_RELOCATION_CALL_REL:
			BUG_ON(len < 5);
			*(char *)insns = MNEM_CALL;
			patch_offset(insns, rel->eip);
			return 5;

		case VMI_RELOCATION_JUMP_REL:
			BUG_ON(len < 5);
			*(char *)insns = MNEM_JMP;
			patch_offset(insns, rel->eip);
			return 5;

		case VMI_RELOCATION_NOP:
			/* obliterate the whole thing */
			return 0;

		case VMI_RELOCATION_NONE:
			/* leave native code in place */
			break;

		default:
			BUG();
	}
	return len;
}
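
/*
 * Example: if the ROM entry for a call sits 0x2000 bytes below the
 * patch site, the patched bytes become e8 fb df ff ff -- MNEM_CALL
 * followed by the little-endian rel32 displacement -0x2000 - 5.
 */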
/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len)
{
	switch (type) {
		case PARAVIRT_PATCH(irq_disable):
			return patch_internal(VMI_CALL_DisableInterrupts, len, insns);
		case PARAVIRT_PATCH(irq_enable):
			return patch_internal(VMI_CALL_EnableInterrupts, len, insns);
		case PARAVIRT_PATCH(restore_fl):
			return patch_internal(VMI_CALL_SetInterruptMask, len, insns);
		case PARAVIRT_PATCH(save_fl):
			return patch_internal(VMI_CALL_GetInterruptMask, len, insns);
		case PARAVIRT_PATCH(iret):
			return patch_internal(VMI_CALL_IRET, len, insns);
		case PARAVIRT_PATCH(irq_enable_sysexit):
			return patch_internal(VMI_CALL_SYSEXIT, len, insns);
		default:
			break;
	}
	return len;
}
/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx)
{
	int override = 0;
	if (*eax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*eax),
			"=b" (*ebx),
			"=c" (*ecx),
			"=d" (*edx)
		      : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid));
	if (override) {
		if (disable_pse)
			*edx &= ~X86_FEATURE_PSE;
		if (disable_pge)
			*edx &= ~X86_FEATURE_PGE;
		if (disable_sep)
			*edx &= ~X86_FEATURE_SEP;
		if (disable_tsc)
			*edx &= ~X86_FEATURE_TSC;
		if (disable_mtrr)
			*edx &= ~X86_FEATURE_MTRR;
	}
}
static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new->a, new->b);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}
static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	u32 low, high;

	pack_descriptor(&low, &high, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESCTYPE_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}
static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}
static void vmi_load_esp0(struct tss_struct *tss,
			  struct thread_struct *thread)
{
	tss->x86_tss.esp0 = thread->esp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0);
}
static void vmi_flush_tlb_user(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}
#ifdef CONFIG_DEBUG_PAGE_TYPE

#ifdef CONFIG_X86_PAE
#define MAX_BOOT_PTS (2048+4+1)
#else
#define MAX_BOOT_PTS (1024+1)
#endif

/*
 * During boot, mem_map is not yet available in paging_init, so stash
 * all the boot page allocations here.
 */
static struct {
	u32 pfn;
	int type;
} boot_page_allocations[MAX_BOOT_PTS];
static int num_boot_page_allocations;
static int boot_allocations_applied;
void vmi_apply_boot_page_allocations(void)
{
	int i;
	for (i = 0; i < num_boot_page_allocations; i++) {
		struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
		page->type = boot_page_allocations[i].type &
				~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	}
	boot_allocations_applied = 1;
}

static void record_page_type(u32 pfn, int type)
{
	BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
	boot_page_allocations[num_boot_page_allocations].pfn = pfn;
	boot_page_allocations[num_boot_page_allocations].type = type;
	num_boot_page_allocations++;
}
static void check_zeroed_page(u32 pfn, int type, struct page *page)
{
	u32 *ptr;
	int i;
	int limit = PAGE_SIZE / sizeof(int);

	if (page_address(page))
		ptr = (u32 *)page_address(page);
	else
		ptr = (u32 *)__va(pfn << PAGE_SHIFT);

	/*
	 * When cloning the root in non-PAE mode, only the userspace
	 * pdes need to be zeroed.
	 */
	if (type & VMI_PAGE_CLONE)
		limit = USER_PTRS_PER_PGD;
	for (i = 0; i < limit; i++)
		BUG_ON(ptr[i]);
}
/*
 * We stash the page type into struct page so we can verify the page
 * types are used properly.
 */
static void vmi_set_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - don't track */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		if (type != VMI_PAGE_NORMAL)
			BUG_ON(page->type);
		else
			BUG_ON(page->type == VMI_PAGE_NORMAL);
		page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
		if (type & VMI_PAGE_ZEROED)
			check_zeroed_page(pfn, type, page);
	} else {
		record_page_type(pfn, type);
	}
}
static void vmi_check_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - skip checks */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
		BUG_ON(type == VMI_PAGE_NORMAL && page->type);
		BUG_ON((type & page->type) == 0);
	}
}
#else
#define vmi_set_page_type(p,t) do { } while (0)
#define vmi_check_page_type(p,t) do { } while (0)
#endif
#ifdef CONFIG_HIGHPTE
static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
{
	void *va = kmap_atomic(page, type);

	/*
	 * Internally, the VMI ROM must map virtual addresses to physical
	 * addresses for processing MMU updates.  By the time MMU updates
	 * are issued, this information is typically already lost.
	 * Fortunately, the VMI provides a cache of mapping slots for active
	 * transactions.
	 *
	 * We use slot zero for the linear mapping of physical memory, and
	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
	 *
	 *  args:                 SLOT                 VA    COUNT PFN
	 */
	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));

	return va;
}
#endif
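
/*
 * The set_linear_mapping(slot, va, count, pfn) call above registers a
 * single page at 'va' as currently mapping 'pfn', so later PTE writes
 * through the kmap'd address can be resolved back to a physical page
 * by the ROM.
 */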
static void vmi_allocate_pt(u32 pfn)
{
	vmi_set_page_type(pfn, VMI_PAGE_L1);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}
static void vmi_allocate_pd(u32 pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * a page table.
	 */
	vmi_set_page_type(pfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}
static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
{
	vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
	vmi_check_page_type(clonepfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}
static void vmi_release_pt(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

static void vmi_release_pd(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}
/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
                                       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
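
/*
 * For example, a deferrable PTE update at a user address in the current
 * mm passes VMI_PAGE_PT | VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS plus the
 * masked address; the same update against another mm passes just
 * VMI_PAGE_PT, so the hypervisor must apply it immediately.
 */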
static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* XXX because of set_pmd_pte, this can be called on PT or PD layers */
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}
static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}
#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	/*
	 * XXX This is called from set_pmd_pte, but at both PT
	 * and PD layers so the VMI_PAGE_PT flag is wrong.  But
	 * it is only called for large page mapping changes; the
	 * Xen backend doesn't support large pages, and the ESX
	 * backend doesn't depend on the flag.
	 */
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}
static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}
static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.eip = (unsigned long) start_eip;
	ap.esp = (unsigned long) start_esp;

	ap.fs = __KERNEL_PERCPU;

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	{
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
static void vmi_set_lazy_mode(int mode)
{
	static DEFINE_PER_CPU(int, lazy_mode);

	if (!vmi_ops.set_lazy_mode)
		return;

	/* Modes should never nest or overlap */
	BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
					     mode == PARAVIRT_LAZY_FLUSH));

	if (mode == PARAVIRT_LAZY_FLUSH) {
		vmi_ops.set_lazy_mode(0);
		vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
	} else {
		vmi_ops.set_lazy_mode(mode);
		__get_cpu_var(lazy_mode) = mode;
	}
}
static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
				rom->api_version_maj,
				rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run... anyway, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
		manufacturer, product,
		rom->api_version_maj, rom->api_version_min,
		pci->rom_version_maj, pci->rom_version_min);

	/* Don't allow BSD/MIT here for now because we don't want to end up
	   with any binary-only shim layers */
	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
		printk(KERN_WARNING "VMI: Non-GPL license `%s' found for ROM. Not used.\n",
			license);
		return 0;
	}

	return 1;
}
/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;
		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}
/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
}
/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type == VMI_RELOCATION_CALL_REL)		\
		paravirt_ops.opname = (void *)rel->eip;		\
	else if (rel->type == VMI_RELOCATION_NOP)		\
		paravirt_ops.opname = (void *)vmi_nop;		\
	else if (rel->type != VMI_RELOCATION_NONE)		\
		printk(KERN_WARNING "VMI: Unknown relocation "	\
				    "type %d for " #vmicall"\n",\
					rel->type);		\
} while (0)

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		paravirt_ops.opname = wrapper;			\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);

	paravirt_ops.paravirt_enabled = 1;
	paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;

	paravirt_ops.patch = vmi_patch;
	paravirt_ops.name = "vmi";
	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions:
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(cpuid, vmi_cpuid, cpuid, CPUID);
	para_fill(clts, CLTS);
	para_fill(get_debugreg, GetDR);
	para_fill(set_debugreg, SetDR);
	para_fill(read_cr0, GetCR0);
	para_fill(read_cr2, GetCR2);
	para_fill(read_cr3, GetCR3);
	para_fill(read_cr4, GetCR4);
	para_fill(write_cr0, SetCR0);
	para_fill(write_cr2, SetCR2);
	para_fill(write_cr3, SetCR3);
	para_fill(write_cr4, SetCR4);
	para_fill(save_fl, GetInterruptMask);
	para_fill(restore_fl, SetInterruptMask);
	para_fill(irq_disable, DisableInterrupts);
	para_fill(irq_enable, EnableInterrupts);

	para_fill(wbinvd, WBINVD);
	para_fill(read_tsc, RDTSC);
	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value, wrap */
	para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

	para_fill(load_gdt, SetGDT);
	para_fill(load_idt, SetIDT);
	para_fill(store_gdt, GetGDT);
	para_fill(store_idt, GetIDT);
	para_fill(store_tr, GetTR);
	paravirt_ops.load_tls = vmi_load_tls;
	para_fill(write_ldt_entry, WriteLDTEntry);
	para_fill(write_gdt_entry, WriteGDTEntry);
	para_fill(write_idt_entry, WriteIDTEntry);
	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
	para_fill(set_iopl_mask, SetIOPLMask);
	para_fill(io_delay, IODelay);
	para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
	para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
	para_fill(flush_tlb_single, InvalPage);
	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
	if (vmi_ops.set_pte) {
		paravirt_ops.set_pte = vmi_set_pte;
		paravirt_ops.set_pte_at = vmi_set_pte_at;
		paravirt_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
		paravirt_ops.set_pte_present = vmi_set_pte_present;
		paravirt_ops.set_pud = vmi_set_pud;
		paravirt_ops.pte_clear = vmi_pte_clear;
		paravirt_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		paravirt_ops.pte_update = vmi_update_pte;
		paravirt_ops.pte_update_defer = vmi_update_pte_defer;
	}
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		paravirt_ops.alloc_pt = vmi_allocate_pt;
		paravirt_ops.alloc_pd = vmi_allocate_pd;
		paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		paravirt_ops.release_pt = vmi_release_pt;
		paravirt_ops.release_pd = vmi_release_pd;
	}

	/* Set linear is needed in all cases */
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
	if (vmi_ops.set_linear_mapping)
		paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif
	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	paravirt_ops.iret = (void *)0xbadbab0;

	para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);

#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(apic_read, APICRead);
	para_fill(apic_write, APICWrite);
	para_fill(apic_write_atomic, APICWrite);
#endif
	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		paravirt_ops.time_init = vmi_time_init;
		paravirt_ops.get_wallclock = vmi_get_wallclock;
		paravirt_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		paravirt_ops.setup_boot_clock = vmi_time_bsp_init;
		paravirt_ops.setup_secondary_clock = vmi_time_ap_init;
#endif
		paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
		paravirt_ops.get_cpu_khz = vmi_cpu_khz;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_vmi_timer = 1;
	}

	para_fill(safe_halt, Halt);

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__start_parainstructions, __stop_parainstructions);

	return 1;
}
void __init vmi_init(void)
{
	unsigned long flags;

	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

	local_irq_save(flags);
	activate_vmi();

#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
	local_irq_restore(flags & X86_EFLAGS_IF);
}
static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);