/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */

#include <linux/module.h>
#include <linux/license.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
/* Needed for the VMI_* definitions and isa_bus_to_virt() used below. */
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>

/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
   (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
   (((VROMLONGFUNC *)(rom->func))(arg))

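/*
 * Illustrative expansion (not in the original source): the ROM header
 * stores entry points as plain u32 values, so a lookup such as
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
 *
 * expands to
 *
 *	(((VROMLONGFUNC *)(vmi_rom->get_reloc))(VMI_CALL_CPUID));
 *
 * reinterpreting the stored u32 as the address of a regparm(2) function
 * and calling it with the VMI call number in a register.
 */
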
static struct vrom_header *vmi_rom;
static int license_gplok;
static int disable_nodelay;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;

/* Cached VMI operations */
struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*set_kernel_stack)(u32 selector, u32 esp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, u32, u32, u32);
	void (*flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
} vmi_ops;

/* XXX move this to alternative.h */
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];

/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

static char irq_save_disable_callout[] = {
	MNEM_CALL, 0, 0, 0, 0,
	MNEM_CALL, 0, 0, 0, 0,
	MNEM_RET
};
#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5

static inline void patch_offset(unsigned char *eip, unsigned char *dest)
{
	*(unsigned long *)(eip+1) = dest-eip-5;
}

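/*
 * Illustrative note (not in the original source): patch_offset() stores a
 * CALL/JMP rel32 displacement, which x86 defines relative to the *end* of
 * the 5-byte instruction, hence the "-5".  For example, with hypothetical
 * addresses:
 *
 *	eip  = (unsigned char *)0xc0100000;	patch site
 *	dest = (unsigned char *)0xc0200000;	VMI ROM entry
 *	rel32 = dest - eip - 5 = 0x000ffffb
 *
 * so the patched bytes are e8 fb ff 0f 00.
 */
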
static unsigned patch_internal(int call, unsigned len, void *insns)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch (rel->type) {
		case VMI_RELOCATION_CALL_REL:
			BUG_ON(len < 5);
			*(char *)insns = MNEM_CALL;
			patch_offset(insns, rel->eip);
			return 5;

		case VMI_RELOCATION_JUMP_REL:
			BUG_ON(len < 5);
			*(char *)insns = MNEM_JMP;
			patch_offset(insns, rel->eip);
			return 5;

		case VMI_RELOCATION_NOP:
			/* obliterate the whole thing */
			return 0;

		case VMI_RELOCATION_NONE:
			/* leave native code in place */
			break;

		default:
			BUG();
	}
	return len;
}

/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len)
{
	switch (type) {
		case PARAVIRT_IRQ_DISABLE:
			return patch_internal(VMI_CALL_DisableInterrupts, len, insns);
		case PARAVIRT_IRQ_ENABLE:
			return patch_internal(VMI_CALL_EnableInterrupts, len, insns);
		case PARAVIRT_RESTORE_FLAGS:
			return patch_internal(VMI_CALL_SetInterruptMask, len, insns);
		case PARAVIRT_SAVE_FLAGS:
			return patch_internal(VMI_CALL_GetInterruptMask, len, insns);
		case PARAVIRT_SAVE_FLAGS_IRQ_DISABLE:
			if (len >= 10) {
				patch_internal(VMI_CALL_GetInterruptMask, len, insns);
				patch_internal(VMI_CALL_DisableInterrupts, len-5, insns+5);
				return 10;
			} else {
				/*
				 * You bastards didn't leave enough room to
				 * patch save_flags_irq_disable inline.  Patch
				 * to a callout instead.
				 */
				BUG_ON(len < 5);
				*(char *)insns = MNEM_CALL;
				patch_offset(insns, irq_save_disable_callout);
				return 5;
			}
		case PARAVIRT_INTERRUPT_RETURN:
			return patch_internal(VMI_CALL_IRET, len, insns);
		case PARAVIRT_STI_SYSEXIT:
			return patch_internal(VMI_CALL_SYSEXIT, len, insns);
		default:
			break;
	}
	return len;
}

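/*
 * Illustrative sketch (not in the original source) of the two code shapes
 * vmi_patch() can emit for PARAVIRT_SAVE_FLAGS_IRQ_DISABLE.  Inline, when
 * len >= 10:
 *
 *	e8 xx xx xx xx		call VMI GetInterruptMask
 *	e8 yy yy yy yy		call VMI DisableInterrupts
 *
 * Via callout, when only 5 bytes are available:
 *
 *	e8 zz zz zz zz		call irq_save_disable_callout
 *
 * where the callout body is the same two CALLs followed by MNEM_RET, so
 * both shapes perform the identical pair of VMI calls.
 */
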
/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	int override = 0;
	if (*eax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*eax),
			"=b" (*ebx),
			"=c" (*ecx),
			"=d" (*edx)
		      : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid));
	if (override) {
		if (disable_pse)
			*edx &= ~X86_FEATURE_PSE;
		if (disable_pge)
			*edx &= ~X86_FEATURE_PGE;
		if (disable_sep)
			*edx &= ~X86_FEATURE_SEP;
		if (disable_tsc)
			*edx &= ~X86_FEATURE_TSC;
		if (disable_mtrr)
			*edx &= ~X86_FEATURE_MTRR;
	}
}

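/*
 * For comparison (illustrative, not part of the original source), the
 * native equivalent would issue CPUID directly instead of calling through
 * vmi_ops.cpuid:
 *
 *	asm volatile ("cpuid"
 *		      : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
 *		      : "0" (*eax), "2" (*ecx));
 *
 * The "0" and "2" matching constraints tie the input leaf (eax) and
 * subleaf (ecx) to outputs 0 and 2, exactly as in the VMI variant above.
 */
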
static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new->a, new->b);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}

static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	u32 low, high;

	pack_descriptor(&low, &high, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESCTYPE_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}

static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}

static void vmi_load_esp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->esp0);
}

static void vmi_flush_tlb_user(void)
{
	vmi_ops.flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops.flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}

/* For NO_IDLE_HZ, we stop the clock when halting the kernel */
#ifdef CONFIG_NO_IDLE_HZ
static fastcall void vmi_safe_halt(void)
{
	int idle = vmi_stop_hz_timer();
	vmi_ops.halt();
	if (idle) {
		local_irq_disable();
		vmi_account_time_restart_hz_timer();
		local_irq_enable();
	}
}
#endif

#ifdef CONFIG_DEBUG_PAGE_TYPE

#ifdef CONFIG_X86_PAE
#define MAX_BOOT_PTS (2048+4+1)
#else
#define MAX_BOOT_PTS (1024+1)
#endif

/*
 * During boot, mem_map is not yet available in paging_init, so stash
 * all the boot page allocations here.
 */
static struct {
	u32 pfn;
	int type;
} boot_page_allocations[MAX_BOOT_PTS];
static int num_boot_page_allocations;
static int boot_allocations_applied;

void vmi_apply_boot_page_allocations(void)
{
	int i;

	for (i = 0; i < num_boot_page_allocations; i++) {
		struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
		page->type = boot_page_allocations[i].type &
				~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	}
	boot_allocations_applied = 1;
}

static void record_page_type(u32 pfn, int type)
{
	BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
	boot_page_allocations[num_boot_page_allocations].pfn = pfn;
	boot_page_allocations[num_boot_page_allocations].type = type;
	num_boot_page_allocations++;
}

static void check_zeroed_page(u32 pfn, int type, struct page *page)
{
	u32 *ptr;
	int i;
	int limit = PAGE_SIZE / sizeof(int);

	if (page_address(page))
		ptr = (u32 *)page_address(page);
	else
		ptr = (u32 *)__va(pfn << PAGE_SHIFT);

	/*
	 * When cloning the root in non-PAE mode, only the userspace
	 * pdes need to be zeroed.
	 */
	if (type & VMI_PAGE_CLONE)
		limit = USER_PTRS_PER_PGD;
	for (i = 0; i < limit; i++)
		BUG_ON(ptr[i]);
}

/*
 * We stash the page type into struct page so we can verify the page
 * types are used properly.
 */
static void vmi_set_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - don't track */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		if (type != VMI_PAGE_NORMAL)
			BUG_ON(page->type);
		else
			BUG_ON(page->type == VMI_PAGE_NORMAL);
		page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
		if (type & VMI_PAGE_ZEROED)
			check_zeroed_page(pfn, type, page);
	} else {
		record_page_type(pfn, type);
	}
}

static void vmi_check_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - skip checks */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
		BUG_ON(type == VMI_PAGE_NORMAL && page->type);
		BUG_ON((type & page->type) == 0);
	}
}
#else
#define vmi_set_page_type(p,t) do { } while (0)
#define vmi_check_page_type(p,t) do { } while (0)
#endif

static void vmi_allocate_pt(u32 pfn)
{
	vmi_set_page_type(pfn, VMI_PAGE_L1);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pd(u32 pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * a page table page allocated.
	 */
	vmi_set_page_type(pfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
{
	vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
	vmi_check_page_type(clonepfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pt(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

static void vmi_release_pd(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
                                       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))

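/*
 * Worked example (illustrative, not part of the original source): for a
 * PTE update in the current address space at a hypothetical user address
 * 0xb7f2a000,
 *
 *	vmi_flags_addr(mm, 0xb7f2a000, VMI_PAGE_PT, 0)
 *
 * evaluates to
 *
 *	VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (0xb7f2a000 & VMI_PAGE_VA_MASK)
 *
 * while the same update against a non-current mm yields just VMI_PAGE_PT,
 * with no VA hint; vmi_flags_addr_defer() additionally sets VMI_PAGE_DEFER
 * in the current-AS case so the hypervisor may batch the update.
 */
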
static void vmi_update_pte(struct mm_struct *mm, u32 addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* XXX because of set_pmd_pte, this can be called on PT or PD layers */
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}

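/*
 * Illustrative note (not in the original source): under PAE a pmd entry
 * is 64 bits wide, while vmi_ops.set_pte() takes a pte_t built from two
 * 32-bit words.  The initializer above performs that split; e.g. a
 * hypothetical pmdval.pmd of 0x0000000187654067ULL becomes
 *
 *	pte low word  = 0x87654067	(flags + low pfn bits)
 *	pte high word = 0x00000001	(upper pfn bits; NX lives at bit 63)
 */
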
#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	/*
	 * XXX This is called from set_pmd_pte, but at both PT
	 * and PD layers so the VMI_PAGE_PT flag is wrong.  But
	 * it is only called for large page mapping changes,
	 * the Xen backend doesn't support large pages, and the
	 * ESX backend doesn't depend on the flag.
	 */
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif

#ifdef CONFIG_SMP
struct vmi_ap_state ap;
extern void setup_pda(void);

static void __init /* XXX cpu hotplug */
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PDA;
	ap.gs = 0;

	setup_pda();

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state(__pa(&ap), phys_apicid);
}
#endif

static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
				rom->api_version_maj,
				rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run anyway, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
		manufacturer, product,
		rom->api_version_maj, rom->api_version_min,
		pci->rom_version_maj, pci->rom_version_min);

	license_gplok = license_is_gpl_compatible(license);
	if (!license_gplok) {
		printk(KERN_WARNING "VMI: ROM license '%s' taints kernel... "
		       "inlining disabled\n",
		       license);
		add_taint(TAINT_PROPRIETARY_MODULE);
	}
	return 1;
}

/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;
		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}

645 void vmi_bringup(void)
647 /* We must establish the lowmem mapping for MMU ops to work */
649 vmi_ops
.set_linear_mapping(0, __PAGE_OFFSET
, max_low_pfn
, 0);
/*
 * Return a pointer to the VMI function or a NOP stub
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return (void *)vmi_nop;
}

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type != VMI_RELOCATION_NONE) {			\
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);	\
		paravirt_ops.opname = (void *)rel->eip;		\
	}							\
} while (0)

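/*
 * Illustrative expansion (not in the original source): inside
 * activate_vmi(), where "reloc" and "rel" are in scope,
 * para_fill(clts, CLTS) becomes
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CLTS);
 *	if (rel->type != VMI_RELOCATION_NONE) {
 *		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
 *		paravirt_ops.clts = (void *)rel->eip;
 *	}
 *
 * so a ROM that implements the call is wired in directly, and one that
 * does not leaves the native paravirt-ops default in place.
 */
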
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);

	paravirt_ops.paravirt_enabled = 1;
	paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;

	paravirt_ops.patch = vmi_patch;
	paravirt_ops.name = "vmi";
	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions:
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops.cpuid = (void *)rel->eip;
		paravirt_ops.cpuid = vmi_cpuid;
	}
	para_fill(clts, CLTS);
	para_fill(get_debugreg, GetDR);
	para_fill(set_debugreg, SetDR);
	para_fill(read_cr0, GetCR0);
	para_fill(read_cr2, GetCR2);
	para_fill(read_cr3, GetCR3);
	para_fill(read_cr4, GetCR4);
	para_fill(write_cr0, SetCR0);
	para_fill(write_cr2, SetCR2);
	para_fill(write_cr3, SetCR3);
	para_fill(write_cr4, SetCR4);
	para_fill(save_fl, GetInterruptMask);
	para_fill(restore_fl, SetInterruptMask);
	para_fill(irq_disable, DisableInterrupts);
	para_fill(irq_enable, EnableInterrupts);
	/* irq_save_disable !!! sheer pain */
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK],
		     (char *)paravirt_ops.save_fl);
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
		     (char *)paravirt_ops.irq_disable);
#ifndef CONFIG_NO_IDLE_HZ
	para_fill(safe_halt, Halt);
#else
	vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
	paravirt_ops.safe_halt = vmi_safe_halt;
#endif
	para_fill(wbinvd, WBINVD);
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	para_fill(read_tsc, RDTSC);
	/* paravirt_ops.rdpmc = vmi_rdpmc */
	/* TR interface doesn't pass TR value */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetTR);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops.set_tr = (void *)rel->eip;
		paravirt_ops.load_tr_desc = vmi_set_tr;
	}

	/* LDT is special, too */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetLDT);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops._set_ldt = (void *)rel->eip;
		paravirt_ops.set_ldt = vmi_set_ldt;
	}
	para_fill(load_gdt, SetGDT);
	para_fill(load_idt, SetIDT);
	para_fill(store_gdt, GetGDT);
	para_fill(store_idt, GetIDT);
	para_fill(store_tr, GetTR);
	paravirt_ops.load_tls = vmi_load_tls;
	para_fill(write_ldt_entry, WriteLDTEntry);
	para_fill(write_gdt_entry, WriteGDTEntry);
	para_fill(write_idt_entry, WriteIDTEntry);
	reloc = call_vrom_long_func(vmi_rom, get_reloc,
				    VMI_CALL_UpdateKernelStack);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops.set_kernel_stack = (void *)rel->eip;
		paravirt_ops.load_esp0 = vmi_load_esp0;
	}

	para_fill(set_iopl_mask, SetIOPLMask);
	paravirt_ops.io_delay = (void *)vmi_nop;
	if (!disable_nodelay) {
		paravirt_ops.const_udelay = (void *)vmi_nop;
	}
	para_fill(set_lazy_mode, SetLazyMode);

	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_FlushTLB);
	if (rel->type != VMI_RELOCATION_NONE) {
		vmi_ops.flush_tlb = (void *)rel->eip;
		paravirt_ops.flush_tlb_user = vmi_flush_tlb_user;
		paravirt_ops.flush_tlb_kernel = vmi_flush_tlb_kernel;
	}
	para_fill(flush_tlb_single, InvalPage);
	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	paravirt_ops.alloc_pt = vmi_allocate_pt;
	paravirt_ops.alloc_pd = vmi_allocate_pd;
	paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
	paravirt_ops.release_pt = vmi_release_pt;
	paravirt_ops.release_pd = vmi_release_pd;
	paravirt_ops.set_pte = vmi_set_pte;
	paravirt_ops.set_pte_at = vmi_set_pte_at;
	paravirt_ops.set_pmd = vmi_set_pmd;
	paravirt_ops.pte_update = vmi_update_pte;
	paravirt_ops.pte_update_defer = vmi_update_pte_defer;
#ifdef CONFIG_X86_PAE
	paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
	paravirt_ops.set_pte_present = vmi_set_pte_present;
	paravirt_ops.set_pud = vmi_set_pud;
	paravirt_ops.pte_clear = vmi_pte_clear;
	paravirt_ops.pmd_clear = vmi_pmd_clear;
#endif
	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	paravirt_ops.iret = (void *)0xbadbab0;
#ifdef CONFIG_SMP
	paravirt_ops.startup_ipi_hook = vmi_startup_ipi_hook;
	vmi_ops.set_initial_ap_state = vmi_get_function(VMI_CALL_SetInitialAPState);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	paravirt_ops.apic_read = vmi_get_function(VMI_CALL_APICRead);
	paravirt_ops.apic_write = vmi_get_function(VMI_CALL_APICWrite);
	paravirt_ops.apic_write_atomic = vmi_get_function(VMI_CALL_APICWrite);
#endif
	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		paravirt_ops.time_init = vmi_time_init;
		paravirt_ops.get_wallclock = vmi_get_wallclock;
		paravirt_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
		paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
#endif
		custom_sched_clock = vmi_sched_clock;
	}
	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__start_parainstructions, __stop_parainstructions);

	return 1;
}

void __init vmi_init(void)
{
	unsigned long flags;

	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, basil */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

	local_irq_save(flags);
	activate_vmi();
	local_irq_restore(flags & X86_EFLAGS_IF);
}

static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_nodelay"))
		disable_nodelay = 1;
	else if (!strcmp(arg, "disable_pge")) {
		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
		disable_mtrr = 1;
	}
	return 0;
}

early_param("vmi", parse_vmi);
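
/*
 * Usage note (illustrative, not part of the original source):
 * early_param() registers "vmi" as an early boot option, so the flags
 * above are selected from the kernel command line, e.g.
 *
 *	vmi=disable_tsc
 *
 * which clears the TSC feature bit before the VMI ROM is activated.
 */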