/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */
#include <linux/module.h>
#include <linux/license.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
   (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
   (((VROMLONGFUNC *)(rom->func)) (arg))
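/*
 * For illustration: since these macros just cast a vrom_header member to a
 * function pointer, call_vrom_func(vmi_rom, vmi_init) invokes the ROM's
 * vmi_init entry with the regparm(1) convention declared above, and
 * call_vrom_long_func() does the same for entries returning a 64-bit value.
 */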
static struct vrom_header *vmi_rom;
static int license_gplok;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;
/* Cached VMI operations */
static struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*set_kernel_stack)(u32 selector, u32 esp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, u32, u32, u32);
	void (*flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
} vmi_ops;
/* XXX move this to alternative.h */
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

static char irq_save_disable_callout[] = {
	MNEM_CALL, 0, 0, 0, 0,
	MNEM_CALL, 0, 0, 0, 0,
	MNEM_RET
};
#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5
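/*
 * Layout of the callout above, for reference: bytes 0-4 are a CALL whose
 * rel32 is later pointed at the save_fl entry, bytes 5-9 are a CALL pointed
 * at the irq_disable entry (see the patch_offset() calls in activate_vmi()),
 * and the trailing MNEM_RET returns to the patched call site.  The two
 * IRQ_PATCH_* constants are simply the byte offsets of those CALLs.
 */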
static inline void patch_offset(unsigned char *eip, unsigned char *dest)
{
	*(unsigned long *)(eip+1) = dest-eip-5;
}
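/*
 * Worked example: for a 5-byte CALL or JMP at eip = 0xc0100000 targeting
 * dest = 0xc0100100, the rel32 written at eip+1 is
 * 0xc0100100 - 0xc0100000 - 5 = 0xfb; the displacement is measured from
 * the end of the 5-byte instruction, which is what the CPU expects.
 */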
static unsigned patch_internal(int call, unsigned len, void *insns)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch (rel->type) {
		case VMI_RELOCATION_CALL_REL:
			BUG_ON(len < 5);
			*(char *)insns = MNEM_CALL;
			patch_offset(insns, rel->eip);
			return 5;

		case VMI_RELOCATION_JUMP_REL:
			BUG_ON(len < 5);
			*(char *)insns = MNEM_JMP;
			patch_offset(insns, rel->eip);
			return 5;

		case VMI_RELOCATION_NOP:
			/* obliterate the whole thing */
			return 0;

		case VMI_RELOCATION_NONE:
			/* leave native code in place */
			break;

		default:
			BUG();
	}
	return len;
}
/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len)
{
	switch (type) {
		case PARAVIRT_IRQ_DISABLE:
			return patch_internal(VMI_CALL_DisableInterrupts, len, insns);
		case PARAVIRT_IRQ_ENABLE:
			return patch_internal(VMI_CALL_EnableInterrupts, len, insns);
		case PARAVIRT_RESTORE_FLAGS:
			return patch_internal(VMI_CALL_SetInterruptMask, len, insns);
		case PARAVIRT_SAVE_FLAGS:
			return patch_internal(VMI_CALL_GetInterruptMask, len, insns);
		case PARAVIRT_SAVE_FLAGS_IRQ_DISABLE:
			if (len >= 10) {
				patch_internal(VMI_CALL_GetInterruptMask, len, insns);
				patch_internal(VMI_CALL_DisableInterrupts, len-5, insns+5);
			} else {
				/*
				 * You bastards didn't leave enough room to
				 * patch save_flags_irq_disable inline.  Patch
				 * to a helper.
				 */
				BUG_ON(len < 5);
				*(char *)insns = MNEM_CALL;
				patch_offset(insns, irq_save_disable_callout);
			}
			return 5;
		case PARAVIRT_INTERRUPT_RETURN:
			return patch_internal(VMI_CALL_IRET, len, insns);
		case PARAVIRT_STI_SYSEXIT:
			return patch_internal(VMI_CALL_SYSEXIT, len, insns);
		default:
			break;
	}
	return len;
}
/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx)
{
	int override = 0;
	if (*eax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*eax),
			"=b" (*ebx),
			"=c" (*ecx),
			"=d" (*edx)
		      : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid));
	if (override) {
		if (disable_pse)
			*edx &= ~X86_FEATURE_PSE;
		if (disable_pge)
			*edx &= ~X86_FEATURE_PGE;
		if (disable_sep)
			*edx &= ~X86_FEATURE_SEP;
		if (disable_tsc)
			*edx &= ~X86_FEATURE_TSC;
		if (disable_mtrr)
			*edx &= ~X86_FEATURE_MTRR;
	}
}
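/*
 * Note on the asm constraints above: "=a".."=d" bind the four outputs to
 * EAX..EDX, while the "0" and "2" inputs reuse the EAX and ECX slots so
 * that *eax and *ecx are loaded into those registers before the indirect
 * call; "r" passes the cached ROM entry point as operand %6.
 */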
static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new->a, new->b);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}
static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	u32 low, high;

	pack_descriptor(&low, &high, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESCTYPE_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}
static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}
static void vmi_load_esp0(struct tss_struct *tss,
			  struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->esp0);
}
static void vmi_flush_tlb_user(void)
{
	vmi_ops.flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops.flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}
/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}

/* For NO_IDLE_HZ, we stop the clock when halting the kernel */
static fastcall void vmi_safe_halt(void)
{
	int idle = vmi_stop_hz_timer();
	vmi_ops.halt();
	if (idle) {
		local_irq_disable();
		vmi_account_time_restart_hz_timer();
		local_irq_enable();
	}
}
#ifdef CONFIG_DEBUG_PAGE_TYPE

#ifdef CONFIG_X86_PAE
#define MAX_BOOT_PTS (2048+4+1)
#else
#define MAX_BOOT_PTS (1024+1)
#endif

/*
 * During boot, mem_map is not yet available in paging_init, so stash
 * all the boot page allocations here.
 */
static struct {
	u32 pfn;
	int type;
} boot_page_allocations[MAX_BOOT_PTS];
static int num_boot_page_allocations;
static int boot_allocations_applied;
void vmi_apply_boot_page_allocations(void)
{
	int i;
	BUG_ON(!mem_map);
	for (i = 0; i < num_boot_page_allocations; i++) {
		struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
		page->type = boot_page_allocations[i].type;
		page->type = boot_page_allocations[i].type &
				~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	}
	boot_allocations_applied = 1;
}
static void record_page_type(u32 pfn, int type)
{
	BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
	boot_page_allocations[num_boot_page_allocations].pfn = pfn;
	boot_page_allocations[num_boot_page_allocations].type = type;
	num_boot_page_allocations++;
}
static void check_zeroed_page(u32 pfn, int type, struct page *page)
{
	u32 *ptr;
	int i;
	int limit = PAGE_SIZE / sizeof(int);

	if (page_address(page))
		ptr = (u32 *)page_address(page);
	else
		ptr = (u32 *)__va(pfn << PAGE_SHIFT);

	/*
	 * When cloning the root in non-PAE mode, only the userspace
	 * pdes need to be zeroed.
	 */
	if (type & VMI_PAGE_CLONE)
		limit = USER_PTRS_PER_PGD;
	for (i = 0; i < limit; i++)
		BUG_ON(ptr[i]);
}
/*
 * We stash the page type into struct page so we can verify the page
 * types are used properly.
 */
static void vmi_set_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - don't track */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		if (type != VMI_PAGE_NORMAL)
			BUG_ON(page->type);
		else
			BUG_ON(page->type == VMI_PAGE_NORMAL);
		page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
		if (type & VMI_PAGE_ZEROED)
			check_zeroed_page(pfn, type, page);
	} else {
		record_page_type(pfn, type);
	}
}
static void vmi_check_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - skip checks */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
		BUG_ON(type == VMI_PAGE_NORMAL && page->type);
		BUG_ON((type & page->type) == 0);
	}
}
#else
#define vmi_set_page_type(p,t) do { } while (0)
#define vmi_check_page_type(p,t) do { } while (0)
#endif
static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
{
	/*
	 * Internally, the VMI ROM must map virtual addresses to physical
	 * addresses for processing MMU updates.  By the time MMU updates
	 * are issued, this information is typically already lost.
	 * Fortunately, the VMI provides a cache of mapping slots for active
	 * page tables.
	 *
	 * We use slot zero for the linear mapping of physical memory, and
	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
	 *
	 *  args:                 SLOT                 VA    COUNT PFN
	 */
	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
}
static void vmi_allocate_pt(u32 pfn)
{
	vmi_set_page_type(pfn, VMI_PAGE_L1);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pd(u32 pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * a page table page.
	 */
	vmi_set_page_type(pfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
{
	vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
	vmi_check_page_type(clonepfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pt(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

static void vmi_release_pd(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}
/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
                                       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
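/*
 * For example, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0) evaluates to
 * VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK) when mm
 * is current->active_mm (or init_mm for a kernel update), and to plain
 * VMI_PAGE_PT otherwise; the _defer variant just ORs in VMI_PAGE_DEFER.
 */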
static void vmi_update_pte(struct mm_struct *mm, u32 addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* XXX because of set_pmd_pte, this can be called on PT or PD layers */
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}
static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}
#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	/*
	 * XXX This is called from set_pmd_pte, but at both PT
	 * and PD layers so the VMI_PAGE_PT flag is wrong.  But
	 * it is only called for large page mapping changes,
	 * the Xen backend doesn't support large pages, and the
	 * ESX backend doesn't depend on the flag.
	 */
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif
#ifdef CONFIG_SMP
extern void setup_pda(void);

static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PDA;
	ap.gs = 0;

	ap.eflags = 0;

	setup_pda();

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif
static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
				rom->api_version_maj,
				rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run... anyways, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
		manufacturer, product,
		rom->api_version_maj, rom->api_version_min,
		pci->rom_version_maj, pci->rom_version_min);

	license_gplok = license_is_gpl_compatible(license);
	if (!license_gplok) {
		printk(KERN_WARNING "VMI: ROM license '%s' taints kernel... "
		       "inlining disabled\n",
		       license);
		add_taint(TAINT_PROPRIETARY_MODULE);
	}
	return 1;
}
/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;
		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}
/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
}
/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type != VMI_RELOCATION_NONE) {			\
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);	\
		paravirt_ops.opname = (void *)rel->eip;		\
	} else if (rel->type == VMI_RELOCATION_NOP)		\
		paravirt_ops.opname = (void *)vmi_nop;		\
} while (0)
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		paravirt_ops.opname = wrapper;			\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)
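/*
 * As an example of the fill pattern, para_fill(save_fl, GetInterruptMask)
 * expands (roughly) to:
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc,
 *				    VMI_CALL_GetInterruptMask);
 *	if (rel->type != VMI_RELOCATION_NONE) {
 *		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
 *		paravirt_ops.save_fl = (void *)rel->eip;
 *	}
 *
 * so each native paravirt-op is either left alone, pointed directly into
 * the ROM, or (for para_wrap) routed through a translation stub that also
 * caches the ROM entry point in vmi_ops.
 */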
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);

	paravirt_ops.paravirt_enabled = 1;
	paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;

	paravirt_ops.patch = vmi_patch;
	paravirt_ops.name = "vmi";

	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions:
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(cpuid, vmi_cpuid, cpuid, CPUID);
	para_fill(clts, CLTS);
	para_fill(get_debugreg, GetDR);
	para_fill(set_debugreg, SetDR);
	para_fill(read_cr0, GetCR0);
	para_fill(read_cr2, GetCR2);
	para_fill(read_cr3, GetCR3);
	para_fill(read_cr4, GetCR4);
	para_fill(write_cr0, SetCR0);
	para_fill(write_cr2, SetCR2);
	para_fill(write_cr3, SetCR3);
	para_fill(write_cr4, SetCR4);
	para_fill(save_fl, GetInterruptMask);
	para_fill(restore_fl, SetInterruptMask);
	para_fill(irq_disable, DisableInterrupts);
	para_fill(irq_enable, EnableInterrupts);

	/* irq_save_disable !!! sheer pain */
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK],
		     (char *)paravirt_ops.save_fl);
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
		     (char *)paravirt_ops.irq_disable);

	para_fill(wbinvd, WBINVD);
	para_fill(read_tsc, RDTSC);

	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value, wrap */
	para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

	para_fill(load_gdt, SetGDT);
	para_fill(load_idt, SetIDT);
	para_fill(store_gdt, GetGDT);
	para_fill(store_idt, GetIDT);
	para_fill(store_tr, GetTR);
	paravirt_ops.load_tls = vmi_load_tls;
	para_fill(write_ldt_entry, WriteLDTEntry);
	para_fill(write_gdt_entry, WriteGDTEntry);
	para_fill(write_idt_entry, WriteIDTEntry);
	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
	para_fill(set_iopl_mask, SetIOPLMask);
	para_fill(io_delay, IODelay);
	para_fill(set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);
	para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, flush_tlb, FlushTLB);
	para_fill(flush_tlb_single, InvalPage);
	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif

	if (vmi_ops.set_pte) {
		paravirt_ops.set_pte = vmi_set_pte;
		paravirt_ops.set_pte_at = vmi_set_pte_at;
		paravirt_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
		paravirt_ops.set_pte_present = vmi_set_pte_present;
		paravirt_ops.set_pud = vmi_set_pud;
		paravirt_ops.pte_clear = vmi_pte_clear;
		paravirt_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		paravirt_ops.pte_update = vmi_update_pte;
		paravirt_ops.pte_update_defer = vmi_update_pte_defer;
	}

	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		paravirt_ops.alloc_pt = vmi_allocate_pt;
		paravirt_ops.alloc_pd = vmi_allocate_pd;
		paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		paravirt_ops.release_pt = vmi_release_pt;
		paravirt_ops.release_pd = vmi_release_pd;
	}
	para_wrap(map_pt_hook, vmi_map_pt_hook, set_linear_mapping,
		  SetLinearMapping);
	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	paravirt_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
	para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(apic_read, APICRead);
	para_fill(apic_write, APICWrite);
	para_fill(apic_write_atomic, APICWrite);
#endif
	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		paravirt_ops.time_init = vmi_time_init;
		paravirt_ops.get_wallclock = vmi_get_wallclock;
		paravirt_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
		paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
#endif
		paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
		paravirt_ops.get_cpu_khz = vmi_cpu_khz;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_noidle = 1;
		disable_vmi_timer = 1;
	}

	/* No idle HZ mode only works if VMI timer and no idle is enabled */
	if (disable_noidle || disable_vmi_timer)
		para_fill(safe_halt, Halt);
	else
		para_wrap(safe_halt, vmi_safe_halt, halt, Halt);

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__start_parainstructions, __stop_parainstructions);

	vmi_bringup();

	return 1;
}
void __init vmi_init(void)
{
	unsigned long flags;

	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

	local_irq_save(flags);
	activate_vmi();

#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
	local_irq_restore(flags & X86_EFLAGS_IF);
}
static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);
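/*
 * Usage note: these options arrive via the "vmi=" kernel command line
 * parameter, e.g. "vmi=disable_tsc" or "vmi=disable_timer"; the resulting
 * disable_* flags are consulted by vmi_cpuid() and activate_vmi() above.
 */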