/* arch/x86/kernel/paravirt.c */
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
/* nop stub */
void _paravirt_nop(void)
{
}

/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
{
        return x;
}

u64 _paravirt_ident_64(u64 x)
{
        return x;
}
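/*
 * The identity helpers above back the pte_val/make_pte style ops
 * further down (see PTE_IDENT): on bare hardware a page table entry
 * needs no translation, so the patcher can turn those call sites into
 * simple register moves.
 */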
void __init default_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
}
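/*
 * Patching helpers.  Each one returns the number of replacement bytes
 * it wrote into the instruction buffer; the caller pads the rest of
 * the patch site with NOPs.  Returning 0 therefore NOPs out the whole
 * site, while returning the full site length leaves the original
 * instructions in place.
 */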
/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

unsigned paravirt_patch_nop(void)
{
        return 0;
}

unsigned paravirt_patch_ignore(unsigned len)
{
        return len;
}
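/*
 * A relative call or jmp on x86 is one opcode byte followed by a
 * 32-bit displacement, five bytes total; 'branch' is kept packed so
 * it can be written straight into the patch site.
 */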
struct branch {
        unsigned char opcode;
        u32 delta;
} __attribute__((packed));
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (tgt_clobbers & ~site_clobbers)
                return len;     /* target would clobber too much for this site */
        if (len < 5)
                return len;     /* call too long for patch site */

        b->opcode = 0xe8; /* call */
        b->delta = delta;
        BUILD_BUG_ON(sizeof(*b) != 5);

        return 5;
}
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (len < 5)
                return len;     /* jmp too long for patch site */

        b->opcode = 0xe9;       /* jmp */
        b->delta = delta;

        return 5;
}
/* Neat trick to map patch type back to the call within the
 * corresponding structure. */
static void *get_call_destination(u8 type)
{
        struct paravirt_patch_template tmpl = {
                .pv_init_ops = pv_init_ops,
                .pv_time_ops = pv_time_ops,
                .pv_cpu_ops = pv_cpu_ops,
                .pv_irq_ops = pv_irq_ops,
                .pv_apic_ops = pv_apic_ops,
                .pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
                .pv_lock_ops = pv_lock_ops,
#endif
        };

        return *((void **)&tmpl + type);
}
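/*
 * This works because paravirt_patch_template lays the individual
 * pv_*_ops structures out back to back, and 'type' (produced by
 * PARAVIRT_PATCH()) is the index of an op measured in pointer-sized
 * words from the start of the template.
 */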
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len)
{
        void *opfunc = get_call_destination(type);
        unsigned ret;

        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
        else if (opfunc == _paravirt_nop)
                /* If the operation is a nop, then nop the callsite */
                ret = paravirt_patch_nop();

        /* identity functions just return their single argument */
        else if (opfunc == _paravirt_ident_32)
                ret = paravirt_patch_ident_32(insnbuf, len);
        else if (opfunc == _paravirt_ident_64)
                ret = paravirt_patch_ident_64(insnbuf, len);

        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
                /* If operation requires a jmp, then jmp */
                ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
        else
                /* Otherwise call the function; assume target could
                   clobber any caller-save reg */
                ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
                                          addr, clobbers, len);

        return ret;
}
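/*
 * Copy a pre-assembled native instruction sequence into the patch
 * site.  If there is no sequence, or it does not fit, report the full
 * site length so the caller leaves the original code alone.
 */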
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end)
{
        unsigned insn_len = end - start;

        if (insn_len > len || start == NULL)
                insn_len = len;
        else
                memcpy(insnbuf, start, insn_len);

        return insn_len;
}
static void native_flush_tlb(void)
{
        __native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
        __native_flush_tlb_global();
}

static void native_flush_tlb_single(unsigned long addr)
{
        __native_flush_tlb_single(addr);
}
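/*
 * Steal time is time a virtual CPU spent runnable but not running.
 * On bare hardware nothing is stolen, so the native steal clock is a
 * constant zero; paravirt guests install their own steal_clock and
 * enable the static keys below to turn on accounting.
 */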
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
        return 0;
}
/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);
extern void native_usergs_sysret32(void);
extern void native_usergs_sysret64(void);
static struct resource reserve_ioports = {
        .start = 0,
        .end = IO_SPACE_LIMIT,
        .name = "paravirt-ioport",
        .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
        return request_resource(&ioport_resource, &reserve_ioports);
}
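/*
 * Lazy mode support: while a CPU is in lazy MMU or lazy CPU mode, the
 * hypervisor backend may queue page table or context switch updates
 * and flush them as a single batch when the mode is left, instead of
 * trapping on every individual operation.
 */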
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

        this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

        this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
        enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
        leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
        preempt_disable();

        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }

        preempt_enable();
}

void paravirt_start_context_switch(struct task_struct *prev)
{
        BUG_ON(preemptible());

        if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
        }
        enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
        BUG_ON(preemptible());

        leave_lazy(PARAVIRT_LAZY_CPU);

        if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
                arch_enter_lazy_mmu_mode();
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
        if (in_interrupt())
                return PARAVIRT_LAZY_NONE;

        return this_cpu_read(paravirt_lazy_mode);
}
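/*
 * The pv_*_ops structures below are the baseline "bare hardware"
 * implementations: each op either executes the native instruction
 * directly or is a nop.  A hypervisor guest (Xen, for example)
 * replaces the relevant entries early in boot, before the call sites
 * are patched.
 */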
struct pv_info pv_info = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
        .kernel_rpl = 0,
        .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */

#ifdef CONFIG_X86_64
        .extra_user_64bit_cs = __USER_CS,
#endif
};
struct pv_init_ops pv_init_ops = {
        .patch = native_patch,
};

struct pv_time_ops pv_time_ops = {
        .sched_clock = native_sched_clock,
        .steal_clock = native_steal_clock,
};
__visible struct pv_irq_ops pv_irq_ops = {
        .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
        .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
        .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
        .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .safe_halt = native_safe_halt,
        .halt = native_halt,
#ifdef CONFIG_X86_64
        .adjust_exception_frame = paravirt_nop,
#endif
};
__visible struct pv_cpu_ops pv_cpu_ops = {
        .cpuid = native_cpuid,
        .get_debugreg = native_get_debugreg,
        .set_debugreg = native_set_debugreg,
        .clts = native_clts,
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
        .read_cr8 = native_read_cr8,
        .write_cr8 = native_write_cr8,
#endif
        .wbinvd = native_wbinvd,
        .read_msr = native_read_msr_safe,
        .write_msr = native_write_msr_safe,
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
        .read_tscp = native_read_tscp,
        .load_tr_desc = native_load_tr_desc,
        .set_ldt = native_set_ldt,
        .load_gdt = native_load_gdt,
        .load_idt = native_load_idt,
        .store_idt = native_store_idt,
        .store_tr = native_store_tr,
        .load_tls = native_load_tls,
#ifdef CONFIG_X86_64
        .load_gs_index = native_load_gs_index,
#endif
        .write_ldt_entry = native_write_ldt_entry,
        .write_gdt_entry = native_write_gdt_entry,
        .write_idt_entry = native_write_idt_entry,

        .alloc_ldt = paravirt_nop,
        .free_ldt = paravirt_nop,

        .load_sp0 = native_load_sp0,

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
        .irq_enable_sysexit = native_irq_enable_sysexit,
#endif
#ifdef CONFIG_X86_64
#ifdef CONFIG_IA32_EMULATION
        .usergs_sysret32 = native_usergs_sysret32,
#endif
        .usergs_sysret64 = native_usergs_sysret64,
#endif
        .iret = native_iret,
        .swapgs = native_swapgs,

        .set_iopl_mask = native_set_iopl_mask,
        .io_delay = native_io_delay,

        .start_context_switch = paravirt_nop,
        .end_context_switch = paravirt_nop,
};
/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
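/*
 * These helpers are used while handling debug exceptions and kprobes
 * themselves, where hitting another probe would recurse, so they are
 * marked off limits to kprobes.
 */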
struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
        .startup_ipi_hook = paravirt_nop,
#endif
};

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT       __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
#else
/* 64-bit pagetable entries */
#define PTE_IDENT       __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif
struct pv_mmu_ops pv_mmu_ops = {

        .read_cr2 = native_read_cr2,
        .write_cr2 = native_write_cr2,
        .read_cr3 = native_read_cr3,
        .write_cr3 = native_write_cr3,

        .flush_tlb_user = native_flush_tlb,
        .flush_tlb_kernel = native_flush_tlb_global,
        .flush_tlb_single = native_flush_tlb_single,
        .flush_tlb_others = native_flush_tlb_others,

        .pgd_alloc = __paravirt_pgd_alloc,
        .pgd_free = paravirt_nop,

        .alloc_pte = paravirt_nop,
        .alloc_pmd = paravirt_nop,
        .alloc_pud = paravirt_nop,
        .release_pte = paravirt_nop,
        .release_pmd = paravirt_nop,
        .release_pud = paravirt_nop,

        .set_pte = native_set_pte,
        .set_pte_at = native_set_pte_at,
        .set_pmd = native_set_pmd,
        .set_pmd_at = native_set_pmd_at,
        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,
        .pmd_update = paravirt_nop,
        .pmd_update_defer = paravirt_nop,

        .ptep_modify_prot_start = __ptep_modify_prot_start,
        .ptep_modify_prot_commit = __ptep_modify_prot_commit,

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        .set_pte_atomic = native_set_pte_atomic,
        .pte_clear = native_pte_clear,
        .pmd_clear = native_pmd_clear,
#endif
        .set_pud = native_set_pud,

        .pmd_val = PTE_IDENT,
        .make_pmd = PTE_IDENT,

#if PAGETABLE_LEVELS == 4
        .pud_val = PTE_IDENT,
        .make_pud = PTE_IDENT,

        .set_pgd = native_set_pgd,
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

        .pte_val = PTE_IDENT,
        .pgd_val = PTE_IDENT,

        .make_pte = PTE_IDENT,
        .make_pgd = PTE_IDENT,

        .dup_mmap = paravirt_nop,
        .exit_mmap = paravirt_nop,
        .activate_mm = paravirt_nop,

        .lazy_mode = {
                .enter = paravirt_nop,
                .leave = paravirt_nop,
                .flush = paravirt_nop,
        },

        .set_fixmap = native_set_fixmap,
};
EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL    (pv_cpu_ops);
EXPORT_SYMBOL    (pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_apic_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL    (pv_irq_ops);