x86: optimize lock prefix switching to run less frequently
arch/x86/kernel/alternative.c
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
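
/*
 * Each *_nops[] table below is indexed by length: entry [n] points at an
 * n-byte nop sequence for that CPU family.  add_nops() uses this to pad a
 * buffer with the largest nops that still fit the remaining length.
 */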
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.section .rodata, \"a\"\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern const unsigned char intelnops[];
static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.section .rodata, \"a\"\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern const unsigned char k8nops[];
static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.section .rodata, \"a\"\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern const unsigned char k7nops[];
static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t.section .rodata, \"a\"\np6nops: "
	P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
	P6_NOP7 P6_NOP8);
extern const unsigned char p6nops[];
static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline const unsigned char*const * find_nop_table(void)
{
	return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	       boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
}

#else /* CONFIG_X86_64 */

static const struct nop {
	int cpuid;
	const unsigned char *const *noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ X86_FEATURE_P4, p6_nops },
	{ X86_FEATURE_P3, p6_nops },
	{ -1, NULL }
};

static const unsigned char*const * find_nop_table(void)
{
	const unsigned char *const *noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void add_nops(void *insns, unsigned int len)
{
	const unsigned char *const *noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	char insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke(instr, insnbuf, a->instrlen);
	}
}
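
/*
 * For illustration only (the call sites live elsewhere): alternative sites
 * are normally emitted by the alternative() macros in <asm/alternative.h>,
 * which record an alt_instr entry in the .altinstructions section.  Roughly,
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * asks apply_alternatives() above to overwrite the old sequence with the
 * new one (padded with nops) once the CPU feature is known to be present.
 */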

#ifdef CONFIG_SMP
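
/*
 * alternatives_smp_lock/unlock patch every byte recorded in the smp_locks
 * list that falls inside [text, text_end): 0xf0 (the lock prefix) for SMP
 * operation, a one-byte nop for UP operation.
 */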
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;
	char insn[1];

	if (noreplace_smp)
		return;

	add_nops(insn, 1);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		text_poke(*ptr, insn, 1);
	}
}

struct smp_alt_module {
	/* the module owning these lock prefixes (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */
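
/*
 * Register a locks/text range so its lock prefixes can be switched later.
 * Called with mod == NULL for the core kernel at boot (see
 * alternative_instructions()) and by the module loader for each module.
 */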
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
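
/*
 * Switch all registered ranges between SMP and UP lock-prefix variants,
 * typically at CPU hotplug time.  The smp_mode check below skips the
 * rewrite entirely when the mode is unchanged, which is the "run less
 * frequently" optimization this change is about.
 */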
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif

#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
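
/*
 * Boot-time entry point: applies the CPU-feature alternatives and paravirt
 * patches, then either switches straight to UP code or registers the core
 * kernel's lock prefixes for later SMP/UP switching.
 */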
void __init alternative_instructions(void)
{
	unsigned long flags;

	/* The patching is not fully atomic, so try to avoid local interruptions
	   that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();
#ifdef CONFIG_X86_MCE
	stop_mce();
#endif

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_possible_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
#ifdef CONFIG_X86_MCE
	restart_mce();
#endif
}
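
/*
 * text_poke() below is the single primitive that all of the patching above
 * goes through: a plain memcpy into kernel text followed by sync_core().
 */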
/*
 * Warning:
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these instructions.
 * And on the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
	memcpy(addr, opcode, len);
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
}