arch/x86/kernel/alternative.c
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
static const unsigned char *const __initconst_or_module
intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
static const unsigned char *const __initconst_or_module
k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
static const unsigned char *const __initconst_or_module
k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
	P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
	P6_NOP7 P6_NOP8
    "\t.previous");
extern const unsigned char p6nops[];
static const unsigned char *const __initconst_or_module
p6_nops[ASM_NOP_MAX+1] = {
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return k8_nops;
}

#else /* CONFIG_X86_64 */

static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_has(X86_FEATURE_K8))
		return k8_nops;
	else if (boot_cpu_has(X86_FEATURE_K7))
		return k7_nops;
	else if (boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return intel_nops;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	const unsigned char *const *noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__func__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
		/* 0xe8 is a 5-byte relative call; re-target it for the new location */
		if (*insnbuf == 0xe8 && a->replacementlen == 5)
			*(s32 *)(insnbuf + 1) += a->replacement - a->instr;
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke_early(instr, insnbuf, a->instrlen);
	}
}

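/*
 * For illustration (a rough sketch, not a definition taken from this file):
 * the alt_instr entries walked above are normally generated by the
 * alternative() macro family in <asm/alternative.h>. A call site looks
 * roughly like
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * which emits the original instruction inline and records the replacement
 * plus the required CPUID feature bit in the .altinstructions section that
 * apply_alternatives() patches in at boot.
 */
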
#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}

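/*
 * Rough sketch of where the .smp_locks offsets come from: the LOCK_PREFIX
 * macro in <asm/alternative.h> emits the 0xf0 lock prefix byte and records a
 * 32-bit relative pointer to it in a dedicated section, along these lines
 * (details are an assumption for illustration):
 *
 *	.section .smp_locks, "a"
 *	.balign 4
 *	.long 671f - .		# offset back to the prefix byte
 *	.previous
 *	671:	lock; ...
 *
 * The two helpers above then flip that single byte between 0xf0 (lock) and
 * 0x3e (a DS override, harmless here) when switching between SMP and UP
 * behaviour.
 */
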
struct smp_alt_module {
	/* the module these lock prefixes belong to (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * Older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local interruptions
	   that might execute the code that is about to be patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
static void *__init_or_module text_poke_early(void *addr, const void *opcode,
					      size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

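/*
 * Usage sketch (illustrative only): boot-time callers build the patched
 * bytes in a local buffer and write them back in one call, roughly what
 * apply_alternatives() does above:
 *
 *	u8 buf[MAX_PATCH_LEN];
 *
 *	memcpy(buf, a->replacement, a->replacementlen);
 *	add_nops(buf + a->replacementlen, a->instrlen - a->replacementlen);
 *	text_poke_early(a->instr, buf, a->instrlen);
 *
 * No extra locking is needed at that point because only the boot CPU runs.
 */
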
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

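/*
 * Usage sketch (illustrative only): a typical caller such as kprobes holds
 * text_mutex and swaps a single opcode byte, e.g. planting a breakpoint
 * (p->addr here stands for the caller's probe address):
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(p->addr, ((unsigned char []){0xcc}), 1);	/- int3 -/
 *	mutex_unlock(&text_mutex);
 *
 * Anything longer, or anything other CPUs may be executing concurrently,
 * should go through text_poke_smp() below instead.
 */
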
/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
	void *addr;
	const void *opcode;
	size_t len;
};

static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;

	if (atomic_dec_and_test(&stop_machine_first)) {
		text_poke(tpp->addr, tpp->opcode, tpp->len);
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	flush_icache_range((unsigned long)tpp->addr,
			   (unsigned long)tpp->addr + tpp->len);
	return 0;
}

/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instruction by using stop_machine() on SMP. This allows
 * user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying
 * should be allowed, since stop_machine() does _not_ protect code against
 * NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;

	tpp.addr = addr;
	tpp.opcode = opcode;
	tpp.len = len;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
	return addr;
}

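/*
 * Usage sketch (illustrative only): callers are expected to hold both the
 * CPU hotplug lock and text_mutex, since the write happens inside
 * stop_machine() with every online CPU parked:
 *
 *	get_online_cpus();
 *	mutex_lock(&text_mutex);
 *	text_poke_smp(addr, new_insn, len);
 *	mutex_unlock(&text_mutex);
 *	put_online_cpus();
 *
 * The kprobes jump optimization, for example, uses this path to replace a
 * whole multi-byte instruction with a jump.
 */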