#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#define MAX_PATCH_LEN (255-1)
#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
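/*
 * For example (illustrative): intel_nops[4] == intelnops + 1 + 2 + 3,
 * i.e. skipping past the 1-, 2- and 3-byte nops lands on the 4-byte nop,
 * so *_nops[n] is always a pointer to an n-byte nop sequence.
 */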
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char __initconst_or_module p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
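/*
 * Example (illustrative): with ASM_NOP_MAX == 8, add_nops(buf, 11) fills
 * buf with one 8-byte nop followed by one 3-byte nop.
 */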
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
extern char __vsyscall_0;
void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
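/*
 * Illustration (not from this file): the entries patched below are
 * typically generated by the alternative() macro from <asm/alternative.h>;
 * e.g. a hypothetical
 *
 *	alternative("call generic_impl", "call fast_impl", X86_FEATURE_FOO);
 *
 * emits "call generic_impl" into .text plus a struct alt_instr in
 * .altinstructions whose replacement bytes live in .altinstr_replacement.
 */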
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite a previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= NCAPINTS*32);
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START + (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__func__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);

		/* 0xe8 is a 5-byte relative "call"; adjust its rel32
		   displacement for the instruction's new location. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5)
			*(s32 *)(insnbuf + 1) += a->replacement - a->instr;

		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke_early(instr, insnbuf, a->instrlen);
	}
}
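/*
 * Worked example for the 0xe8 fixup above (illustrative): a 5-byte
 * "call f" encodes rel32 = f - (address of the next instruction). The
 * replacement bytes were assembled at a->replacement but run at a->instr
 * after patching, so the displacement must grow by
 * (a->replacement - a->instr) for the call to still reach f.
 */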
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
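/*
 * Illustration (macro lives in <asm/alternative.h>): LOCK_PREFIX emits the
 * 0xf0 lock byte together with a .smp_locks entry recording its address as
 * a 32-bit relative offset, which is what the two functions above walk:
 *
 *	.section .smp_locks, "a"
 *	.balign 4
 *	.long 671f - .
 *	.previous
 *	671:	lock
 *
 * On UP, rewriting 0xf0 to 0x3e (a harmless DS override) removes the bus
 * lock; on SMP, the 0x3e byte is turned back into 0xf0.
 */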
struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}
bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * Older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}
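/*
 * Usage note (illustrative): the CPU hotplug path is the expected caller,
 * e.g. switching to SMP code via alternatives_smp_switch(1) when a second
 * CPU is brought online, and back via alternatives_smp_switch(0) when the
 * machine drops to a single online CPU.
 */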
/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
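/*
 * Usage sketch (illustrative, not from this file): kprobes arms a
 * breakpoint by rewriting a single opcode byte (int3, 0xcc) through
 * this helper while holding text_mutex:
 *
 *	text_poke(kp->addr, ((unsigned char []){0xcc}), 1);
 */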
/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;
struct text_poke_params {
	struct text_poke_param *params;
	int nparams;
};
static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;
	struct text_poke_param *p;
	int i;

	if (atomic_dec_and_test(&stop_machine_first)) {
		for (i = 0; i < tpp->nparams; i++) {
			p = &tpp->params[i];
			text_poke(p->addr, p->opcode, p->len);
		}
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	for (i = 0; i < tpp->nparams; i++) {
		p = &tpp->params[i];
		flush_icache_range((unsigned long)p->addr,
				   (unsigned long)p->addr + p->len);
	}
	/*
	 * Intel Architecture Software Developer's Manual section 7.1.3
	 * specifies that a core serializing instruction such as "cpuid"
	 * should be executed on _each_ core before the new instruction is
	 * made visible.
	 */
	sync_core();
	return 0;
}
/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. This allows
 * a user to poke/set multi-byte text on SMP. Only modification of
 * non-NMI/MCE code should be allowed, since stop_machine() does _not_
 * protect code against NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;
	struct text_poke_param p;

	p.addr = addr;
	p.opcode = opcode;
	p.len = len;
	tpp.params = &p;
	tpp.nparams = 1;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	/* Use __stop_machine() because the caller already got online_cpus. */
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
	return addr;
}
/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy task, it is better to aggregate text_poke
 * requests and do them all at once if possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
	struct text_poke_params tpp = {.params = params, .nparams = n};

	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
}
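/*
 * Usage sketch (illustrative, not from this file): a caller batching two
 * 5-byte rewrites, with hypothetical addr1/addr2 and opcode buffers:
 *
 *	struct text_poke_param tp[2] = {
 *		{ .addr = addr1, .opcode = insn1, .len = 5 },
 *		{ .addr = addr2, .opcode = insn2, .len = 5 },
 *	};
 *
 *	get_online_cpus();
 *	mutex_lock(&text_mutex);
 *	text_poke_smp_batch(tp, 2);
 *	mutex_unlock(&text_mutex);
 *	put_online_cpus();
 */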