[PATCH] i386: PARAVIRT: add common patching machinery
[linux-2.6/btrfs-unstable.git] arch/i386/kernel/alternative.c
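The paravirt patching entry point in this file, apply_paravirt(), walks a table of patch sites that the toolchain collects into the .parainstructions section (bounded by __start_parainstructions and __stop_parainstructions). The layout of one table entry is defined in the paravirt headers rather than in this file; the sketch below is only inferred from how apply_paravirt() reads each entry, so the field types and ordering are an assumption:

/* Sketch of a .parainstructions entry, inferred from apply_paravirt()
 * below; the authoritative definition lives in the paravirt header. */
struct paravirt_patch_site {
	u8 *instr;	/* start of the patchable instruction sequence */
	u8 instrtype;	/* which paravirt operation this site invokes */
	u8 len;		/* bytes available for patching at instr */
	u16 clobbers;	/* registers the replacement code may clobber */
};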
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int noreplace_smp     = 0;
static int smp_alt_once      = 0;
static int debug_alternative = 0;
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */
static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START + (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}
#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	if (noreplace_smp)
		return;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		nop_out(*ptr, 1);
	}
}
struct smp_alt_module {
	/* owning module (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following
	 * instructions */
	sync_core();
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				__pa_symbol(&__smp_locks),
				__pa_symbol(&__smp_locks_end));
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}
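apply_paravirt() above hands each patch site to paravirt_ops.patch and nop-pads whatever the callback does not consume. The callback below is a hypothetical backend used only to illustrate that contract; its name, the EXAMPLE_TYPE_IRQ_DISABLE constant, and the exact prototype are assumptions inferred from the call site, not something added by this patch:

/* Made-up site type, for illustration only. */
#define EXAMPLE_TYPE_IRQ_DISABLE 0

/* May write up to @len bytes at @insns and returns how many bytes it
 * actually emitted; apply_paravirt() fills the remainder with nops. */
static unsigned int example_patch(u8 type, u16 clobbers,
				  void *insns, unsigned int len)
{
	/* Inline a 1-byte CLI for the example "disable interrupts" type. */
	if (type == EXAMPLE_TYPE_IRQ_DISABLE && len >= 1) {
		*(u8 *)insns = 0xfa;	/* cli */
		return 1;		/* remaining len - 1 bytes get nop-padded */
	}

	/* Unknown type: keep the compiler-emitted call by claiming the
	 * whole site as used, so nothing is overwritten or nopped. */
	return len;
}

A backend would install a callback like this in its paravirt_ops before alternative_instructions() runs, so the boot-time patching pass above picks it up.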