#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int noreplace_smp     = 0;
static int smp_alt_once      = 0;
static int debug_alternative = 0;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
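
/*
 * Note (added for clarity): the __setup() hooks above register kernel
 * command-line flags.  Booting with "smp-alt-boot" patches the SMP
 * alternatives once at boot and frees the tables, "debug-alternative"
 * enables the DPRINTK output below, and "noreplace-smp" leaves the SMP
 * lock prefixes untouched.
 */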

#ifdef CONFIG_PARAVIRT
static int noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
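
/*
 * Note (added for clarity): the nop sequences for all lengths are
 * emitted back to back, so the n-byte nop starts at byte offset
 * 1 + 2 + ... + (n-1).  The tables simply index them so that
 * intel_nops[n] (and k8_nops[n], k7_nops[n] below) points at an n-byte
 * nop, for 1 <= n <= ASM_NOP_MAX; index 0 is unused.
 */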

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
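
/*
 * Note (added for clarity): on 32-bit kernels the table defaults to the
 * generic Intel nops and the first matching CPU feature bit wins, so a
 * boot CPU with X86_FEATURE_K8 set gets the K8 nops even though later
 * entries might also match.
 */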

static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
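
/*
 * Worked example (added for clarity): with ASM_NOP_MAX == 8, a call
 * such as nop_out(addr, 11) copies one 8-byte nop followed by one
 * 3-byte nop, padding exactly 11 bytes of code with the largest nops
 * available for this CPU type.
 */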

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START +
				     (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}
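
/*
 * Background (added for clarity): each struct alt_instr record walked
 * above is emitted by the alternative() macros in
 * include/asm-i386/alternative.h.  Roughly -- treat this as a sketch,
 * not the authoritative definition:
 *
 *	struct alt_instr {
 *		u8 *instr;		- address of the original instruction
 *		u8 *replacement;	- address of the replacement bytes
 *		u8  cpuid;		- feature bit required to patch
 *		u8  instrlen;		- length of the original
 *		u8  replacementlen;	- length of replacement, <= instrlen
 *		u8  pad;
 *	};
 *
 * A use site such as
 *
 *	alternative("old insn", "new insn", X86_FEATURE_FOO);
 *
 * (X86_FEATURE_FOO is a placeholder) keeps the old instruction in .text
 * and appends one record to the .altinstructions section, which the
 * linker script collects between __alt_instructions and
 * __alt_instructions_end.
 */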

#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0;	/* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	if (noreplace_smp)
		return;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		nop_out(*ptr, 1);
	}
}
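
/*
 * Background (added for clarity): the pointers walked above come from
 * the .smp_locks section.  The LOCK_PREFIX macro (see
 * include/asm-i386/alternative.h) records, for every lock-prefixed
 * instruction, the address of its 0xf0 prefix byte, along the lines
 * of -- a sketch, not the exact macro text:
 *
 *	.section .smp_locks,"a"
 *	.align 4
 *	.long 661f		- address of the prefix byte
 *	.previous
 *	661: lock; ...
 *
 * so a UP kernel can overwrite each prefix with a 1-byte nop, and an
 * SMP switch can restore it later.
 */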

struct smp_alt_module {
	/* module owning these lock prefixes (NULL for the core kernel) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
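
/*
 * Note (added for clarity, an assumption about the callers): the module
 * loader registers each module's .smp_locks and .text ranges here (see
 * module_finalize() in the arch module code) and unregisters them on
 * unload, so modules loaded after an SMP<->UP switch stay consistent
 * with the core kernel.
 */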

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
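
/*
 * Note (added for clarity): the expected callers are the CPU bring-up
 * and tear-down paths -- alternatives_smp_switch(1) when a second CPU
 * comes online, alternatives_smp_switch(0) when only one CPU remains.
 * The BUG_ON above enforces that switching to UP code is only legal
 * while a single CPU is online.
 */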

#endif

#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following
	 * instructions */
	sync_core();
}
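
/*
 * Background (added for clarity): each patch site marks an operation
 * that goes through paravirt_ops; the backend's patch hook may rewrite
 * it in place (e.g. to a native instruction or a direct call) and
 * returns how many bytes it emitted, with the remainder nopped out
 * above.  The record layout is roughly -- a sketch, see the paravirt
 * header for the real definition:
 *
 *	struct paravirt_patch_site {
 *		u8 *instr;	- address of the patch site
 *		u8 instrtype;	- which paravirt operation it is
 *		u8 len;		- bytes available at the site
 *		u16 clobbers;	- registers the patch may clobber
 *	};
 */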

extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);
}
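
/*
 * Note (added for clarity): alternative_instructions() runs once during
 * early boot (from check_bugs() on this kernel's i386 tree), before any
 * secondary CPU is started, which is why patching live .text with only
 * local interrupts disabled is safe here.
 */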