/*
 * x86, cpufreq: remove leftover copymask_copy()
 * [linux-2.6/mini2440.git] / arch / x86 / kernel / reboot.c
 * blob 2b46eb41643b291a182bc2b868ef1481e1db727b
 */
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/efi.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/reboot_fixups.h>
#include <asm/reboot.h>
#include <asm/pci_x86.h>
#include <asm/virtext.h>

#ifdef CONFIG_X86_32
# include <linux/dmi.h>
# include <linux/ctype.h>
# include <linux/mc146818rtc.h>
#else
# include <asm/iommu.h>
#endif

#include <mach_ipi.h>
/*
 * Power off function, if any.  Platform/ACPI code installs its handler
 * here; native_machine_power_off() calls it when set.
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
34 static const struct desc_ptr no_idt = {};
35 static int reboot_mode;
36 enum reboot_type reboot_type = BOOT_KBD;
37 int reboot_force;
39 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
40 static int reboot_cpu = -1;
41 #endif
43 /* This is set if we need to go through the 'emergency' path.
44 * When machine_emergency_restart() is called, we may be on
45 * an inconsistent state and won't be able to do a clean cleanup
47 static int reboot_emergency;
49 /* This is set by the PCI code if either type 1 or type 2 PCI is detected */
50 bool port_cf9_safe = false;
52 /* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci]
53 warm Don't set the cold reboot flag
54 cold Set the cold reboot flag
55 bios Reboot by jumping through the BIOS (only for X86_32)
56 smp Reboot by executing reset on BSP or other CPU (only for X86_32)
57 triple Force a triple fault (init)
58 kbd Use the keyboard controller. cold reset (default)
59 acpi Use the RESET_REG in the FADT
60 efi Use efi reset_system runtime service
61 pci Use the so-called "PCI reset register", CF9
62 force Avoid anything that could hang.
64 static int __init reboot_setup(char *str)
66 for (;;) {
67 switch (*str) {
68 case 'w':
69 reboot_mode = 0x1234;
70 break;
72 case 'c':
73 reboot_mode = 0;
74 break;
76 #ifdef CONFIG_X86_32
77 #ifdef CONFIG_SMP
78 case 's':
79 if (isdigit(*(str+1))) {
80 reboot_cpu = (int) (*(str+1) - '0');
81 if (isdigit(*(str+2)))
82 reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
84 /* we will leave sorting out the final value
85 when we are ready to reboot, since we might not
86 have set up boot_cpu_id or smp_num_cpu */
87 break;
88 #endif /* CONFIG_SMP */
90 case 'b':
91 #endif
92 case 'a':
93 case 'k':
94 case 't':
95 case 'e':
96 case 'p':
97 reboot_type = *str;
98 break;
100 case 'f':
101 reboot_force = 1;
102 break;
105 str = strchr(str, ',');
106 if (str)
107 str++;
108 else
109 break;
111 return 1;
114 __setup("reboot=", reboot_setup);
117 #ifdef CONFIG_X86_32
119 * Reboot options and system auto-detection code provided by
120 * Dell Inc. so their systems "just work". :-)
124 * Some machines require the "reboot=b" commandline option,
125 * this quirk makes that automatic.
127 static int __init set_bios_reboot(const struct dmi_system_id *d)
129 if (reboot_type != BOOT_BIOS) {
130 reboot_type = BOOT_BIOS;
131 printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
133 return 0;
136 static struct dmi_system_id __initdata reboot_dmi_table[] = {
137 { /* Handle problems with rebooting on Dell E520's */
138 .callback = set_bios_reboot,
139 .ident = "Dell E520",
140 .matches = {
141 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
142 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
145 { /* Handle problems with rebooting on Dell 1300's */
146 .callback = set_bios_reboot,
147 .ident = "Dell PowerEdge 1300",
148 .matches = {
149 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
150 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
153 { /* Handle problems with rebooting on Dell 300's */
154 .callback = set_bios_reboot,
155 .ident = "Dell PowerEdge 300",
156 .matches = {
157 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
158 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
161 { /* Handle problems with rebooting on Dell Optiplex 745's SFF*/
162 .callback = set_bios_reboot,
163 .ident = "Dell OptiPlex 745",
164 .matches = {
165 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
166 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
169 { /* Handle problems with rebooting on Dell Optiplex 745's DFF*/
170 .callback = set_bios_reboot,
171 .ident = "Dell OptiPlex 745",
172 .matches = {
173 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
174 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
175 DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
178 { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
179 .callback = set_bios_reboot,
180 .ident = "Dell OptiPlex 745",
181 .matches = {
182 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
183 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
184 DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
187 { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
188 .callback = set_bios_reboot,
189 .ident = "Dell OptiPlex 330",
190 .matches = {
191 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
192 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
193 DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
196 { /* Handle problems with rebooting on Dell 2400's */
197 .callback = set_bios_reboot,
198 .ident = "Dell PowerEdge 2400",
199 .matches = {
200 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
201 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
204 { /* Handle problems with rebooting on Dell T5400's */
205 .callback = set_bios_reboot,
206 .ident = "Dell Precision T5400",
207 .matches = {
208 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
209 DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
212 { /* Handle problems with rebooting on HP laptops */
213 .callback = set_bios_reboot,
214 .ident = "HP Compaq Laptop",
215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
217 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
223 static int __init reboot_init(void)
225 dmi_check_system(reboot_dmi_table);
226 return 0;
228 core_initcall(reboot_init);
230 /* The following code and data reboots the machine by switching to real
231 mode and jumping to the BIOS reset entry point, as if the CPU has
232 really been reset. The previous version asked the keyboard
233 controller to pulse the CPU reset line, which is more thorough, but
234 doesn't work with at least one type of 486 motherboard. It is easy
235 to stop this code working; hence the copious comments. */
236 static const unsigned long long
237 real_mode_gdt_entries [3] =
239 0x0000000000000000ULL, /* Null descriptor */
240 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
241 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
244 static const struct desc_ptr
245 real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
246 real_mode_idt = { 0x3ff, 0 };
248 /* This is 16-bit protected mode code to disable paging and the cache,
249 switch to real mode and jump to the BIOS reset code.
251 The instruction that switches to real mode by writing to CR0 must be
252 followed immediately by a far jump instruction, which set CS to a
253 valid value for real mode, and flushes the prefetch queue to avoid
254 running instructions that have already been decoded in protected
255 mode.
257 Clears all the flags except ET, especially PG (paging), PE
258 (protected-mode enable) and TS (task switch for coprocessor state
259 save). Flushes the TLB after paging has been disabled. Sets CD and
260 NW, to disable the cache on a 486, and invalidates the cache. This
261 is more like the state of a 486 after reset. I don't know if
262 something else should be done for other chips.
264 More could be done here to set up the registers as if a CPU reset had
265 occurred; hopefully real BIOSs don't assume much. */
266 static const unsigned char real_mode_switch [] =
268 0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */
269 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */
270 0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */
271 0x66, 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */
272 0x66, 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */
273 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */
274 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */
275 0x74, 0x02, /* jz f */
276 0x0f, 0x09, /* wbinvd */
277 0x24, 0x10, /* f: andb $0x10,al */
278 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */
280 static const unsigned char jump_to_bios [] =
282 0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */
286 * Switch to real mode and then execute the code
287 * specified by the code and length parameters.
288 * We assume that length will aways be less that 100!
290 void machine_real_restart(const unsigned char *code, int length)
292 local_irq_disable();
294 /* Write zero to CMOS register number 0x0f, which the BIOS POST
295 routine will recognize as telling it to do a proper reboot. (Well
296 that's what this book in front of me says -- it may only apply to
297 the Phoenix BIOS though, it's not clear). At the same time,
298 disable NMIs by setting the top bit in the CMOS address register,
299 as we're about to do peculiar things to the CPU. I'm not sure if
300 `outb_p' is needed instead of just `outb'. Use it to be on the
301 safe side. (Yes, CMOS_WRITE does outb_p's. - Paul G.)
303 spin_lock(&rtc_lock);
304 CMOS_WRITE(0x00, 0x8f);
305 spin_unlock(&rtc_lock);
307 /* Remap the kernel at virtual address zero, as well as offset zero
308 from the kernel segment. This assumes the kernel segment starts at
309 virtual address PAGE_OFFSET. */
310 memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
311 sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
314 * Use `swapper_pg_dir' as our page directory.
316 load_cr3(swapper_pg_dir);
318 /* Write 0x1234 to absolute memory location 0x472. The BIOS reads
319 this on booting to tell it to "Bypass memory test (also warm
320 boot)". This seems like a fairly standard thing that gets set by
321 REBOOT.COM programs, and the previous reset routine did this
322 too. */
323 *((unsigned short *)0x472) = reboot_mode;
325 /* For the switch to real mode, copy some code to low memory. It has
326 to be in the first 64k because it is running in 16-bit mode, and it
327 has to have the same physical and virtual address, because it turns
328 off paging. Copy it near the end of the first page, out of the way
329 of BIOS variables. */
330 memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
331 real_mode_switch, sizeof (real_mode_switch));
332 memcpy((void *)(0x1000 - 100), code, length);
334 /* Set up the IDT for real mode. */
335 load_idt(&real_mode_idt);
337 /* Set up a GDT from which we can load segment descriptors for real
338 mode. The GDT is not used in real mode; it is just needed here to
339 prepare the descriptors. */
340 load_gdt(&real_mode_gdt);
342 /* Load the data segment registers, and thus the descriptors ready for
343 real mode. The base address of each segment is 0x100, 16 times the
344 selector value being loaded here. This is so that the segment
345 registers don't have to be reloaded after switching to real mode:
346 the values are consistent for real mode operation already. */
347 __asm__ __volatile__ ("movl $0x0010,%%eax\n"
348 "\tmovl %%eax,%%ds\n"
349 "\tmovl %%eax,%%es\n"
350 "\tmovl %%eax,%%fs\n"
351 "\tmovl %%eax,%%gs\n"
352 "\tmovl %%eax,%%ss" : : : "eax");
354 /* Jump to the 16-bit code that we copied earlier. It disables paging
355 and the cache, switches to real mode, and jumps to the BIOS reset
356 entry point. */
357 __asm__ __volatile__ ("ljmp $0x0008,%0"
359 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
361 #ifdef CONFIG_APM_MODULE
362 EXPORT_SYMBOL(machine_real_restart);
363 #endif
365 #endif /* CONFIG_X86_32 */
/* Wait (bounded, ~0x10000 polls) for the i8042 input buffer to drain
 * before writing a command to the keyboard controller. */
static inline void kb_wait(void)
{
	int i;

	for (i = 0; i < 0x10000; i++) {
		/* status port 0x64 bit 1 = input buffer full */
		if ((inb(0x64) & 0x02) == 0)
			break;
		udelay(2);
	}
}
/* NMI shootdown callback: disable VMX on the CPU that took the NMI. */
static void vmxoff_nmi(int cpu, struct die_args *args)
{
	cpu_emergency_vmxoff();
}
383 /* Use NMIs as IPIs to tell all CPUs to disable virtualization
385 static void emergency_vmx_disable_all(void)
387 /* Just make sure we won't change CPUs while doing this */
388 local_irq_disable();
390 /* We need to disable VMX on all CPUs before rebooting, otherwise
391 * we risk hanging up the machine, because the CPU ignore INIT
392 * signals when VMX is enabled.
394 * We can't take any locks and we may be on an inconsistent
395 * state, so we use NMIs as IPIs to tell the other CPUs to disable
396 * VMX and halt.
398 * For safety, we will avoid running the nmi_shootdown_cpus()
399 * stuff unnecessarily, but we don't have a way to check
400 * if other CPUs have VMX enabled. So we will call it only if the
401 * CPU we are running on has VMX enabled.
403 * We will miss cases where VMX is not enabled on all CPUs. This
404 * shouldn't do much harm because KVM always enable VMX on all
405 * CPUs anyway. But we can miss it on the small window where KVM
406 * is still enabling VMX.
408 if (cpu_has_vmx() && cpu_vmx_enabled()) {
409 /* Disable VMX on this CPU.
411 cpu_vmxoff();
413 /* Halt and disable VMX on the other CPUs */
414 nmi_shootdown_cpus(vmxoff_nmi);
420 void __attribute__((weak)) mach_reboot_fixups(void)
424 static void native_machine_emergency_restart(void)
426 int i;
428 if (reboot_emergency)
429 emergency_vmx_disable_all();
431 /* Tell the BIOS if we want cold or warm reboot */
432 *((unsigned short *)__va(0x472)) = reboot_mode;
434 for (;;) {
435 /* Could also try the reset bit in the Hammer NB */
436 switch (reboot_type) {
437 case BOOT_KBD:
438 mach_reboot_fixups(); /* for board specific fixups */
440 for (i = 0; i < 10; i++) {
441 kb_wait();
442 udelay(50);
443 outb(0xfe, 0x64); /* pulse reset low */
444 udelay(50);
447 case BOOT_TRIPLE:
448 load_idt(&no_idt);
449 __asm__ __volatile__("int3");
451 reboot_type = BOOT_KBD;
452 break;
454 #ifdef CONFIG_X86_32
455 case BOOT_BIOS:
456 machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
458 reboot_type = BOOT_KBD;
459 break;
460 #endif
462 case BOOT_ACPI:
463 acpi_reboot();
464 reboot_type = BOOT_KBD;
465 break;
467 case BOOT_EFI:
468 if (efi_enabled)
469 efi.reset_system(reboot_mode ?
470 EFI_RESET_WARM :
471 EFI_RESET_COLD,
472 EFI_SUCCESS, 0, NULL);
473 reboot_type = BOOT_KBD;
474 break;
476 case BOOT_CF9:
477 port_cf9_safe = true;
478 /* fall through */
480 case BOOT_CF9_COND:
481 if (port_cf9_safe) {
482 u8 cf9 = inb(0xcf9) & ~6;
483 outb(cf9|2, 0xcf9); /* Request hard reset */
484 udelay(50);
485 outb(cf9|6, 0xcf9); /* Actually do the reset */
486 udelay(50);
488 reboot_type = BOOT_KBD;
489 break;
/*
 * Quiesce the machine before reboot/halt: move to the boot (or chosen)
 * CPU, stop the other CPUs, and shut down APICs, HPET and (on 64-bit)
 * the IOMMU.
 */
void native_machine_shutdown(void)
{
	/* Stop the cpus and apics */
#ifdef CONFIG_SMP

	/* The boot cpu is always logical cpu 0 */
	int reboot_cpu_id = 0;

#ifdef CONFIG_X86_32
	/* See if there has been given a command line override */
	if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
		cpu_online(reboot_cpu))
		reboot_cpu_id = reboot_cpu;
#endif

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(reboot_cpu_id))
		reboot_cpu_id = smp_processor_id();

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));

	/* O.K Now that I'm on the appropriate processor,
	 * stop all of the others.
	 */
	smp_send_stop();
#endif

	lapic_shutdown();

#ifdef CONFIG_X86_IO_APIC
	disable_IO_APIC();
#endif

#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif

#ifdef CONFIG_X86_64
	pci_iommu_shutdown();
#endif
}
537 static void __machine_emergency_restart(int emergency)
539 reboot_emergency = emergency;
540 machine_ops.emergency_restart();
543 static void native_machine_restart(char *__unused)
545 printk("machine restart\n");
547 if (!reboot_force)
548 machine_shutdown();
549 __machine_emergency_restart(0);
552 static void native_machine_halt(void)
554 /* stop other cpus and apics */
555 machine_shutdown();
557 /* stop this cpu */
558 stop_this_cpu(NULL);
561 static void native_machine_power_off(void)
563 if (pm_power_off) {
564 if (!reboot_force)
565 machine_shutdown();
566 pm_power_off();
570 struct machine_ops machine_ops = {
571 .power_off = native_machine_power_off,
572 .shutdown = native_machine_shutdown,
573 .emergency_restart = native_machine_emergency_restart,
574 .restart = native_machine_restart,
575 .halt = native_machine_halt,
576 #ifdef CONFIG_KEXEC
577 .crash_shutdown = native_machine_crash_shutdown,
578 #endif
581 void machine_power_off(void)
583 machine_ops.power_off();
586 void machine_shutdown(void)
588 machine_ops.shutdown();
/* Emergency restart entry point: flags the emergency path (state may be
 * inconsistent) before restarting. */
void machine_emergency_restart(void)
{
	__machine_emergency_restart(1);
}
596 void machine_restart(char *cmd)
598 machine_ops.restart(cmd);
601 void machine_halt(void)
603 machine_ops.halt();
#ifdef CONFIG_KEXEC
/* Dispatch to the installed machine_ops crash_shutdown handler (kexec). */
void machine_crash_shutdown(struct pt_regs *regs)
{
	machine_ops.crash_shutdown(regs);
}
#endif
614 #if defined(CONFIG_SMP)
616 /* This keeps a track of which one is crashing cpu. */
617 static int crashing_cpu;
618 static nmi_shootdown_cb shootdown_callback;
620 static atomic_t waiting_for_crash_ipi;
622 static int crash_nmi_callback(struct notifier_block *self,
623 unsigned long val, void *data)
625 int cpu;
627 if (val != DIE_NMI_IPI)
628 return NOTIFY_OK;
630 cpu = raw_smp_processor_id();
632 /* Don't do anything if this handler is invoked on crashing cpu.
633 * Otherwise, system will completely hang. Crashing cpu can get
634 * an NMI if system was initially booted with nmi_watchdog parameter.
636 if (cpu == crashing_cpu)
637 return NOTIFY_STOP;
638 local_irq_disable();
640 shootdown_callback(cpu, (struct die_args *)data);
642 atomic_dec(&waiting_for_crash_ipi);
643 /* Assume hlt works */
644 halt();
645 for (;;)
646 cpu_relax();
648 return 1;
651 static void smp_send_nmi_allbutself(void)
653 send_IPI_allbutself(NMI_VECTOR);
656 static struct notifier_block crash_nmi_nb = {
657 .notifier_call = crash_nmi_callback,
660 /* Halt all other CPUs, calling the specified function on each of them
662 * This function can be used to halt all other CPUs on crash
663 * or emergency reboot time. The function passed as parameter
664 * will be called inside a NMI handler on all CPUs.
666 void nmi_shootdown_cpus(nmi_shootdown_cb callback)
668 unsigned long msecs;
669 local_irq_disable();
671 /* Make a note of crashing cpu. Will be used in NMI callback.*/
672 crashing_cpu = safe_smp_processor_id();
674 shootdown_callback = callback;
676 atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
677 /* Would it be better to replace the trap vector here? */
678 if (register_die_notifier(&crash_nmi_nb))
679 return; /* return what? */
680 /* Ensure the new callback function is set before sending
681 * out the NMI
683 wmb();
685 smp_send_nmi_allbutself();
687 msecs = 1000; /* Wait at most a second for the other cpus to stop */
688 while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
689 mdelay(1);
690 msecs--;
693 /* Leave the nmi callback set */
695 #else /* !CONFIG_SMP */
696 void nmi_shootdown_cpus(nmi_shootdown_cb callback)
698 /* No other CPUs to shoot down */
700 #endif