/*
 * Architecture specific (i386) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 */
10 #include <linux/init.h>
11 #include <linux/types.h>
12 #include <linux/kernel.h>
13 #include <linux/smp.h>
14 #include <linux/irq.h>
15 #include <linux/reboot.h>
16 #include <linux/kexec.h>
17 #include <linux/irq.h>
18 #include <linux/delay.h>
19 #include <linux/elf.h>
20 #include <linux/elfcore.h>
22 #include <asm/processor.h>
23 #include <asm/hardirq.h>
25 #include <asm/hw_irq.h>
30 note_buf_t crash_notes
[NR_CPUS
];
31 /* This keeps a track of which one is crashing cpu. */
32 static int crashing_cpu
;
34 static u32
*append_elf_note(u32
*buf
,
35 char *name
, unsigned type
, void *data
, size_t data_len
)
38 note
.n_namesz
= strlen(name
) + 1;
39 note
.n_descsz
= data_len
;
41 memcpy(buf
, ¬e
, sizeof(note
));
42 buf
+= (sizeof(note
) +3)/4;
43 memcpy(buf
, name
, note
.n_namesz
);
44 buf
+= (note
.n_namesz
+ 3)/4;
45 memcpy(buf
, data
, note
.n_descsz
);
46 buf
+= (note
.n_descsz
+ 3)/4;
50 static void final_note(u32
*buf
)
56 memcpy(buf
, ¬e
, sizeof(note
));
60 static void crash_save_this_cpu(struct pt_regs
*regs
, int cpu
)
62 struct elf_prstatus prstatus
;
64 if ((cpu
< 0) || (cpu
>= NR_CPUS
)) {
67 /* Using ELF notes here is opportunistic.
68 * I need a well defined structure format
69 * for the data I pass, and I need tags
70 * on the data to indicate what information I have
71 * squirrelled away. ELF notes happen to provide
72 * all of that that no need to invent something new.
74 buf
= &crash_notes
[cpu
][0];
75 memset(&prstatus
, 0, sizeof(prstatus
));
76 prstatus
.pr_pid
= current
->pid
;
77 elf_core_copy_regs(&prstatus
.pr_reg
, regs
);
78 buf
= append_elf_note(buf
, "CORE", NT_PRSTATUS
,
79 &prstatus
, sizeof(prstatus
));
84 static void crash_get_current_regs(struct pt_regs
*regs
)
86 __asm__
__volatile__("movl %%ebx,%0" : "=m"(regs
->ebx
));
87 __asm__
__volatile__("movl %%ecx,%0" : "=m"(regs
->ecx
));
88 __asm__
__volatile__("movl %%edx,%0" : "=m"(regs
->edx
));
89 __asm__
__volatile__("movl %%esi,%0" : "=m"(regs
->esi
));
90 __asm__
__volatile__("movl %%edi,%0" : "=m"(regs
->edi
));
91 __asm__
__volatile__("movl %%ebp,%0" : "=m"(regs
->ebp
));
92 __asm__
__volatile__("movl %%eax,%0" : "=m"(regs
->eax
));
93 __asm__
__volatile__("movl %%esp,%0" : "=m"(regs
->esp
));
94 __asm__
__volatile__("movw %%ss, %%ax;" :"=a"(regs
->xss
));
95 __asm__
__volatile__("movw %%cs, %%ax;" :"=a"(regs
->xcs
));
96 __asm__
__volatile__("movw %%ds, %%ax;" :"=a"(regs
->xds
));
97 __asm__
__volatile__("movw %%es, %%ax;" :"=a"(regs
->xes
));
98 __asm__
__volatile__("pushfl; popl %0" :"=m"(regs
->eflags
));
100 regs
->eip
= (unsigned long)current_text_addr();
103 /* CPU does not save ss and esp on stack if execution is already
104 * running in kernel mode at the time of NMI occurrence. This code
107 static void crash_setup_regs(struct pt_regs
*newregs
, struct pt_regs
*oldregs
)
109 memcpy(newregs
, oldregs
, sizeof(*newregs
));
110 newregs
->esp
= (unsigned long)&(oldregs
->esp
);
111 __asm__
__volatile__("xorl %eax, %eax;");
112 __asm__
__volatile__ ("movw %%ss, %%ax;" :"=a"(newregs
->xss
));
115 /* We may have saved_regs from where the error came from
116 * or it is NULL if via a direct panic().
118 static void crash_save_self(struct pt_regs
*saved_regs
)
122 cpu
= smp_processor_id();
125 crash_setup_regs(®s
, saved_regs
);
127 crash_get_current_regs(®s
);
128 crash_save_this_cpu(®s
, cpu
);
132 static atomic_t waiting_for_crash_ipi
;
134 static int crash_nmi_callback(struct pt_regs
*regs
, int cpu
)
136 struct pt_regs fixed_regs
;
138 /* Don't do anything if this handler is invoked on crashing cpu.
139 * Otherwise, system will completely hang. Crashing cpu can get
140 * an NMI if system was initially booted with nmi_watchdog parameter.
142 if (cpu
== crashing_cpu
)
146 if (!user_mode(regs
)) {
147 crash_setup_regs(&fixed_regs
, regs
);
150 crash_save_this_cpu(regs
, cpu
);
151 disable_local_APIC();
152 atomic_dec(&waiting_for_crash_ipi
);
153 /* Assume hlt works */
160 * By using the NMI code instead of a vector we just sneak thru the
161 * word generator coming out with just what we want. AND it does
162 * not matter if clustered_apic_mode is set or not.
164 static void smp_send_nmi_allbutself(void)
166 send_IPI_allbutself(APIC_DM_NMI
);
169 static void nmi_shootdown_cpus(void)
172 atomic_set(&waiting_for_crash_ipi
, num_online_cpus() - 1);
174 /* Would it be better to replace the trap vector here? */
175 set_nmi_callback(crash_nmi_callback
);
176 /* Ensure the new callback function is set before sending
181 smp_send_nmi_allbutself();
183 msecs
= 1000; /* Wait at most a second for the other cpus to stop */
184 while ((atomic_read(&waiting_for_crash_ipi
) > 0) && msecs
) {
189 /* Leave the nmi callback set */
190 disable_local_APIC();
/* NOTE(review): this is evidently the !CONFIG_SMP variant of
 * nmi_shootdown_cpus(); the surrounding #else/#endif preprocessor
 * guards appear to have been lost -- restore them, otherwise this
 * redefines the SMP version above. */
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
199 void machine_crash_shutdown(struct pt_regs
*regs
)
201 /* This function is only called after the system
202 * has paniced or is otherwise in a critical state.
203 * The minimum amount of code to allow a kexec'd kernel
204 * to run successfully needs to happen here.
206 * In practice this means shooting down the other cpus in
209 /* The kernel is broken so disable interrupts */
212 /* Make a note of crashing cpu. Will be used in NMI callback.*/
213 crashing_cpu
= smp_processor_id();
214 nmi_shootdown_cpus();
216 #if defined(CONFIG_X86_IO_APIC)
219 crash_save_self(regs
);