/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/cpu.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

	if (!ppc_md.hpte_clear_all)
		return -ENOENT;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/*
	 * For non-LPAR, we absolutely can not overwrite the mmu hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy.  Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high).  Use the
	 * boolean identity !(a || b) === (!a && !b).
	 */
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	/* We also should not overwrite the tce tables */
	for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
			node = of_find_node_by_type(node, "pci")) {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}
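/*
 * Worked example (added for illustration): with low = 0x1000 and
 * high = 0x2000, a segment [0x1800, 0x2800) has begin < high and
 * end > low, so it intersects the blocked range and is rejected;
 * a segment [0x2000, 0x3000) has begin >= high and is allowed.
 */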
#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
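/*
 * Note (added): a kexec indirection list is a chain of pages holding
 * tagged physical addresses; the low bits of each entry carry one of
 * the IND_* flags masked out above, and the remaining bits are a
 * page-aligned address interpreted according to that flag.
 */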
static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create a list that properly
	 * initializes these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_INDIRECTION:
			ptr = addr;
			break;
		case IND_SOURCE:
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		}
	}
}
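/*
 * Illustrative walk (added, not in the original): a minimal list for a
 * single page copy could look like
 *
 *	head        = __pa(ind_page) | IND_INDIRECTION;
 *	ind_page[0] = dest_phys | IND_DESTINATION;
 *	ind_page[1] = src_phys  | IND_SOURCE;
 *	ind_page[2] = IND_DONE;
 *
 * copy_segments(head) then switches ptr to ind_page, latches dest,
 * copies one page (advancing dest by PAGE_SIZE), and stops at IND_DONE.
 */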
void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * We need to clear the icache for all dest pages sometime,
	 * including ones that were in place on the original copy.
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}
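/*
 * Note (added): on PPC64 the instruction and data caches are not
 * coherent, so every destination page must be flushed before the new
 * kernel executes from it; flushing from the stashed ranges[] copy
 * avoids touching *image after copy_segments() may have clobbered it.
 */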
#ifdef CONFIG_SMP

/* FIXME: we should schedule this function to be called on all cpus based
 * on calling the interrupts, but we would like to call it off irq level
 * so that the interrupt controller is clean.
 */
static void kexec_smp_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	local_irq_disable();
	kexec_smp_wait();
	/* NOTREACHED */
}
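/*
 * Note (added): kexec_smp_wait() is expected not to return; it parks
 * the calling secondary CPU in a low-memory wait loop until the next
 * kernel restarts it.
 */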
/*
 * We need to make sure each present CPU is online.  The next kernel will scan
 * the device tree and assume primary threads are online and query secondary
 * threads via RTAS to online them if required.  If we don't online primary
 * threads, they will be stuck.  However, we also online secondary threads as we
 * may be using 'cede offline'.  In this case RTAS doesn't see the secondary
 * threads as offline -- and again, these CPUs will be stuck.
 *
 * So, we online all CPUs that should be running, including secondary threads.
 */
static void wake_offline_cpus(void)
{
	int cpu = 0;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
			       cpu);
			cpu_up(cpu);
		}
	}
}
static void kexec_prepare_cpus(void)
{
	int my_cpu, i, notified = -1;

	wake_offline_cpus();
	my_cpu = get_cpu();

	/* ask the other cpus to bring themselves down */
	smp_call_function(kexec_smp_down, NULL, /* wait */0);

	/* check the other cpus are now down (via paca hw cpu id == -1) */
	for (i = 0; i < NR_CPUS; i++) {
		if (i == my_cpu)
			continue;

		while (paca[i].hw_cpu_id != -1) {
			barrier();
			if (!cpu_possible(i)) {
				printk("kexec: cpu %d hw_cpu_id %d is not"
						" possible, ignoring\n",
						i, paca[i].hw_cpu_id);
				break;
			}
			if (!cpu_online(i)) {
				/* Fixme: this can be spinning in
				 * pSeries_secondary_wait with a paca
				 * waiting for it to go online.
				 */
				printk("kexec: cpu %d hw_cpu_id %d is not"
						" online, ignoring\n",
						i, paca[i].hw_cpu_id);
				break;
			}
			if (i != notified) {
				printk("kexec: waiting for cpu %d (physical"
						" %d) to go down\n",
						i, paca[i].hw_cpu_id);
				notified = i;
			}
		}
	}

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	put_cpu();

	local_irq_disable();
}
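/*
 * Note (added): the loop above polls each paca's hw_cpu_id, which a
 * CPU entering the kexec wait loop is expected to reset to -1 as it
 * parks; CPUs that are not possible or not online are skipped rather
 * than waited on, since they will never check in.
 */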
#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 *
	 * We need to release the cpus if we are ever going from an
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
}

#endif /* SMP */
/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled.  It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image.  We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
static union thread_union kexec_stack __init_task_data = { };

/* Our assembly helper, in kexec_stub.S */
extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
					void *image, void *control,
					void (*clear_all)(void)) ATTRIB_NORET;
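/*
 * Note (added, a sketch of the flow as understood here): kexec_sequence()
 * switches to the static stack above, copies the image while the bolted
 * hash entries still map memory, turns the MMU off, invokes clear_all to
 * empty the hash table, and branches to the new kernel's entry point; it
 * does not return.
 */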
/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * If the kexec boot is the normal one, need to shutdown other cpus
	 * into our wait loop and quiesce interrupts.
	 * Otherwise, in the case of crashed mode (crashing_cpu >= 0),
	 * stopping other CPUs and collecting their pt_regs is done before
	 * using debugger IPI.
	 */
	if (crashing_cpu == -1)
		kexec_prepare_cpus();

	/* switch to a statically allocated stack.  Based on irq stack code.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;

	/* Some things are best done in assembly.  Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
			page_address(image->control_code_page),
			ppc_md.hpte_clear_all);
	/* NOTREACHED */
}
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size_bytes,
};
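/*
 * Note (added): exporting "linux,htab-base"/"linux,htab-size" under
 * /chosen lets the kexec'd kernel locate the hash table the previous
 * kernel allocated, so it can reserve or reuse that memory.
 */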
static int __init export_htab_values(void)
{
	struct device_node *node;
	struct property *prop;

	/* On machines with no htab htab_address is NULL */
	if (!htab_address)
		return -ENODEV;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	/* remove any stale properties so ours can be found */
	prop = of_find_property(node, htab_base_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);
	prop = of_find_property(node, htab_size_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);

	htab_base = __pa(htab_address);
	prom_add_property(node, &htab_base_prop);
	prom_add_property(node, &htab_size_prop);

	of_node_put(node);
	return 0;
}
late_initcall(export_htab_values);