/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "kexec_internal.h"
static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}
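/*
 * For reference, the caller-side shape of what copy_user_segment_list()
 * pulls in: userspace hands over an array of struct kexec_segment, each
 * naming a source buffer and a destination physical range. A minimal
 * sketch (illustrative only; the address and sizes below are
 * placeholders, not a bootable layout):
 *
 *	struct kexec_segment seg = {
 *		.buf   = payload,		// user buffer with the data
 *		.bufsz = payload_len,		// bytes to copy from .buf
 *		.mem   = (void *)0x100000,	// destination physical addr
 *		.memsz = 0x200000,		// destination size, page aligned
 *	};
 *
 * memsz may exceed bufsz; the tail of the destination range is
 * zero-filled when the segment is actually loaded.
 */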
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	ret = -ENOMEM;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kimage_free(image);
	return ret;
}
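/*
 * The entry-point check at the top of kimage_alloc_init() ties crash
 * loads to the region reserved at boot with the crashkernel= command
 * line parameter. For example, booting with crashkernel=256M@128M
 * gives (values illustrative):
 *
 *	crashk_res.start == 0x08000000		// 128M
 *	crashk_res.end   == 0x17ffffff		// 128M + 256M - 1
 *
 * and any KEXEC_ON_CRASH load whose entry point falls outside
 * [start, end] is rejected with -EADDRNOTAVAIL before anything is
 * allocated.
 */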
static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
			 struct kexec_segment __user *segments,
			 unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else {
		dest_image = &kexec_image;
	}

	if (nr_segments == 0) {
		/* Uninstall image */
		kimage_free(xchg(dest_image, NULL));
		return 0;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes.  Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		return ret;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy the vmcoreinfo data
	 * after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	kimage_free(image);
	return ret;
}
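/*
 * The nr_segments == 0 branch above is the unload path. A userspace
 * sketch (error handling elided; assumes <sys/syscall.h> exposes
 * SYS_kexec_load on this architecture):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/kexec.h>
 *
 *	// Drop the currently loaded crash kernel, if any.
 *	long ret = syscall(SYS_kexec_load, 0UL, 0UL, NULL,
 *			   (unsigned long)KEXEC_ON_CRASH);
 *
 * Zero segments make do_kexec_load() free whatever image is installed
 * in the selected slot and return without loading anything new.
 */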
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing ongoing DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems, so if you need
 * that to happen you need to do that yourself.
 */
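/*
 * A minimal userspace sketch of driving this syscall (illustrative
 * only: "image" and "image_len" stand in for a real relocatable
 * payload, and a real loader such as kexec-tools derives entry and
 * segments from the kernel image format rather than hardcoding them):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/kexec.h>
 *
 *	static long load_kernel(void *image, size_t image_len,
 *				unsigned long entry)
 *	{
 *		struct kexec_segment seg = {
 *			.buf   = image,
 *			.bufsz = image_len,
 *			.mem   = (void *)0x100000,	// placeholder dest
 *			.memsz = (image_len + 4095) & ~4095UL,
 *		};
 *
 *		return syscall(SYS_kexec_load, entry, 1UL, &seg,
 *			       (unsigned long)KEXEC_ARCH_DEFAULT);
 *	}
 *
 * The actual jump happens later, via reboot(2) with
 * LINUX_REBOOT_CMD_KEXEC; and per the note above, nothing syncs or
 * unmounts filesystems for you before that jump.
 */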
static inline int kexec_load_check(unsigned long nr_segments,
				   unsigned long flags)
{
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Permit LSMs and IMA to fail the kexec */
	result = security_kernel_load_data(LOADING_KEXEC_IMAGE);
	if (result < 0)
		return result;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	return 0;
}
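/*
 * Worked example of the flags check above (constants from
 * <linux/kexec.h>; KEXEC_ARCH_MASK is 0xffff0000, so architecture
 * bits live in the high half and feature flags in the low half):
 *
 *	flags = KEXEC_ARCH_X86_64 | KEXEC_ON_CRASH;
 *	flags & ~KEXEC_ARCH_MASK == KEXEC_ON_CRASH	// low bits set
 *	flags & KEXEC_FLAGS      == KEXEC_ON_CRASH	// all recognized
 *		-> accepted
 *
 *	flags = KEXEC_ARCH_X86_64 | 0x8;		// unknown low bit
 *	flags & ~KEXEC_ARCH_MASK == 0x8
 *	flags & KEXEC_FLAGS      == 0			// not recognized
 *		-> -EINVAL
 */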
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	int result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, segments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}
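/*
 * Illustration of the architecture gate above, assuming an x86_64
 * kernel (where KEXEC_ARCH == KEXEC_ARCH_X86_64). Possible values of
 * (flags & KEXEC_ARCH_MASK):
 *
 *	KEXEC_ARCH_X86_64	// accepted: native architecture
 *	KEXEC_ARCH_DEFAULT	// accepted: "whatever I am running on"
 *	KEXEC_ARCH_PPC64	// rejected: -EINVAL
 *
 * so an image prepared for a foreign architecture is refused before
 * any memory is touched.
 */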
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, ksegments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}
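/*
 * The translation loop above is what lets 32-bit userspace drive a
 * 64-bit kernel: struct compat_kexec_segment carries 32-bit pointers
 * and sizes, so each entry is widened field by field into the native
 * layout. Sketch of the compat layout (types from <linux/compat.h>):
 *
 *	struct compat_kexec_segment {
 *		compat_uptr_t  buf;	// 32-bit user pointer
 *		compat_size_t  bufsz;	// 32-bit size
 *		compat_ulong_t mem;	// 32-bit physical address
 *		compat_size_t  memsz;	// 32-bit size
 *	};
 *
 * The widened array is staged with compat_alloc_user_space() so that
 * do_kexec_load() sees a single uniform __user layout in both paths.
 */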