/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/semaphore.h>
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
int kexec_should_crash(struct task_struct *p)
{
        if (in_interrupt() || !p->pid || p->pid == 1 || panic_on_oops)
                return 1;
        return 0;
}
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_CODE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
/*
 * KIMAGE_NO_DEST is an impossible destination address, for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
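/*
 * Illustrative aside (not part of the original source): each
 * kimage_entry_t in the descriptor list packs a page-aligned physical
 * address together with a type flag in its low bits (IND_DESTINATION,
 * IND_SOURCE, IND_INDIRECTION or IND_DONE from <linux/kexec.h>).  A
 * minimal sketch of building and decoding such an entry, assuming only
 * those flags, looks like:
 *
 *      kimage_entry_t e = dest_addr | IND_DESTINATION;
 *      unsigned long addr = e & PAGE_MASK;     recover the address
 *      if (e & IND_SOURCE)                     a source page to copy
 *              copy_to(addr);                  (hypothetical helper)
 */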
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start,
                                       unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination);
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
{
        size_t segment_bytes;
        struct kimage *image;
        unsigned long i;
        int result;

        /* Allocate a controlling structure */
        result = -ENOMEM;
        image = kmalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                goto out;

        memset(image, 0, sizeof(*image));
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
        if (result)
                goto out;
        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        goto out;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        goto out;
        }

        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through, very weird things can happen with no
         * easy explanation as one segment stops on another.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;

                        pstart = image->segment[j].mem;
                        pend = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
                                goto out;
                }
        }

        /* Ensure our buffer sizes are not larger than
         * our memory sizes.  This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                               unsigned long nr_segments,
                               struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;

        /* Allocate and initialize a controlling structure */
        image = NULL;
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_CODE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                              unsigned long nr_segments,
                              struct kexec_segment __user *segments)
{
        int result;
        unsigned long i;
        struct kimage *image;

        image = NULL;
        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;
                goto out;
        }

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        /* Enable the special crash kernel control page
         * allocation policy.
         */
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;

        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of RAM.  We must ensure the addresses
         * are in the reserved area otherwise preloading the
         * kernel could corrupt things.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
                        goto out;
        }

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_CODE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start,
                                       unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
        }

        return pages;
}
static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}
static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                struct page *page;

                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
                pfn = page_to_pfn(pages);
                epfn = pfn + count;
                addr = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                    kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */
        }
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel.  All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
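        /*
         * Worked example (illustrative, not from the original source):
         * with 4K pages and order = 2, the hole size below is
         * (1 << 2) << 12 = 16K, so hole_start is rounded up to the next
         * 16K boundary at or above image->control_page and
         * hole_end = hole_start + 16K - 1.  The search then slides this
         * 16K window upward until it clears every segment.
         */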
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
                        break;
                if (hole_end > crashk_res.end)
                        break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end = hole_start + size - 1;
                                break;
                        }
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
        if (pages)
                image->control_page = hole_end;

        return pages;
}
struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE / sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}
static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        int result;

        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
        if (result == 0)
                image->destination = destination;

        return result;
}
static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);
        if (result == 0)
                image->destination += PAGE_SIZE;

        return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
}
static int kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;

        return 0;
}
#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
                        phys_to_virt(entry & PAGE_MASK) : ptr + 1)
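/*
 * Illustrative example (not part of the original file): walking a
 * terminated entry list with for_each_kimage_entry.  The hypothetical
 * helper below simply counts the IND_SOURCE entries an image carries;
 * it follows IND_INDIRECTION links exactly the way kimage_free() and
 * kimage_dst_used() do and stops at IND_DONE.
 */
#if 0
static unsigned long kimage_count_source_pages(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        unsigned long count = 0;

        for_each_kimage_entry(image, ptr, entry)
                if (entry & IND_SOURCE)
                        count++;
        return count;
}
#endif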
static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                }
                else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
        kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but it makes the
         * proof that no problems will occur trivial, and the
         * implementation simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.  If the runtime is a problem the data structures can
         * be fixed up.
         */
        struct page *page;
        unsigned long addr;
        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
                if (page_to_pfn(page) >
                    (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,
                                                 addr + PAGE_SIZE))
                        break;

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page.  And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it.
                         */
                        addr = old_addr;
                        page = old_page;
                        break;
                }
                else {
                        /* Place the page on the destination list; I
                         * will use it later.
                         */
                        list_add(&page->lru, &image->dest_pages);
                }
        }

        return page;
}
static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
{
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_pfn(page)
                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap(page);
                /* Start with a clear page */
                memset(ptr, 0, PAGE_SIZE);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes)
                        uchunk = ubytes;

                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = (result < 0) ? result : -EIO;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}
static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
{
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes) {
                        uchunk = ubytes;
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }
                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = (result < 0) ? result : -EIO;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}
static int kimage_load_segment(struct kimage *image,
                               struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing ongoing DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination and
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems, so if you need
 * that to happen you need to do it yourself.
 */
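/*
 * Illustrative userspace sketch (not part of this file): a caller of
 * the syscall below passes an entry point and an array of struct
 * kexec_segment describing where each chunk of the new kernel should
 * land.  The buffer, length and load address used here are made-up
 * placeholders.
 *
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *      #include <linux/kexec.h>
 *
 *      struct kexec_segment seg = {
 *              .buf   = kernel_buf,        (hypothetical image buffer)
 *              .bufsz = kernel_len,
 *              .mem   = (void *)0x100000,  (hypothetical load address)
 *              .memsz = kernel_memsz,      (page-aligned size)
 *      };
 *      syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 */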
struct kimage *kexec_image = NULL;
static struct kimage *kexec_crash_image = NULL;
/*
 * A home grown binary mutex.
 * Nothing can wait so this mutex is safe to use
 * in interrupt context :)
 */
static int kexec_lock = 0;
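/*
 * Illustrative sketch (not from the original source): the lock above is
 * taken with an atomic exchange and released the same way, so it never
 * sleeps and can be used from interrupt and panic context:
 *
 *      if (xchg(&kexec_lock, 1))       already held, back off
 *              return -EBUSY;
 *      ...critical section...
 *      xchg(&kexec_lock, 0);           release
 */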
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                               struct kexec_segment __user *segments,
                               unsigned long flags)
{
        struct kimage **dest_image, *image;
        int locked;
        int result;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /*
         * Verify we have a legal set of flags.
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
            ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        image = NULL;
        result = 0;

        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        locked = xchg(&kexec_lock, 1);
        if (locked)
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                unsigned long i;

                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                     nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                         */
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                    nr_segments, segments);
                }
                if (result)
                        goto out;

                result = machine_kexec_prepare(image);
                if (result)
                        goto out;

                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image,
                                                     &image->segment[i]);
                        if (result)
                                goto out;
                }
                result = kimage_terminate(image);
                if (result)
                        goto out;
        }
        /* Install the new kernel, and uninstall the old */
        image = xchg(dest_image, image);

out:
        xchg(&kexec_lock, 0); /* Release the mutex */
        kimage_free(image);

        return result;
}
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                      unsigned long nr_segments,
                                      struct compat_kexec_segment __user *segments,
                                      unsigned long flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;

        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;

        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                if (result)
                        return -EFAULT;

                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.mem   = in.mem;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));
                if (result)
                        return -EFAULT;
        }

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
void crash_kexec(struct pt_regs *regs)
{
        struct kimage *image;
        int locked;

        /* Take the kexec_lock here to prevent sys_kexec_load
         * running on one CPU from replacing the crash kernel
         * we are using after a panic on a different CPU.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
        locked = xchg(&kexec_lock, 1);
        if (!locked) {
                image = xchg(&kexec_crash_image, NULL);
                if (image) {
                        machine_crash_shutdown(regs);
                        machine_kexec(image);
                }
                xchg(&kexec_lock, 0);
        }
}