/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* vmcoreinfo stuff */
unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_CODE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * visible.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec on panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address, for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
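
/*
 * Editorial sketch (not part of the original file): each kimage_entry_t in
 * the descriptor list described above is a page-aligned physical address
 * with one of the IND_* flags from <linux/kexec.h> ORed into its low bits.
 * The helper below only illustrates that encoding, assuming the flag bits
 * fit inside ~PAGE_MASK, as the macros and loops later in this file rely on.
 */
static inline unsigned long kimage_example_decode_entry(kimage_entry_t entry,
							unsigned long *flags)
{
	/* IND_DESTINATION, IND_INDIRECTION, IND_DONE or IND_SOURCE */
	*flags = entry & ~PAGE_MASK;
	/* physical address of the page this entry refers to */
	return entry & PAGE_MASK;
}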

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);

	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
			       unsigned long nr_segments,
			       struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_CODE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
			      unsigned long nr_segments,
			      struct kexec_segment __user *segments)
{
	int result;
	unsigned long i;
	struct kimage *image;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out;
	}

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_CODE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						       unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
		    kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
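	/*
	 * Editorial note (worked example, not in the original source): with
	 * 4 KiB pages and order = 2, size below is 16 KiB, so hole_start is
	 * rounded up to the next 16 KiB boundary inside the crashk_res
	 * region and hole_end = hole_start + 16 KiB - 1; any overlap with a
	 * loaded segment pushes the candidate hole past that segment.
	 */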
	unsigned long hole_start, hole_end, size;
	struct page *pages;
	unsigned long i;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
			break;
		if (hole_end > crashk_res.end)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}

struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}

static int kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;

	return 0;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
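
/*
 * Editorial illustration (not from the original source): a loaded image's
 * entry list, as walked by for_each_kimage_entry() above, is a flat
 * sequence along the lines of
 *
 *   IND_DESTINATION|dst, IND_SOURCE|src0, IND_SOURCE|src1, ...,
 *   IND_INDIRECTION|next_page, ..., IND_DONE
 *
 * where each IND_SOURCE page is copied to the current destination, which
 * advances by PAGE_SIZE after every source entry, and IND_INDIRECTION
 * chains in the next page of entries.
 */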

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		}
		else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed up.
	 */
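
	/*
	 * Editorial example (not in the original source): suppose the page
	 * just allocated happens to sit at the destination address of some
	 * other, already-loaded source page.  The code below then copies
	 * that source data into the new page (so it now lives at its final
	 * destination), repoints the old IND_SOURCE entry at the new page,
	 * and hands the displaced old page frame back to the caller, which
	 * keeps the invariant described above intact.
	 */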
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it.
			 */
			addr = old_addr;
			page = old_page;
			break;
		}
		else {
			/* Place the page on the destination list I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
							     << PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		memset(ptr, 0, PAGE_SIZE);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes)
			uchunk = ubytes;

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes) {
			uchunk = ubytes;
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems, so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

/*
 * A home grown binary mutex.
 * Nothing can wait so this mutex is safe to use
 * in interrupt context :)
 */
static int kexec_lock;

asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
			       struct kexec_segment __user *segments,
			       unsigned long flags)
{
	struct kimage **dest_image, *image;
	int locked;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/*
	 * Verify we have a legal set of flags.
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	locked = xchg(&kexec_lock, 1);
	if (locked)
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
						     nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						    nr_segments, segments);
		}
		if (result)
			goto out;

		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		result = kimage_terminate(image);
		if (result)
			goto out;
	}
	/* Install the new kernel, and uninstall the old */
	image = xchg(dest_image, image);

out:
	locked = xchg(&kexec_lock, 0); /* Release the mutex */
	kimage_free(image);

	return result;
}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
				      unsigned long nr_segments,
				      struct compat_kexec_segment __user *segments,
				      unsigned long flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	int locked;

	/* Take the kexec_lock here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	locked = xchg(&kexec_lock, 1);
	if (!locked) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		locked = xchg(&kexec_lock, 0);
	}
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= NR_CPUS))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk("Kexec: Memory allocation for saving cpu register"
		       " states failed\n");
		return -ENOMEM;
	}
	return 0;
}
module_init(crash_notes_memory_init)

/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *   crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
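
/*
 * Editorial example (values are hypothetical, not from the original file):
 *
 *   crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when system RAM is between 512M and 2G, 128M when it is
 * 2G or more, and places the reservation at physical offset 16M.
 */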

static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warning("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("crashkernel: Memory "
					   "value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warning("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warning("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warning("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur != ' ' && *cur != '@')
			cur++;

		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("Memory value expected "
					   "after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * That function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
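
/*
 * Editorial example (hypothetical values): "crashkernel=64M@16M" reserves
 * 64M for the crash kernel at physical offset 16M; without "@offset" the
 * base value is left untouched and the caller chooses where to reserve.
 */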

static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warning("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);

	return 0;
}

/*
 * That function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	char *p = cmdline, *ck_cmdline = NULL;
	char *first_colon, *first_space;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, "crashkernel=");
	while (p) {
		ck_cmdline = p;
		p = strstr(p+1, "crashkernel=");
	}

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += 12; /* strlen("crashkernel=") */

	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
					     crash_size, crash_base);
	else
		return parse_crashkernel_simple(ck_cmdline, crash_size,
						crash_base);
}

void crash_save_vmcoreinfo(void)
{
	u32 *buf;

	if (!vmcoreinfo_size)
		return;

	vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());

	buf = (u32 *)vmcoreinfo_note;

	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);

	final_note(buf);
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	int r;

	va_start(args, fmt);
	r = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
		r = vmcoreinfo_max_size - vmcoreinfo_size;

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
	VMCOREINFO_SYMBOL(swapper_pg_dir);
	VMCOREINFO_SYMBOL(_stext);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);

	arch_crash_save_vmcoreinfo();

	return 0;
}
module_init(crash_save_vmcoreinfo_init)