/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/capability.h>
16 #include <linux/file.h>
17 #include <linux/slab.h>
18 #include <linux/kexec.h>
19 #include <linux/mutex.h>
20 #include <linux/list.h>
22 #include <linux/ima.h>
23 #include <crypto/hash.h>
24 #include <crypto/sha.h>
25 #include <linux/syscalls.h>
26 #include <linux/vmalloc.h>
27 #include "kexec_internal.h"
30 * Declare these symbols weak so that if architecture provides a purgatory,
31 * these will be overridden.
33 char __weak kexec_purgatory
[0];
34 size_t __weak kexec_purgatory_size
= 0;
36 static int kexec_calculate_store_digests(struct kimage
*image
);
38 /* Architectures can provide this probe function */
39 int __weak
arch_kexec_kernel_image_probe(struct kimage
*image
, void *buf
,
40 unsigned long buf_len
)
45 void * __weak
arch_kexec_kernel_image_load(struct kimage
*image
)
47 return ERR_PTR(-ENOEXEC
);
50 int __weak
arch_kimage_file_post_load_cleanup(struct kimage
*image
)
#ifdef CONFIG_KEXEC_VERIFY_SIG
/* Weak default: with no arch verifier available, reject the signature. */
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return -EKEYREJECTED;
}
#endif
63 /* Apply relocations of type RELA */
65 arch_kexec_apply_relocations_add(const Elf_Ehdr
*ehdr
, Elf_Shdr
*sechdrs
,
68 pr_err("RELA relocation unsupported.\n");
72 /* Apply relocations of type REL */
74 arch_kexec_apply_relocations(const Elf_Ehdr
*ehdr
, Elf_Shdr
*sechdrs
,
77 pr_err("REL relocation unsupported.\n");
82 * Free up memory used by kernel, initrd, and command line. This is temporary
83 * memory allocation which is not needed any more after these buffers have
84 * been loaded into separate segments and have been copied elsewhere.
86 void kimage_file_post_load_cleanup(struct kimage
*image
)
88 struct purgatory_info
*pi
= &image
->purgatory_info
;
90 vfree(image
->kernel_buf
);
91 image
->kernel_buf
= NULL
;
93 vfree(image
->initrd_buf
);
94 image
->initrd_buf
= NULL
;
96 kfree(image
->cmdline_buf
);
97 image
->cmdline_buf
= NULL
;
99 vfree(pi
->purgatory_buf
);
100 pi
->purgatory_buf
= NULL
;
105 /* See if architecture has anything to cleanup post load */
106 arch_kimage_file_post_load_cleanup(image
);
109 * Above call should have called into bootloader to free up
110 * any data stored in kimage->image_loader_data. It should
111 * be ok now to free it up.
113 kfree(image
->image_loader_data
);
114 image
->image_loader_data
= NULL
;
118 * In file mode list of segments is prepared by kernel. Copy relevant
119 * data from user space, do error checking, prepare segment list
122 kimage_file_prepare_segments(struct kimage
*image
, int kernel_fd
, int initrd_fd
,
123 const char __user
*cmdline_ptr
,
124 unsigned long cmdline_len
, unsigned flags
)
130 ret
= kernel_read_file_from_fd(kernel_fd
, &image
->kernel_buf
,
131 &size
, INT_MAX
, READING_KEXEC_IMAGE
);
134 image
->kernel_buf_len
= size
;
136 /* IMA needs to pass the measurement list to the next kernel. */
137 ima_add_kexec_buffer(image
);
139 /* Call arch image probe handlers */
140 ret
= arch_kexec_kernel_image_probe(image
, image
->kernel_buf
,
141 image
->kernel_buf_len
);
145 #ifdef CONFIG_KEXEC_VERIFY_SIG
146 ret
= arch_kexec_kernel_verify_sig(image
, image
->kernel_buf
,
147 image
->kernel_buf_len
);
149 pr_debug("kernel signature verification failed.\n");
152 pr_debug("kernel signature verification successful.\n");
154 /* It is possible that there no initramfs is being loaded */
155 if (!(flags
& KEXEC_FILE_NO_INITRAMFS
)) {
156 ret
= kernel_read_file_from_fd(initrd_fd
, &image
->initrd_buf
,
158 READING_KEXEC_INITRAMFS
);
161 image
->initrd_buf_len
= size
;
165 image
->cmdline_buf
= memdup_user(cmdline_ptr
, cmdline_len
);
166 if (IS_ERR(image
->cmdline_buf
)) {
167 ret
= PTR_ERR(image
->cmdline_buf
);
168 image
->cmdline_buf
= NULL
;
172 image
->cmdline_buf_len
= cmdline_len
;
174 /* command line should be a string with last byte null */
175 if (image
->cmdline_buf
[cmdline_len
- 1] != '\0') {
181 /* Call arch image load handlers */
182 ldata
= arch_kexec_kernel_image_load(image
);
185 ret
= PTR_ERR(ldata
);
189 image
->image_loader_data
= ldata
;
191 /* In case of error, free up all allocated memory in this function */
193 kimage_file_post_load_cleanup(image
);
198 kimage_file_alloc_init(struct kimage
**rimage
, int kernel_fd
,
199 int initrd_fd
, const char __user
*cmdline_ptr
,
200 unsigned long cmdline_len
, unsigned long flags
)
203 struct kimage
*image
;
204 bool kexec_on_panic
= flags
& KEXEC_FILE_ON_CRASH
;
206 image
= do_kimage_alloc_init();
210 image
->file_mode
= 1;
212 if (kexec_on_panic
) {
213 /* Enable special crash kernel control page alloc policy. */
214 image
->control_page
= crashk_res
.start
;
215 image
->type
= KEXEC_TYPE_CRASH
;
218 ret
= kimage_file_prepare_segments(image
, kernel_fd
, initrd_fd
,
219 cmdline_ptr
, cmdline_len
, flags
);
223 ret
= sanity_check_segment_list(image
);
225 goto out_free_post_load_bufs
;
228 image
->control_code_page
= kimage_alloc_control_pages(image
,
229 get_order(KEXEC_CONTROL_PAGE_SIZE
));
230 if (!image
->control_code_page
) {
231 pr_err("Could not allocate control_code_buffer\n");
232 goto out_free_post_load_bufs
;
235 if (!kexec_on_panic
) {
236 image
->swap_page
= kimage_alloc_control_pages(image
, 0);
237 if (!image
->swap_page
) {
238 pr_err("Could not allocate swap buffer\n");
239 goto out_free_control_pages
;
245 out_free_control_pages
:
246 kimage_free_page_list(&image
->control_pages
);
247 out_free_post_load_bufs
:
248 kimage_file_post_load_cleanup(image
);
254 SYSCALL_DEFINE5(kexec_file_load
, int, kernel_fd
, int, initrd_fd
,
255 unsigned long, cmdline_len
, const char __user
*, cmdline_ptr
,
256 unsigned long, flags
)
259 struct kimage
**dest_image
, *image
;
261 /* We only trust the superuser with rebooting the system. */
262 if (!capable(CAP_SYS_BOOT
) || kexec_load_disabled
)
265 /* Make sure we have a legal set of flags */
266 if (flags
!= (flags
& KEXEC_FILE_FLAGS
))
271 if (!mutex_trylock(&kexec_mutex
))
274 dest_image
= &kexec_image
;
275 if (flags
& KEXEC_FILE_ON_CRASH
) {
276 dest_image
= &kexec_crash_image
;
277 if (kexec_crash_image
)
278 arch_kexec_unprotect_crashkres();
281 if (flags
& KEXEC_FILE_UNLOAD
)
285 * In case of crash, new kernel gets loaded in reserved region. It is
286 * same memory where old crash kernel might be loaded. Free any
287 * current crash dump kernel before we corrupt it.
289 if (flags
& KEXEC_FILE_ON_CRASH
)
290 kimage_free(xchg(&kexec_crash_image
, NULL
));
292 ret
= kimage_file_alloc_init(&image
, kernel_fd
, initrd_fd
, cmdline_ptr
,
297 ret
= machine_kexec_prepare(image
);
301 ret
= kexec_calculate_store_digests(image
);
305 for (i
= 0; i
< image
->nr_segments
; i
++) {
306 struct kexec_segment
*ksegment
;
308 ksegment
= &image
->segment
[i
];
309 pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
310 i
, ksegment
->buf
, ksegment
->bufsz
, ksegment
->mem
,
313 ret
= kimage_load_segment(image
, &image
->segment
[i
]);
318 kimage_terminate(image
);
321 * Free up any temporary buffers allocated which are not needed
322 * after image has been loaded
324 kimage_file_post_load_cleanup(image
);
326 image
= xchg(dest_image
, image
);
328 if ((flags
& KEXEC_FILE_ON_CRASH
) && kexec_crash_image
)
329 arch_kexec_protect_crashkres();
331 mutex_unlock(&kexec_mutex
);
336 static int locate_mem_hole_top_down(unsigned long start
, unsigned long end
,
337 struct kexec_buf
*kbuf
)
339 struct kimage
*image
= kbuf
->image
;
340 unsigned long temp_start
, temp_end
;
342 temp_end
= min(end
, kbuf
->buf_max
);
343 temp_start
= temp_end
- kbuf
->memsz
;
346 /* align down start */
347 temp_start
= temp_start
& (~(kbuf
->buf_align
- 1));
349 if (temp_start
< start
|| temp_start
< kbuf
->buf_min
)
352 temp_end
= temp_start
+ kbuf
->memsz
- 1;
355 * Make sure this does not conflict with any of existing
358 if (kimage_is_destination_range(image
, temp_start
, temp_end
)) {
359 temp_start
= temp_start
- PAGE_SIZE
;
363 /* We found a suitable memory range */
367 /* If we are here, we found a suitable memory range */
368 kbuf
->mem
= temp_start
;
370 /* Success, stop navigating through remaining System RAM ranges */
374 static int locate_mem_hole_bottom_up(unsigned long start
, unsigned long end
,
375 struct kexec_buf
*kbuf
)
377 struct kimage
*image
= kbuf
->image
;
378 unsigned long temp_start
, temp_end
;
380 temp_start
= max(start
, kbuf
->buf_min
);
383 temp_start
= ALIGN(temp_start
, kbuf
->buf_align
);
384 temp_end
= temp_start
+ kbuf
->memsz
- 1;
386 if (temp_end
> end
|| temp_end
> kbuf
->buf_max
)
389 * Make sure this does not conflict with any of existing
392 if (kimage_is_destination_range(image
, temp_start
, temp_end
)) {
393 temp_start
= temp_start
+ PAGE_SIZE
;
397 /* We found a suitable memory range */
401 /* If we are here, we found a suitable memory range */
402 kbuf
->mem
= temp_start
;
404 /* Success, stop navigating through remaining System RAM ranges */
408 static int locate_mem_hole_callback(u64 start
, u64 end
, void *arg
)
410 struct kexec_buf
*kbuf
= (struct kexec_buf
*)arg
;
411 unsigned long sz
= end
- start
+ 1;
413 /* Returning 0 will take to next memory range */
414 if (sz
< kbuf
->memsz
)
417 if (end
< kbuf
->buf_min
|| start
> kbuf
->buf_max
)
421 * Allocate memory top down with-in ram range. Otherwise bottom up
425 return locate_mem_hole_top_down(start
, end
, kbuf
);
426 return locate_mem_hole_bottom_up(start
, end
, kbuf
);
430 * arch_kexec_walk_mem - call func(data) on free memory regions
431 * @kbuf: Context info for the search. Also passed to @func.
432 * @func: Function to call for each memory region.
434 * Return: The memory walk will stop when func returns a non-zero value
435 * and that value will be returned. If all free regions are visited without
436 * func returning non-zero, then zero will be returned.
438 int __weak
arch_kexec_walk_mem(struct kexec_buf
*kbuf
,
439 int (*func
)(u64
, u64
, void *))
441 if (kbuf
->image
->type
== KEXEC_TYPE_CRASH
)
442 return walk_iomem_res_desc(crashk_res
.desc
,
443 IORESOURCE_SYSTEM_RAM
| IORESOURCE_BUSY
,
444 crashk_res
.start
, crashk_res
.end
,
447 return walk_system_ram_res(0, ULONG_MAX
, kbuf
, func
);
451 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
452 * @kbuf: Parameters for the memory search.
454 * On success, kbuf->mem will have the start address of the memory region found.
456 * Return: 0 on success, negative errno on error.
458 int kexec_locate_mem_hole(struct kexec_buf
*kbuf
)
462 ret
= arch_kexec_walk_mem(kbuf
, locate_mem_hole_callback
);
464 return ret
== 1 ? 0 : -EADDRNOTAVAIL
;
468 * kexec_add_buffer - place a buffer in a kexec segment
469 * @kbuf: Buffer contents and memory parameters.
471 * This function assumes that kexec_mutex is held.
472 * On successful return, @kbuf->mem will have the physical address of
473 * the buffer in memory.
475 * Return: 0 on success, negative errno on error.
477 int kexec_add_buffer(struct kexec_buf
*kbuf
)
480 struct kexec_segment
*ksegment
;
483 /* Currently adding segment this way is allowed only in file mode */
484 if (!kbuf
->image
->file_mode
)
487 if (kbuf
->image
->nr_segments
>= KEXEC_SEGMENT_MAX
)
491 * Make sure we are not trying to add buffer after allocating
492 * control pages. All segments need to be placed first before
493 * any control pages are allocated. As control page allocation
494 * logic goes through list of segments to make sure there are
495 * no destination overlaps.
497 if (!list_empty(&kbuf
->image
->control_pages
)) {
502 /* Ensure minimum alignment needed for segments. */
503 kbuf
->memsz
= ALIGN(kbuf
->memsz
, PAGE_SIZE
);
504 kbuf
->buf_align
= max(kbuf
->buf_align
, PAGE_SIZE
);
506 /* Walk the RAM ranges and allocate a suitable range for the buffer */
507 ret
= kexec_locate_mem_hole(kbuf
);
511 /* Found a suitable memory range */
512 ksegment
= &kbuf
->image
->segment
[kbuf
->image
->nr_segments
];
513 ksegment
->kbuf
= kbuf
->buffer
;
514 ksegment
->bufsz
= kbuf
->bufsz
;
515 ksegment
->mem
= kbuf
->mem
;
516 ksegment
->memsz
= kbuf
->memsz
;
517 kbuf
->image
->nr_segments
++;
521 /* Calculate and store the digest of segments */
522 static int kexec_calculate_store_digests(struct kimage
*image
)
524 struct crypto_shash
*tfm
;
525 struct shash_desc
*desc
;
526 int ret
= 0, i
, j
, zero_buf_sz
, sha_region_sz
;
527 size_t desc_size
, nullsz
;
530 struct kexec_sha_region
*sha_regions
;
531 struct purgatory_info
*pi
= &image
->purgatory_info
;
533 zero_buf
= __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT
);
534 zero_buf_sz
= PAGE_SIZE
;
536 tfm
= crypto_alloc_shash("sha256", 0, 0);
542 desc_size
= crypto_shash_descsize(tfm
) + sizeof(*desc
);
543 desc
= kzalloc(desc_size
, GFP_KERNEL
);
549 sha_region_sz
= KEXEC_SEGMENT_MAX
* sizeof(struct kexec_sha_region
);
550 sha_regions
= vzalloc(sha_region_sz
);
557 ret
= crypto_shash_init(desc
);
559 goto out_free_sha_regions
;
561 digest
= kzalloc(SHA256_DIGEST_SIZE
, GFP_KERNEL
);
564 goto out_free_sha_regions
;
567 for (j
= i
= 0; i
< image
->nr_segments
; i
++) {
568 struct kexec_segment
*ksegment
;
570 ksegment
= &image
->segment
[i
];
572 * Skip purgatory as it will be modified once we put digest
575 if (ksegment
->kbuf
== pi
->purgatory_buf
)
578 ret
= crypto_shash_update(desc
, ksegment
->kbuf
,
584 * Assume rest of the buffer is filled with zero and
585 * update digest accordingly.
587 nullsz
= ksegment
->memsz
- ksegment
->bufsz
;
589 unsigned long bytes
= nullsz
;
591 if (bytes
> zero_buf_sz
)
593 ret
= crypto_shash_update(desc
, zero_buf
, bytes
);
602 sha_regions
[j
].start
= ksegment
->mem
;
603 sha_regions
[j
].len
= ksegment
->memsz
;
608 ret
= crypto_shash_final(desc
, digest
);
610 goto out_free_digest
;
611 ret
= kexec_purgatory_get_set_symbol(image
, "purgatory_sha_regions",
612 sha_regions
, sha_region_sz
, 0);
614 goto out_free_digest
;
616 ret
= kexec_purgatory_get_set_symbol(image
, "purgatory_sha256_digest",
617 digest
, SHA256_DIGEST_SIZE
, 0);
619 goto out_free_digest
;
624 out_free_sha_regions
:
634 /* Actually load purgatory. Lot of code taken from kexec-tools */
635 static int __kexec_load_purgatory(struct kimage
*image
, unsigned long min
,
636 unsigned long max
, int top_down
)
638 struct purgatory_info
*pi
= &image
->purgatory_info
;
639 unsigned long align
, bss_align
, bss_sz
, bss_pad
;
640 unsigned long entry
, load_addr
, curr_load_addr
, bss_addr
, offset
;
641 unsigned char *buf_addr
, *src
;
642 int i
, ret
= 0, entry_sidx
= -1;
643 const Elf_Shdr
*sechdrs_c
;
644 Elf_Shdr
*sechdrs
= NULL
;
645 struct kexec_buf kbuf
= { .image
= image
, .bufsz
= 0, .buf_align
= 1,
646 .buf_min
= min
, .buf_max
= max
,
647 .top_down
= top_down
};
650 * sechdrs_c points to section headers in purgatory and are read
651 * only. No modifications allowed.
653 sechdrs_c
= (void *)pi
->ehdr
+ pi
->ehdr
->e_shoff
;
656 * We can not modify sechdrs_c[] and its fields. It is read only.
657 * Copy it over to a local copy where one can store some temporary
658 * data and free it at the end. We need to modify ->sh_addr and
659 * ->sh_offset fields to keep track of permanent and temporary
660 * locations of sections.
662 sechdrs
= vzalloc(pi
->ehdr
->e_shnum
* sizeof(Elf_Shdr
));
666 memcpy(sechdrs
, sechdrs_c
, pi
->ehdr
->e_shnum
* sizeof(Elf_Shdr
));
669 * We seem to have multiple copies of sections. First copy is which
670 * is embedded in kernel in read only section. Some of these sections
671 * will be copied to a temporary buffer and relocated. And these
672 * sections will finally be copied to their final destination at
675 * Use ->sh_offset to reflect section address in memory. It will
676 * point to original read only copy if section is not allocatable.
677 * Otherwise it will point to temporary copy which will be relocated.
679 * Use ->sh_addr to contain final address of the section where it
680 * will go during execution time.
682 for (i
= 0; i
< pi
->ehdr
->e_shnum
; i
++) {
683 if (sechdrs
[i
].sh_type
== SHT_NOBITS
)
686 sechdrs
[i
].sh_offset
= (unsigned long)pi
->ehdr
+
687 sechdrs
[i
].sh_offset
;
691 * Identify entry point section and make entry relative to section
694 entry
= pi
->ehdr
->e_entry
;
695 for (i
= 0; i
< pi
->ehdr
->e_shnum
; i
++) {
696 if (!(sechdrs
[i
].sh_flags
& SHF_ALLOC
))
699 if (!(sechdrs
[i
].sh_flags
& SHF_EXECINSTR
))
702 /* Make entry section relative */
703 if (sechdrs
[i
].sh_addr
<= pi
->ehdr
->e_entry
&&
704 ((sechdrs
[i
].sh_addr
+ sechdrs
[i
].sh_size
) >
705 pi
->ehdr
->e_entry
)) {
707 entry
-= sechdrs
[i
].sh_addr
;
712 /* Determine how much memory is needed to load relocatable object. */
716 for (i
= 0; i
< pi
->ehdr
->e_shnum
; i
++) {
717 if (!(sechdrs
[i
].sh_flags
& SHF_ALLOC
))
720 align
= sechdrs
[i
].sh_addralign
;
721 if (sechdrs
[i
].sh_type
!= SHT_NOBITS
) {
722 if (kbuf
.buf_align
< align
)
723 kbuf
.buf_align
= align
;
724 kbuf
.bufsz
= ALIGN(kbuf
.bufsz
, align
);
725 kbuf
.bufsz
+= sechdrs
[i
].sh_size
;
728 if (bss_align
< align
)
730 bss_sz
= ALIGN(bss_sz
, align
);
731 bss_sz
+= sechdrs
[i
].sh_size
;
735 /* Determine the bss padding required to align bss properly */
737 if (kbuf
.bufsz
& (bss_align
- 1))
738 bss_pad
= bss_align
- (kbuf
.bufsz
& (bss_align
- 1));
740 kbuf
.memsz
= kbuf
.bufsz
+ bss_pad
+ bss_sz
;
742 /* Allocate buffer for purgatory */
743 kbuf
.buffer
= vzalloc(kbuf
.bufsz
);
749 if (kbuf
.buf_align
< bss_align
)
750 kbuf
.buf_align
= bss_align
;
752 /* Add buffer to segment list */
753 ret
= kexec_add_buffer(&kbuf
);
756 pi
->purgatory_load_addr
= kbuf
.mem
;
758 /* Load SHF_ALLOC sections */
759 buf_addr
= kbuf
.buffer
;
760 load_addr
= curr_load_addr
= pi
->purgatory_load_addr
;
761 bss_addr
= load_addr
+ kbuf
.bufsz
+ bss_pad
;
763 for (i
= 0; i
< pi
->ehdr
->e_shnum
; i
++) {
764 if (!(sechdrs
[i
].sh_flags
& SHF_ALLOC
))
767 align
= sechdrs
[i
].sh_addralign
;
768 if (sechdrs
[i
].sh_type
!= SHT_NOBITS
) {
769 curr_load_addr
= ALIGN(curr_load_addr
, align
);
770 offset
= curr_load_addr
- load_addr
;
771 /* We already modifed ->sh_offset to keep src addr */
772 src
= (char *) sechdrs
[i
].sh_offset
;
773 memcpy(buf_addr
+ offset
, src
, sechdrs
[i
].sh_size
);
775 /* Store load address and source address of section */
776 sechdrs
[i
].sh_addr
= curr_load_addr
;
779 * This section got copied to temporary buffer. Update
780 * ->sh_offset accordingly.
782 sechdrs
[i
].sh_offset
= (unsigned long)(buf_addr
+ offset
);
784 /* Advance to the next address */
785 curr_load_addr
+= sechdrs
[i
].sh_size
;
787 bss_addr
= ALIGN(bss_addr
, align
);
788 sechdrs
[i
].sh_addr
= bss_addr
;
789 bss_addr
+= sechdrs
[i
].sh_size
;
793 /* Update entry point based on load address of text section */
795 entry
+= sechdrs
[entry_sidx
].sh_addr
;
797 /* Make kernel jump to purgatory after shutdown */
798 image
->start
= entry
;
800 /* Used later to get/set symbol values */
801 pi
->sechdrs
= sechdrs
;
804 * Used later to identify which section is purgatory and skip it
807 pi
->purgatory_buf
= kbuf
.buffer
;
815 static int kexec_apply_relocations(struct kimage
*image
)
818 struct purgatory_info
*pi
= &image
->purgatory_info
;
819 Elf_Shdr
*sechdrs
= pi
->sechdrs
;
821 /* Apply relocations */
822 for (i
= 0; i
< pi
->ehdr
->e_shnum
; i
++) {
823 Elf_Shdr
*section
, *symtab
;
825 if (sechdrs
[i
].sh_type
!= SHT_RELA
&&
826 sechdrs
[i
].sh_type
!= SHT_REL
)
830 * For section of type SHT_RELA/SHT_REL,
831 * ->sh_link contains section header index of associated
832 * symbol table. And ->sh_info contains section header
833 * index of section to which relocations apply.
835 if (sechdrs
[i
].sh_info
>= pi
->ehdr
->e_shnum
||
836 sechdrs
[i
].sh_link
>= pi
->ehdr
->e_shnum
)
839 section
= &sechdrs
[sechdrs
[i
].sh_info
];
840 symtab
= &sechdrs
[sechdrs
[i
].sh_link
];
842 if (!(section
->sh_flags
& SHF_ALLOC
))
846 * symtab->sh_link contain section header index of associated
849 if (symtab
->sh_link
>= pi
->ehdr
->e_shnum
)
850 /* Invalid section number? */
854 * Respective architecture needs to provide support for applying
855 * relocations of type SHT_RELA/SHT_REL.
857 if (sechdrs
[i
].sh_type
== SHT_RELA
)
858 ret
= arch_kexec_apply_relocations_add(pi
->ehdr
,
860 else if (sechdrs
[i
].sh_type
== SHT_REL
)
861 ret
= arch_kexec_apply_relocations(pi
->ehdr
,
870 /* Load relocatable purgatory object and relocate it appropriately */
871 int kexec_load_purgatory(struct kimage
*image
, unsigned long min
,
872 unsigned long max
, int top_down
,
873 unsigned long *load_addr
)
875 struct purgatory_info
*pi
= &image
->purgatory_info
;
878 if (kexec_purgatory_size
<= 0)
881 if (kexec_purgatory_size
< sizeof(Elf_Ehdr
))
884 pi
->ehdr
= (Elf_Ehdr
*)kexec_purgatory
;
886 if (memcmp(pi
->ehdr
->e_ident
, ELFMAG
, SELFMAG
) != 0
887 || pi
->ehdr
->e_type
!= ET_REL
888 || !elf_check_arch(pi
->ehdr
)
889 || pi
->ehdr
->e_shentsize
!= sizeof(Elf_Shdr
))
892 if (pi
->ehdr
->e_shoff
>= kexec_purgatory_size
893 || (pi
->ehdr
->e_shnum
* sizeof(Elf_Shdr
) >
894 kexec_purgatory_size
- pi
->ehdr
->e_shoff
))
897 ret
= __kexec_load_purgatory(image
, min
, max
, top_down
);
901 ret
= kexec_apply_relocations(image
);
905 *load_addr
= pi
->purgatory_load_addr
;
911 vfree(pi
->purgatory_buf
);
912 pi
->purgatory_buf
= NULL
;
916 static Elf_Sym
*kexec_purgatory_find_symbol(struct purgatory_info
*pi
,
925 if (!pi
->sechdrs
|| !pi
->ehdr
)
928 sechdrs
= pi
->sechdrs
;
931 for (i
= 0; i
< ehdr
->e_shnum
; i
++) {
932 if (sechdrs
[i
].sh_type
!= SHT_SYMTAB
)
935 if (sechdrs
[i
].sh_link
>= ehdr
->e_shnum
)
936 /* Invalid strtab section number */
938 strtab
= (char *)sechdrs
[sechdrs
[i
].sh_link
].sh_offset
;
939 syms
= (Elf_Sym
*)sechdrs
[i
].sh_offset
;
941 /* Go through symbols for a match */
942 for (k
= 0; k
< sechdrs
[i
].sh_size
/sizeof(Elf_Sym
); k
++) {
943 if (ELF_ST_BIND(syms
[k
].st_info
) != STB_GLOBAL
)
946 if (strcmp(strtab
+ syms
[k
].st_name
, name
) != 0)
949 if (syms
[k
].st_shndx
== SHN_UNDEF
||
950 syms
[k
].st_shndx
>= ehdr
->e_shnum
) {
951 pr_debug("Symbol: %s has bad section index %d.\n",
952 name
, syms
[k
].st_shndx
);
956 /* Found the symbol we are looking for */
964 void *kexec_purgatory_get_symbol_addr(struct kimage
*image
, const char *name
)
966 struct purgatory_info
*pi
= &image
->purgatory_info
;
970 sym
= kexec_purgatory_find_symbol(pi
, name
);
972 return ERR_PTR(-EINVAL
);
974 sechdr
= &pi
->sechdrs
[sym
->st_shndx
];
977 * Returns the address where symbol will finally be loaded after
978 * kexec_load_segment()
980 return (void *)(sechdr
->sh_addr
+ sym
->st_value
);
984 * Get or set value of a symbol. If "get_value" is true, symbol value is
985 * returned in buf otherwise symbol value is set based on value in buf.
987 int kexec_purgatory_get_set_symbol(struct kimage
*image
, const char *name
,
988 void *buf
, unsigned int size
, bool get_value
)
992 struct purgatory_info
*pi
= &image
->purgatory_info
;
995 sym
= kexec_purgatory_find_symbol(pi
, name
);
999 if (sym
->st_size
!= size
) {
1000 pr_err("symbol %s size mismatch: expected %lu actual %u\n",
1001 name
, (unsigned long)sym
->st_size
, size
);
1005 sechdrs
= pi
->sechdrs
;
1007 if (sechdrs
[sym
->st_shndx
].sh_type
== SHT_NOBITS
) {
1008 pr_err("symbol %s is in a bss section. Cannot %s\n", name
,
1009 get_value
? "get" : "set");
1013 sym_buf
= (unsigned char *)sechdrs
[sym
->st_shndx
].sh_offset
+
1017 memcpy((void *)buf
, sym_buf
, size
);
1019 memcpy((void *)sym_buf
, buf
, size
);