/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
 * Copyright (C) 2009 Ron Minnich <rminnich@gmail.com>
 * Copyright (C) 2016 George Trudeau <george.trudeau@usherbrooke.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
18 #include <commonlib/compression.h>
19 #include <commonlib/endian.h>
20 #include <console/console.h>
29 #include <program_loading.h>
30 #include <timestamp.h>
32 static const unsigned long lb_start
= (unsigned long)&_program
;
33 static const unsigned long lb_end
= (unsigned long)&_eprogram
;
38 unsigned long s_dstaddr
;
39 unsigned long s_srcaddr
;
40 unsigned long s_memsz
;
41 unsigned long s_filesz
;
45 static void segment_insert_before(struct segment
*seg
, struct segment
*new)
48 new->prev
= seg
->prev
;
49 seg
->prev
->next
= new;
53 static void segment_insert_after(struct segment
*seg
, struct segment
*new)
55 new->next
= seg
->next
;
57 seg
->next
->prev
= new;
/*
 * Static executables all want to share the same addresses
 * in memory because only a few addresses are reliably present on
 * a machine, and implementing general relocation is hard.
 *
 * The bounce-buffer approach used here:
 * - Allocate a buffer the size of the coreboot image plus additional
 *   space.
 * - Anything that would overwrite coreboot copy into the lower part of
 *   the buffer.
 * - After loading an ELF image copy coreboot to the top of the buffer.
 * - Then jump to the loaded image.
 *
 * Benefits:
 * - Nearly arbitrary standalone executables can be loaded.
 * - coreboot is preserved, so it can be returned to.
 * - The implementation is still relatively simple,
 *   and much simpler than the general case implemented in kexec.
 */
81 static unsigned long bounce_size
, bounce_buffer
;
83 static void get_bounce_buffer(unsigned long req_size
)
85 unsigned long lb_size
;
88 /* When the ramstage is relocatable there is no need for a bounce
89 * buffer. All payloads should not overlap the ramstage.
91 if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE
) ||
92 !arch_supports_bounce_buffer()) {
98 lb_size
= lb_end
- lb_start
;
99 /* Plus coreboot size so I have somewhere
100 * to place a copy to return to.
102 lb_size
= req_size
+ lb_size
;
104 buffer
= bootmem_allocate_buffer(lb_size
);
106 printk(BIOS_SPEW
, "Bounce Buffer at %p, %lu bytes\n", buffer
, lb_size
);
108 bounce_buffer
= (uintptr_t)buffer
;
109 bounce_size
= req_size
;
112 static int overlaps_coreboot(struct segment
*seg
)
114 unsigned long start
, end
;
115 start
= seg
->s_dstaddr
;
116 end
= start
+ seg
->s_memsz
;
117 return !((end
<= lb_start
) || (start
>= lb_end
));
120 static int relocate_segment(unsigned long buffer
, struct segment
*seg
)
122 /* Modify all segments that want to load onto coreboot
123 * to load onto the bounce buffer instead.
125 /* ret: 1 : A new segment is inserted before the seg.
126 * 0 : A new segment is inserted after the seg, or no new one.
128 unsigned long start
, middle
, end
, ret
= 0;
130 printk(BIOS_SPEW
, "lb: [0x%016lx, 0x%016lx)\n",
133 /* I don't conflict with coreboot so get out of here */
134 if (!overlaps_coreboot(seg
))
137 if (!arch_supports_bounce_buffer())
138 die("bounce buffer not supported");
140 start
= seg
->s_dstaddr
;
141 middle
= start
+ seg
->s_filesz
;
142 end
= start
+ seg
->s_memsz
;
144 printk(BIOS_SPEW
, "segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
147 if (seg
->compression
== CBFS_COMPRESS_NONE
) {
148 /* Slice off a piece at the beginning
149 * that doesn't conflict with coreboot.
151 if (start
< lb_start
) {
153 unsigned long len
= lb_start
- start
;
154 new = malloc(sizeof(*new));
158 seg
->s_dstaddr
+= len
;
159 seg
->s_srcaddr
+= len
;
160 if (seg
->s_filesz
> len
) {
162 seg
->s_filesz
-= len
;
167 /* Order by stream offset */
168 segment_insert_before(seg
, new);
170 /* compute the new value of start */
171 start
= seg
->s_dstaddr
;
174 " early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
176 new->s_dstaddr
+ new->s_filesz
,
177 new->s_dstaddr
+ new->s_memsz
);
182 /* Slice off a piece at the end
183 * that doesn't conflict with coreboot
186 unsigned long len
= lb_end
- start
;
188 new = malloc(sizeof(*new));
192 new->s_dstaddr
+= len
;
193 new->s_srcaddr
+= len
;
194 if (seg
->s_filesz
> len
) {
196 new->s_filesz
-= len
;
200 /* Order by stream offset */
201 segment_insert_after(seg
, new);
204 " late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
206 new->s_dstaddr
+ new->s_filesz
,
207 new->s_dstaddr
+ new->s_memsz
);
211 /* Now retarget this segment onto the bounce buffer */
212 /* sort of explanation: the buffer is a 1:1 mapping to coreboot.
213 * so you will make the dstaddr be this buffer, and it will get copied
214 * later to where coreboot lives.
216 seg
->s_dstaddr
= buffer
+ (seg
->s_dstaddr
- lb_start
);
218 printk(BIOS_SPEW
, " bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
220 seg
->s_dstaddr
+ seg
->s_filesz
,
221 seg
->s_dstaddr
+ seg
->s_memsz
);
226 /* Decode a serialized cbfs payload segment
227 * from memory into native endianness.
229 static void cbfs_decode_payload_segment(struct cbfs_payload_segment
*segment
,
230 const struct cbfs_payload_segment
*src
)
232 segment
->type
= read_be32(&src
->type
);
233 segment
->compression
= read_be32(&src
->compression
);
234 segment
->offset
= read_be32(&src
->offset
);
235 segment
->load_addr
= read_be64(&src
->load_addr
);
236 segment
->len
= read_be32(&src
->len
);
237 segment
->mem_len
= read_be32(&src
->mem_len
);
240 static int build_self_segment_list(
241 struct segment
*head
,
242 struct cbfs_payload
*cbfs_payload
, uintptr_t *entry
)
245 struct cbfs_payload_segment
*current_segment
, *first_segment
, segment
;
247 memset(head
, 0, sizeof(*head
));
248 head
->next
= head
->prev
= head
;
250 first_segment
= &cbfs_payload
->segments
;
252 for (current_segment
= first_segment
;; ++current_segment
) {
254 "Loading segment from ROM address 0x%p\n",
257 cbfs_decode_payload_segment(&segment
, current_segment
);
259 switch (segment
.type
) {
260 case PAYLOAD_SEGMENT_PARAMS
:
261 printk(BIOS_DEBUG
, " parameter section (skipped)\n");
264 case PAYLOAD_SEGMENT_CODE
:
265 case PAYLOAD_SEGMENT_DATA
:
266 printk(BIOS_DEBUG
, " %s (compression=%x)\n",
267 segment
.type
== PAYLOAD_SEGMENT_CODE
268 ? "code" : "data", segment
.compression
);
270 new = malloc(sizeof(*new));
271 new->s_dstaddr
= segment
.load_addr
;
272 new->s_memsz
= segment
.mem_len
;
273 new->compression
= segment
.compression
;
274 new->s_srcaddr
= (uintptr_t)
275 ((unsigned char *)first_segment
)
277 new->s_filesz
= segment
.len
;
280 " New segment dstaddr 0x%lx memsize 0x%lx srcaddr 0x%lx filesize 0x%lx\n",
281 new->s_dstaddr
, new->s_memsz
, new->s_srcaddr
,
284 /* Clean up the values */
285 if (new->s_filesz
> new->s_memsz
) {
286 new->s_filesz
= new->s_memsz
;
288 " cleaned up filesize 0x%lx\n",
293 case PAYLOAD_SEGMENT_BSS
:
294 printk(BIOS_DEBUG
, " BSS 0x%p (%d byte)\n", (void *)
295 (intptr_t)segment
.load_addr
, segment
.mem_len
);
297 new = malloc(sizeof(*new));
299 new->s_srcaddr
= (uintptr_t)
300 ((unsigned char *)first_segment
)
302 new->s_dstaddr
= segment
.load_addr
;
303 new->s_memsz
= segment
.mem_len
;
304 new->compression
= CBFS_COMPRESS_NONE
;
307 case PAYLOAD_SEGMENT_ENTRY
:
308 printk(BIOS_DEBUG
, " Entry Point 0x%p\n", (void *)
309 (intptr_t)segment
.load_addr
);
311 *entry
= segment
.load_addr
;
312 /* Per definition, a payload always has the entry point
313 * as last segment. Thus, we use the occurrence of the
314 * entry point as break condition for the loop.
315 * Can we actually just look at the number of section?
320 /* We found something that we don't know about. Throw
321 * hands into the sky and run away!
323 printk(BIOS_EMERG
, "Bad segment type %x\n",
328 /* We have found another CODE, DATA or BSS segment */
329 /* Insert new segment at the end of the list */
330 segment_insert_before(head
, new);
336 static int payload_targets_usable_ram(struct segment
*head
)
338 const unsigned long one_meg
= (1UL << 20);
341 for (ptr
= head
->next
; ptr
!= head
; ptr
= ptr
->next
) {
342 if (bootmem_region_targets_usable_ram(ptr
->s_dstaddr
,
346 if (ptr
->s_dstaddr
< one_meg
&&
347 (ptr
->s_dstaddr
+ ptr
->s_memsz
) <= one_meg
) {
349 "Payload being loaded at below 1MiB "
350 "without region being marked as RAM usable.\n");
354 /* Payload segment not targeting RAM. */
355 printk(BIOS_ERR
, "SELF Payload doesn't target RAM:\n");
356 printk(BIOS_ERR
, "Failed Segment: 0x%lx, %lu bytes\n",
357 ptr
->s_dstaddr
, ptr
->s_memsz
);
358 bootmem_dump_ranges();
365 static int load_self_segments(struct segment
*head
, struct prog
*payload
,
369 unsigned long bounce_high
= lb_end
;
372 if (!payload_targets_usable_ram(head
))
376 for (ptr
= head
->next
; ptr
!= head
; ptr
= ptr
->next
) {
378 * Add segments to bootmem memory map before a bounce buffer is
379 * allocated so that there aren't conflicts with the actual
383 bootmem_add_range(ptr
->s_dstaddr
, ptr
->s_memsz
,
387 if (!overlaps_coreboot(ptr
))
389 if (ptr
->s_dstaddr
+ ptr
->s_memsz
> bounce_high
)
390 bounce_high
= ptr
->s_dstaddr
+ ptr
->s_memsz
;
392 get_bounce_buffer(bounce_high
- lb_start
);
393 if (!bounce_buffer
) {
394 printk(BIOS_ERR
, "Could not find a bounce buffer...\n");
398 for (ptr
= head
->next
; ptr
!= head
; ptr
= ptr
->next
) {
399 unsigned char *dest
, *src
, *middle
, *end
;
402 "Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
403 ptr
->s_dstaddr
, ptr
->s_memsz
, ptr
->s_filesz
);
405 /* Modify the segment to load onto the bounce_buffer if
408 if (relocate_segment(bounce_buffer
, ptr
)) {
409 ptr
= (ptr
->prev
)->prev
;
414 "Post relocation: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
415 ptr
->s_dstaddr
, ptr
->s_memsz
, ptr
->s_filesz
);
417 /* Compute the boundaries of the segment */
418 dest
= (unsigned char *)(ptr
->s_dstaddr
);
419 src
= (unsigned char *)(ptr
->s_srcaddr
);
421 memsz
= ptr
->s_memsz
;
424 /* Copy data from the initial buffer */
425 switch (ptr
->compression
) {
426 case CBFS_COMPRESS_LZMA
: {
427 printk(BIOS_DEBUG
, "using LZMA\n");
428 timestamp_add_now(TS_START_ULZMA
);
429 len
= ulzman(src
, len
, dest
, memsz
);
430 timestamp_add_now(TS_END_ULZMA
);
431 if (!len
) /* Decompression Error. */
435 case CBFS_COMPRESS_LZ4
: {
436 printk(BIOS_DEBUG
, "using LZ4\n");
437 timestamp_add_now(TS_START_ULZ4F
);
438 len
= ulz4fn(src
, len
, dest
, memsz
);
439 timestamp_add_now(TS_END_ULZ4F
);
440 if (!len
) /* Decompression Error. */
444 case CBFS_COMPRESS_NONE
: {
445 printk(BIOS_DEBUG
, "it's not compressed!\n");
446 memcpy(dest
, src
, len
);
450 printk(BIOS_INFO
, "CBFS: Unknown compression type %d\n",
454 /* Calculate middle after any changes to len. */
456 printk(BIOS_SPEW
, "[ 0x%08lx, %08lx, 0x%08lx) <- %08lx\n",
458 (unsigned long)middle
,
462 /* Zero the extra bytes between middle & end */
465 "Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
466 (unsigned long)middle
,
467 (unsigned long)(end
- middle
));
469 /* Zero the extra bytes */
470 memset(middle
, 0, end
- middle
);
473 /* Copy the data that's outside the area that shadows ramstage
475 printk(BIOS_DEBUG
, "dest %p, end %p, bouncebuffer %lx\n", dest
,
477 if ((unsigned long)end
> bounce_buffer
) {
478 if ((unsigned long)dest
< bounce_buffer
) {
479 unsigned char *from
= dest
;
480 unsigned char *to
= (unsigned char *)
481 (lb_start
- (bounce_buffer
482 - (unsigned long)dest
));
483 unsigned long amount
= bounce_buffer
484 - (unsigned long)dest
;
486 "move prefix around: from %p, to %p, amount: %lx\n",
488 memcpy(to
, from
, amount
);
490 if ((unsigned long)end
> bounce_buffer
+ (lb_end
492 unsigned long from
= bounce_buffer
+ (lb_end
494 unsigned long to
= lb_end
;
495 unsigned long amount
=
496 (unsigned long)end
- from
;
498 "move suffix around: from %lx, to %lx, amount: %lx\n",
500 memcpy((char *)to
, (char *)from
, amount
);
505 * Each architecture can perform additonal operations
506 * on the loaded segment
508 prog_segment_loaded((uintptr_t)dest
, ptr
->s_memsz
,
509 ptr
->next
== head
? SEG_FINAL
: 0);
515 void *selfload(struct prog
*payload
, bool check_regions
)
521 data
= rdev_mmap_full(prog_rdev(payload
));
526 /* Preprocess the self segments */
527 if (!build_self_segment_list(&head
, data
, &entry
))
530 /* Load the segments */
531 if (!load_self_segments(&head
, payload
, check_regions
))
534 printk(BIOS_SPEW
, "Loaded segments\n");
536 rdev_munmap(prog_rdev(payload
), data
);
538 /* Update the payload's area with the bounce buffer information. */
539 prog_set_area(payload
, (void *)(uintptr_t)bounce_buffer
, bounce_size
);
541 return (void *)entry
;
544 rdev_munmap(prog_rdev(payload
), data
);