Rename __attribute__((packed)) --> __packed
[coreboot.git] / src / lib / selfboot.c
blob160e8f5516ecf832aca611846f6506a3399aec5c
1 /*
2 * This file is part of the coreboot project.
4 * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
5 * Copyright (C) 2009 Ron Minnich <rminnich@gmail.com>
6 * Copyright (C) 2016 George Trudeau <george.trudeau@usherbrooke.ca>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <commonlib/compression.h>
19 #include <commonlib/endian.h>
20 #include <console/console.h>
21 #include <cpu/cpu.h>
22 #include <stdint.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <symbols.h>
26 #include <cbfs.h>
27 #include <lib.h>
28 #include <bootmem.h>
29 #include <program_loading.h>
30 #include <timestamp.h>
/* Bounds of the running ramstage image [lb_start, lb_end), taken from the
 * linker-provided _program/_eprogram symbols.  Used to detect payload
 * segments that would overwrite coreboot itself.
 */
static const unsigned long lb_start = (unsigned long)&_program;
static const unsigned long lb_end = (unsigned long)&_eprogram;
/* One loadable chunk of the payload.  Segments are kept in a circular
 * doubly-linked list (see segment_insert_before/after) ordered by their
 * offset in the payload stream.
 */
struct segment {
	struct segment *next;		/* Next segment in the circular list. */
	struct segment *prev;		/* Previous segment in the circular list. */
	unsigned long s_dstaddr;	/* Destination address in memory. */
	unsigned long s_srcaddr;	/* Source address of the (mapped) data. */
	unsigned long s_memsz;		/* Size occupied in memory. */
	unsigned long s_filesz;		/* Size of the (possibly compressed) data. */
	int compression;		/* CBFS_COMPRESS_* type of the data. */
};
45 static void segment_insert_before(struct segment *seg, struct segment *new)
47 new->next = seg;
48 new->prev = seg->prev;
49 seg->prev->next = new;
50 seg->prev = new;
53 static void segment_insert_after(struct segment *seg, struct segment *new)
55 new->next = seg->next;
56 new->prev = seg;
57 seg->next->prev = new;
58 seg->next = new;
61 /* The problem:
62 * Static executables all want to share the same addresses
63 * in memory because only a few addresses are reliably present on
64 * a machine, and implementing general relocation is hard.
66 * The solution:
67 * - Allocate a buffer the size of the coreboot image plus additional
68 * required space.
69 * - Anything that would overwrite coreboot copy into the lower part of
70 * the buffer.
71 * - After loading an ELF image copy coreboot to the top of the buffer.
72 * - Then jump to the loaded image.
74 * Benefits:
75 * - Nearly arbitrary standalone executables can be loaded.
76 * - coreboot is preserved, so it can be returned to.
77 * - The implementation is still relatively simple,
78 * and much simpler than the general case implemented in kexec.
/* Address and size of the bounce buffer used to stage payload data that
 * overlaps the running ramstage; set by get_bounce_buffer().
 */
static unsigned long bounce_size, bounce_buffer;
83 static void get_bounce_buffer(unsigned long req_size)
85 unsigned long lb_size;
86 void *buffer;
88 /* When the ramstage is relocatable there is no need for a bounce
89 * buffer. All payloads should not overlap the ramstage.
91 if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE) ||
92 !arch_supports_bounce_buffer()) {
93 bounce_buffer = ~0UL;
94 bounce_size = 0;
95 return;
98 lb_size = lb_end - lb_start;
99 /* Plus coreboot size so I have somewhere
100 * to place a copy to return to.
102 lb_size = req_size + lb_size;
104 buffer = bootmem_allocate_buffer(lb_size);
106 printk(BIOS_SPEW, "Bounce Buffer at %p, %lu bytes\n", buffer, lb_size);
108 bounce_buffer = (uintptr_t)buffer;
109 bounce_size = req_size;
112 static int overlaps_coreboot(struct segment *seg)
114 unsigned long start, end;
115 start = seg->s_dstaddr;
116 end = start + seg->s_memsz;
117 return !((end <= lb_start) || (start >= lb_end));
/* Modify a segment that would load on top of coreboot so that it loads
 * into the bounce buffer at @buffer instead.
 *
 * For uncompressed segments, the parts that do NOT conflict with the
 * ramstage are first sliced off into new list entries (ordered by stream
 * offset), so only the conflicting portion is retargeted.  Compressed
 * segments cannot be split and are retargeted whole.
 *
 * Returns 1 when a new segment was inserted BEFORE @seg (the caller must
 * rewind its list iteration), 0 otherwise.
 */
static int relocate_segment(unsigned long buffer, struct segment *seg)
{
	unsigned long start, middle, end, ret = 0;

	printk(BIOS_SPEW, "lb: [0x%016lx, 0x%016lx)\n",
		lb_start, lb_end);

	/* I don't conflict with coreboot so get out of here */
	if (!overlaps_coreboot(seg))
		return 0;

	if (!arch_supports_bounce_buffer())
		die("bounce buffer not supported");

	start = seg->s_dstaddr;
	middle = start + seg->s_filesz;
	end = start + seg->s_memsz;

	printk(BIOS_SPEW, "segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		start, middle, end);

	if (seg->compression == CBFS_COMPRESS_NONE) {
		/* Slice off a piece at the beginning
		 * that doesn't conflict with coreboot.
		 */
		if (start < lb_start) {
			struct segment *new;
			unsigned long len = lb_start - start;
			/* NOTE(review): malloc result is used unchecked. */
			new = malloc(sizeof(*new));
			*new = *seg;
			new->s_memsz = len;
			seg->s_memsz -= len;
			seg->s_dstaddr += len;
			seg->s_srcaddr += len;
			/* Split the file data between the two pieces; the
			 * head piece holds at most len bytes of file data.
			 */
			if (seg->s_filesz > len) {
				new->s_filesz = len;
				seg->s_filesz -= len;
			} else {
				seg->s_filesz = 0;
			}

			/* Order by stream offset */
			segment_insert_before(seg, new);

			/* compute the new value of start */
			start = seg->s_dstaddr;

			printk(BIOS_SPEW,
				" early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr,
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);

			ret = 1;
		}

		/* Slice off a piece at the end
		 * that doesn't conflict with coreboot
		 */
		if (end > lb_end) {
			unsigned long len = lb_end - start;
			struct segment *new;
			/* NOTE(review): malloc result is used unchecked. */
			new = malloc(sizeof(*new));
			*new = *seg;
			seg->s_memsz = len;
			new->s_memsz -= len;
			new->s_dstaddr += len;
			new->s_srcaddr += len;
			if (seg->s_filesz > len) {
				seg->s_filesz = len;
				new->s_filesz -= len;
			} else {
				new->s_filesz = 0;
			}
			/* Order by stream offset */
			segment_insert_after(seg, new);

			printk(BIOS_SPEW,
				" late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr,
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);
		}
	}

	/* Now retarget this segment onto the bounce buffer */
	/* sort of explanation: the buffer is a 1:1 mapping to coreboot.
	 * so you will make the dstaddr be this buffer, and it will get copied
	 * later to where coreboot lives.
	 */
	seg->s_dstaddr = buffer + (seg->s_dstaddr - lb_start);

	printk(BIOS_SPEW, " bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		seg->s_dstaddr,
		seg->s_dstaddr + seg->s_filesz,
		seg->s_dstaddr + seg->s_memsz);

	return ret;
}
226 /* Decode a serialized cbfs payload segment
227 * from memory into native endianness.
229 static void cbfs_decode_payload_segment(struct cbfs_payload_segment *segment,
230 const struct cbfs_payload_segment *src)
232 segment->type = read_be32(&src->type);
233 segment->compression = read_be32(&src->compression);
234 segment->offset = read_be32(&src->offset);
235 segment->load_addr = read_be64(&src->load_addr);
236 segment->len = read_be32(&src->len);
237 segment->mem_len = read_be32(&src->mem_len);
/* Parse the serialized segment table of @cbfs_payload into a circular
 * list of struct segment rooted at @head (ordered by stream offset) and
 * store the payload's entry point in *entry.
 *
 * Returns 1 on success (entry-point segment reached) and -1 on an
 * unknown segment type.  NOTE(review): -1 is truthy, so callers must
 * compare the result against 1 rather than testing it with `!`.
 */
static int build_self_segment_list(
	struct segment *head,
	struct cbfs_payload *cbfs_payload, uintptr_t *entry)
{
	struct segment *new;
	struct cbfs_payload_segment *current_segment, *first_segment, segment;

	/* Initialize the empty circular list: head points at itself. */
	memset(head, 0, sizeof(*head));
	head->next = head->prev = head;

	first_segment = &cbfs_payload->segments;

	for (current_segment = first_segment;; ++current_segment) {
		printk(BIOS_DEBUG,
			"Loading segment from ROM address 0x%p\n",
			current_segment);

		/* Descriptors are stored big-endian; decode to native. */
		cbfs_decode_payload_segment(&segment, current_segment);

		switch (segment.type) {
		case PAYLOAD_SEGMENT_PARAMS:
			printk(BIOS_DEBUG, " parameter section (skipped)\n");
			continue;

		case PAYLOAD_SEGMENT_CODE:
		case PAYLOAD_SEGMENT_DATA:
			printk(BIOS_DEBUG, " %s (compression=%x)\n",
				segment.type == PAYLOAD_SEGMENT_CODE
				? "code" : "data", segment.compression);

			/* NOTE(review): malloc result is used unchecked. */
			new = malloc(sizeof(*new));
			new->s_dstaddr = segment.load_addr;
			new->s_memsz = segment.mem_len;
			new->compression = segment.compression;
			/* Data offsets are relative to the segment table. */
			new->s_srcaddr = (uintptr_t)
				((unsigned char *)first_segment)
				+ segment.offset;
			new->s_filesz = segment.len;

			printk(BIOS_DEBUG,
				" New segment dstaddr 0x%lx memsize 0x%lx srcaddr 0x%lx filesize 0x%lx\n",
				new->s_dstaddr, new->s_memsz, new->s_srcaddr,
				new->s_filesz);

			/* Clean up the values */
			if (new->s_filesz > new->s_memsz) {
				new->s_filesz = new->s_memsz;
				printk(BIOS_DEBUG,
					" cleaned up filesize 0x%lx\n",
					new->s_filesz);
			}
			break;

		case PAYLOAD_SEGMENT_BSS:
			printk(BIOS_DEBUG, " BSS 0x%p (%d byte)\n", (void *)
				(intptr_t)segment.load_addr, segment.mem_len);

			/* NOTE(review): malloc result is used unchecked. */
			new = malloc(sizeof(*new));
			new->s_filesz = 0;
			new->s_srcaddr = (uintptr_t)
				((unsigned char *)first_segment)
				+ segment.offset;
			new->s_dstaddr = segment.load_addr;
			new->s_memsz = segment.mem_len;
			new->compression = CBFS_COMPRESS_NONE;
			break;

		case PAYLOAD_SEGMENT_ENTRY:
			printk(BIOS_DEBUG, " Entry Point 0x%p\n", (void *)
				(intptr_t)segment.load_addr);

			*entry = segment.load_addr;
			/* Per definition, a payload always has the entry point
			 * as last segment. Thus, we use the occurrence of the
			 * entry point as break condition for the loop.
			 * Can we actually just look at the number of section?
			 */
			return 1;

		default:
			/* We found something that we don't know about. Throw
			 * hands into the sky and run away!
			 */
			printk(BIOS_EMERG, "Bad segment type %x\n",
				segment.type);
			return -1;
		}

		/* We have found another CODE, DATA or BSS segment */
		/* Insert new segment at the end of the list */
		segment_insert_before(head, new);
	}

	return 1;
}
336 static int payload_targets_usable_ram(struct segment *head)
338 const unsigned long one_meg = (1UL << 20);
339 struct segment *ptr;
341 for (ptr = head->next; ptr != head; ptr = ptr->next) {
342 if (bootmem_region_targets_usable_ram(ptr->s_dstaddr,
343 ptr->s_memsz))
344 continue;
346 if (ptr->s_dstaddr < one_meg &&
347 (ptr->s_dstaddr + ptr->s_memsz) <= one_meg) {
348 printk(BIOS_DEBUG,
349 "Payload being loaded at below 1MiB "
350 "without region being marked as RAM usable.\n");
351 continue;
354 /* Payload segment not targeting RAM. */
355 printk(BIOS_ERR, "SELF Payload doesn't target RAM:\n");
356 printk(BIOS_ERR, "Failed Segment: 0x%lx, %lu bytes\n",
357 ptr->s_dstaddr, ptr->s_memsz);
358 bootmem_dump_ranges();
359 return 0;
362 return 1;
/* Copy (and decompress) every segment in the list rooted at @head to its
 * destination, bouncing data that overlaps the ramstage through the
 * bounce buffer.
 *
 * Returns 1 on success, 0 on bounce-buffer/decompression/region failure.
 * NOTE(review): returns -1 (truthy!) for an unknown compression type;
 * callers must compare against 1, not use `!`.
 */
static int load_self_segments(struct segment *head, struct prog *payload,
	bool check_regions)
{
	struct segment *ptr;
	unsigned long bounce_high = lb_end;

	if (check_regions) {
		if (!payload_targets_usable_ram(head))
			return 0;
	}

	for (ptr = head->next; ptr != head; ptr = ptr->next) {
		/*
		 * Add segments to bootmem memory map before a bounce buffer is
		 * allocated so that there aren't conflicts with the actual
		 * payload.
		 */
		if (check_regions) {
			bootmem_add_range(ptr->s_dstaddr, ptr->s_memsz,
				LB_MEM_UNUSABLE);
		}

		/* Track the highest address that overlaps coreboot so the
		 * bounce buffer is sized to cover the whole overlap.
		 */
		if (!overlaps_coreboot(ptr))
			continue;
		if (ptr->s_dstaddr + ptr->s_memsz > bounce_high)
			bounce_high = ptr->s_dstaddr + ptr->s_memsz;
	}
	get_bounce_buffer(bounce_high - lb_start);
	if (!bounce_buffer) {
		printk(BIOS_ERR, "Could not find a bounce buffer...\n");
		return 0;
	}

	for (ptr = head->next; ptr != head; ptr = ptr->next) {
		unsigned char *dest, *src, *middle, *end;
		size_t len, memsz;
		printk(BIOS_DEBUG,
			"Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);

		/* Modify the segment to load onto the bounce_buffer if
		 * necessary.
		 */
		if (relocate_segment(bounce_buffer, ptr)) {
			/* A segment was inserted before ptr: step back two
			 * nodes so the loop's ptr->next revisits it.
			 */
			ptr = (ptr->prev)->prev;
			continue;
		}

		printk(BIOS_DEBUG,
			"Post relocation: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);

		/* Compute the boundaries of the segment */
		dest = (unsigned char *)(ptr->s_dstaddr);
		src = (unsigned char *)(ptr->s_srcaddr);
		len = ptr->s_filesz;
		memsz = ptr->s_memsz;
		end = dest + memsz;

		/* Copy data from the initial buffer */
		switch (ptr->compression) {
		case CBFS_COMPRESS_LZMA: {
			printk(BIOS_DEBUG, "using LZMA\n");
			timestamp_add_now(TS_START_ULZMA);
			len = ulzman(src, len, dest, memsz);
			timestamp_add_now(TS_END_ULZMA);
			if (!len) /* Decompression Error. */
				return 0;
			break;
		}
		case CBFS_COMPRESS_LZ4: {
			printk(BIOS_DEBUG, "using LZ4\n");
			timestamp_add_now(TS_START_ULZ4F);
			len = ulz4fn(src, len, dest, memsz);
			timestamp_add_now(TS_END_ULZ4F);
			if (!len) /* Decompression Error. */
				return 0;
			break;
		}
		case CBFS_COMPRESS_NONE: {
			printk(BIOS_DEBUG, "it's not compressed!\n");
			memcpy(dest, src, len);
			break;
		}
		default:
			printk(BIOS_INFO, "CBFS: Unknown compression type %d\n",
				ptr->compression);
			return -1;
		}
		/* Calculate middle after any changes to len. */
		middle = dest + len;
		printk(BIOS_SPEW, "[ 0x%08lx, %08lx, 0x%08lx) <- %08lx\n",
			(unsigned long)dest,
			(unsigned long)middle,
			(unsigned long)end,
			(unsigned long)src);

		/* Zero the extra bytes between middle & end */
		if (middle < end) {
			printk(BIOS_DEBUG,
				"Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
				(unsigned long)middle,
				(unsigned long)(end - middle));

			/* Zero the extra bytes */
			memset(middle, 0, end - middle);
		}

		/* Copy the data that's outside the area that shadows ramstage
		 */
		printk(BIOS_DEBUG, "dest %p, end %p, bouncebuffer %lx\n", dest,
			end, bounce_buffer);
		if ((unsigned long)end > bounce_buffer) {
			/* Part of the segment landing before the bounce
			 * buffer maps 1:1 below lb_start; move it there.
			 */
			if ((unsigned long)dest < bounce_buffer) {
				unsigned char *from = dest;
				unsigned char *to = (unsigned char *)
					(lb_start - (bounce_buffer
					- (unsigned long)dest));
				unsigned long amount = bounce_buffer
					- (unsigned long)dest;
				printk(BIOS_DEBUG,
					"move prefix around: from %p, to %p, amount: %lx\n",
					from, to, amount);
				memcpy(to, from, amount);
			}
			/* Likewise, data past the bounce buffer's window
			 * belongs above lb_end; move it there.
			 */
			if ((unsigned long)end > bounce_buffer + (lb_end
				- lb_start)) {
				unsigned long from = bounce_buffer + (lb_end
					- lb_start);
				unsigned long to = lb_end;
				unsigned long amount =
					(unsigned long)end - from;
				printk(BIOS_DEBUG,
					"move suffix around: from %lx, to %lx, amount: %lx\n",
					from, to, amount);
				memcpy((char *)to, (char *)from, amount);
			}
		}

		/*
		 * Each architecture can perform additional operations
		 * on the loaded segment
		 */
		prog_segment_loaded((uintptr_t)dest, ptr->s_memsz,
			ptr->next == head ? SEG_FINAL : 0);
	}

	return 1;
}
515 void *selfload(struct prog *payload, bool check_regions)
517 uintptr_t entry = 0;
518 struct segment head;
519 void *data;
521 data = rdev_mmap_full(prog_rdev(payload));
523 if (data == NULL)
524 return NULL;
526 /* Preprocess the self segments */
527 if (!build_self_segment_list(&head, data, &entry))
528 goto out;
530 /* Load the segments */
531 if (!load_self_segments(&head, payload, check_regions))
532 goto out;
534 printk(BIOS_SPEW, "Loaded segments\n");
536 rdev_munmap(prog_rdev(payload), data);
538 /* Update the payload's area with the bounce buffer information. */
539 prog_set_area(payload, (void *)(uintptr_t)bounce_buffer, bounce_size);
541 return (void *)entry;
543 out:
544 rdev_munmap(prog_rdev(payload), data);
545 return NULL;