/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <cbmem.h>
#include <console/console.h>
#include <imd.h>
#include <stdlib.h>
#include <string.h>
#include <types.h>
#include <imd_private.h>

/* For more details on implementation and usage please see the imd.h header. */
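
/*
 * Rough picture of the layout managed by the code below (entries are carved
 * out downward from the upper limit):
 *
 *   +------------------------------+ <- imdr->limit (aligned to LIMIT_ALIGN)
 *   | struct imd_root_pointer      |
 *   | entries[] array              |
 *   | struct imd_root              | <- imdr->r (limit - root_size);
 *   +------------------------------+    entries[0] covers this root region
 *   | entry data, placed at        |
 *   | negative offsets from the    |
 *   | root, growing downward       |
 *   +------------------------------+ <- lowest offset bounded by r->max_offset
 */
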
static void *relative_pointer(void *base, ssize_t offset)
{
        intptr_t b = (intptr_t)base;
        b += offset;
        return (void *)b;
}

static bool imd_root_pointer_valid(const struct imd_root_pointer *rp)
{
        return !!(rp->magic == IMD_ROOT_PTR_MAGIC);
}

static struct imd_root *imdr_root(const struct imdr *imdr)
{
        return imdr->r;
}

/*
 * The root pointer is relative to the upper limit of the imd. i.e. It sits
 * just below the upper limit.
 */
static struct imd_root_pointer *imdr_get_root_pointer(const struct imdr *imdr)
{
        struct imd_root_pointer *rp;

        rp = relative_pointer((void *)imdr->limit, -sizeof(*rp));

        return rp;
}

static void imd_link_root(struct imd_root_pointer *rp, struct imd_root *r)
{
        rp->magic = IMD_ROOT_PTR_MAGIC;
        rp->root_offset = (int32_t)((intptr_t)r - (intptr_t)rp);
}

static struct imd_entry *root_last_entry(struct imd_root *r)
{
        return &r->entries[r->num_entries - 1];
}

static size_t root_num_entries(size_t root_size)
{
        size_t entries_size;

        entries_size = root_size;
        entries_size -= sizeof(struct imd_root_pointer);
        entries_size -= sizeof(struct imd_root);

        return entries_size / sizeof(struct imd_entry);
}

static size_t imd_root_data_left(struct imd_root *r)
{
        struct imd_entry *last_entry;

        last_entry = root_last_entry(r);

        if (r->max_offset != 0)
                return last_entry->start_offset - r->max_offset;

        return ~(size_t)0;
}

static bool root_is_locked(const struct imd_root *r)
{
        return !!(r->flags & IMD_FLAG_LOCKED);
}

static void imd_entry_assign(struct imd_entry *e, uint32_t id,
                                ssize_t offset, size_t size)
{
        e->magic = IMD_ENTRY_MAGIC;
        e->start_offset = offset;
        e->size = size;
        e->id = id;
}

static void imdr_init(struct imdr *ir, void *upper_limit)
{
        uintptr_t limit = (uintptr_t)upper_limit;
        /* Upper limit is aligned down to 4KiB */
        ir->limit = ALIGN_DOWN(limit, LIMIT_ALIGN);
        ir->r = NULL;
}
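
/*
 * Create an empty imd rooted just below imdr->limit: the root pointer goes at
 * the very top, the root bookkeeping structure sits root_size bytes below the
 * limit, and entries[0] is seeded to cover the root region itself.
 */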
static int imdr_create_empty(struct imdr *imdr, size_t root_size,
                                size_t entry_align)
{
        struct imd_root_pointer *rp;
        struct imd_root *r;
        struct imd_entry *e;
        ssize_t root_offset;

        if (!imdr->limit)
                return -1;

        /* root_size and entry_align should be a power of 2. */
        assert(IS_POWER_OF_2(root_size));
        assert(IS_POWER_OF_2(entry_align));

        /*
         * root_size needs to be large enough to accommodate the root pointer
         * and root bookkeeping structure. Furthermore, there needs to be space
         * for at least one entry covering the root region. The caller needs to
         * ensure there's enough room for tracking individual allocations.
         */
        if (root_size < (sizeof(*rp) + sizeof(*r) + sizeof(*e)))
                return -1;

        /* For simplicity don't allow sizes or alignments to exceed LIMIT_ALIGN. */
        if (root_size > LIMIT_ALIGN || entry_align > LIMIT_ALIGN)
                return -1;

        /* Additionally, don't handle an entry alignment > root_size. */
        if (entry_align > root_size)
                return -1;

        rp = imdr_get_root_pointer(imdr);

        root_offset = -(ssize_t)root_size;
        /* Set root pointer. */
        imdr->r = relative_pointer((void *)imdr->limit, root_offset);
        r = imdr_root(imdr);
        imd_link_root(rp, r);

        memset(r, 0, sizeof(*r));
        r->entry_align = entry_align;

        /* Calculate size left for entries. */
        r->max_entries = root_num_entries(root_size);

        /* Fill in first entry covering the root region. */
        r->num_entries = 1;
        e = &r->entries[0];
        imd_entry_assign(e, CBMEM_ID_IMD_ROOT, 0, root_size);

        printk(BIOS_DEBUG, "IMD: root @ %p %u entries.\n", r, r->max_entries);

        return 0;
}
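
/*
 * Validate and adopt an imd that already exists below imdr->limit: the root
 * pointer magic, the bounds of the root bookkeeping area and every entry's
 * address range are checked before imdr->r is set.
 */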
static int imdr_recover(struct imdr *imdr)
{
        struct imd_root_pointer *rp;
        struct imd_root *r;
        uintptr_t low_limit;
        size_t i;

        if (!imdr->limit)
                return -1;

        rp = imdr_get_root_pointer(imdr);

        if (!imd_root_pointer_valid(rp))
                return -1;

        r = relative_pointer(rp, rp->root_offset);

        /* Ensure that root is just under the root pointer */
        if ((intptr_t)rp - (intptr_t)&r->entries[r->max_entries] > sizeof(struct imd_entry))
                return -1;

        if (r->num_entries > r->max_entries)
                return -1;

        /* Entry alignment should be power of 2. */
        if (!IS_POWER_OF_2(r->entry_align))
                return -1;

        low_limit = (uintptr_t)relative_pointer(r, r->max_offset);

        /* If no max_offset then lowest limit is 0. */
        if (low_limit == (uintptr_t)r)
                low_limit = 0;

        for (i = 0; i < r->num_entries; i++) {
                uintptr_t start_addr;
                const struct imd_entry *e = &r->entries[i];

                if (e->magic != IMD_ENTRY_MAGIC)
                        return -1;

                start_addr = (uintptr_t)relative_pointer(r, e->start_offset);
                if (start_addr < low_limit)
                        return -1;
                if (start_addr >= imdr->limit ||
                                (start_addr + e->size) > imdr->limit)
                        return -1;
        }

        /* Set root pointer. */
        imdr->r = r;

        return 0;
}
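
/* Linear lookup by id; entries[0], which covers the root itself, is skipped. */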
static const struct imd_entry *imdr_entry_find(const struct imdr *imdr,
                                                uint32_t id)
{
        struct imd_root *r;
        struct imd_entry *e;
        size_t i;

        r = imdr_root(imdr);

        if (r == NULL)
                return NULL;

        e = NULL;
        /* Skip first entry covering the root. */
        for (i = 1; i < r->num_entries; i++) {
                if (id != r->entries[i].id)
                        continue;
                e = &r->entries[i];
                break;
        }

        return e;
}
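
/*
 * Cap the total footprint of the region at max_size bytes by recording the
 * lowest permitted offset (max_offset, negative relative to the root).
 */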
static int imdr_limit_size(struct imdr *imdr, size_t max_size)
{
        struct imd_root *r;
        ssize_t smax_size;
        size_t root_size;

        r = imdr_root(imdr);
        if (r == NULL)
                return -1;

        root_size = imdr->limit - (uintptr_t)r;

        if (max_size < root_size)
                return -1;

        /* Take into account the root size. */
        smax_size = max_size - root_size;
        smax_size = -smax_size;

        r->max_offset = smax_size;

        return 0;
}

static size_t imdr_entry_size(const struct imd_entry *e)
{
        return e->size;
}

static void *imdr_entry_at(const struct imdr *imdr, const struct imd_entry *e)
{
        return relative_pointer(imdr_root(imdr), e->start_offset);
}
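
/*
 * Carve out a new entry directly below the current lowest allocation. The
 * space consumed is rounded up to entry_align, while the recorded entry size
 * is the caller's requested size.
 */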
static struct imd_entry *imd_entry_add_to_root(struct imd_root *r, uint32_t id,
                                                size_t size)
{
        struct imd_entry *entry;
        struct imd_entry *last_entry;
        ssize_t e_offset;
        size_t used_size;

        if (r->num_entries == r->max_entries)
                return NULL;

        /* Determine total size taken up by entry. */
        used_size = ALIGN_UP(size, r->entry_align);

        /* See if size overflows imd total size. */
        if (used_size > imd_root_data_left(r))
                return NULL;

        /*
         * Determine if offset field overflows. All offsets should be lower
         * than the previous one.
         */
        last_entry = root_last_entry(r);
        e_offset = last_entry->start_offset;
        e_offset -= (ssize_t)used_size;
        if (e_offset >= last_entry->start_offset)
                return NULL;

        entry = root_last_entry(r) + 1;
        r->num_entries++;

        imd_entry_assign(entry, id, e_offset, size);

        return entry;
}

static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
                                                uint32_t id, size_t size)
{
        struct imd_root *r;

        r = imdr_root(imdr);

        if (r == NULL)
                return NULL;

        if (root_is_locked(r))
                return NULL;

        return imd_entry_add_to_root(r, id, size);
}

static bool imdr_has_entry(const struct imdr *imdr, const struct imd_entry *e)
{
        struct imd_root *r;
        size_t idx;

        r = imdr_root(imdr);
        if (r == NULL)
                return false;

        /* Determine if the entry is within this root structure. */
        idx = e - &r->entries[0];
        if (idx >= r->num_entries)
                return false;

        return true;
}
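
/* Resolve which region (large or small) an entry belongs to. */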
static const struct imdr *imd_entry_to_imdr(const struct imd *imd,
                                                const struct imd_entry *entry)
{
        if (imdr_has_entry(&imd->lg, entry))
                return &imd->lg;

        if (imdr_has_entry(&imd->sm, entry))
                return &imd->sm;

        return NULL;
}

/* Initialize imd handle. */
void imd_handle_init(struct imd *imd, void *upper_limit)
{
        imdr_init(&imd->lg, upper_limit);
        imdr_init(&imd->sm, NULL);
}
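
/*
 * Re-establish the large root (and, if present, the small root) directly
 * from the stored root pointers, without the per-entry validation that
 * imdr_recover() performs.
 */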
void imd_handle_init_partial_recovery(struct imd *imd)
{
        const struct imd_entry *e;
        struct imd_root_pointer *rp;
        struct imdr *imdr;

        if (imd->lg.limit == 0)
                return;

        imd_handle_init(imd, (void *)imd->lg.limit);

        /* Initialize root pointer for the large regions. */
        imdr = &imd->lg;
        rp = imdr_get_root_pointer(imdr);
        imdr->r = relative_pointer(rp, rp->root_offset);

        e = imdr_entry_find(imdr, SMALL_REGION_ID);

        if (e == NULL)
                return;

        imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
        imd->sm.limit += imdr_entry_size(e);
        imdr = &imd->sm;
        rp = imdr_get_root_pointer(imdr);
        imdr->r = relative_pointer(rp, rp->root_offset);
}

int imd_create_empty(struct imd *imd, size_t root_size, size_t entry_align)
{
        return imdr_create_empty(&imd->lg, root_size, entry_align);
}

int imd_create_tiered_empty(struct imd *imd,
                                size_t lg_root_size, size_t lg_entry_align,
                                size_t sm_root_size, size_t sm_entry_align)
{
        size_t sm_region_size;
        const struct imd_entry *e;
        struct imdr *imdr;

        imdr = &imd->lg;

        if (imdr_create_empty(imdr, lg_root_size, lg_entry_align) != 0)
                return -1;

        /* Calculate the size of the small region to request. */
        sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
        sm_region_size += sm_root_size;
        sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);

        /* Add a new entry to the large region to cover the root and entries. */
        e = imdr_entry_add(imdr, SMALL_REGION_ID, sm_region_size);

        if (e == NULL)
                goto fail;

        imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
        imd->sm.limit += sm_region_size;

        if (imdr_create_empty(&imd->sm, sm_root_size, sm_entry_align) != 0 ||
                        imdr_limit_size(&imd->sm, sm_region_size))
                goto fail;

        return 0;
fail:
        imd_handle_init(imd, (void *)imdr->limit);
        return -1;
}
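
/*
 * Illustrative call sequence only (a sketch, not code from this file): a
 * caller would typically either recover an existing imd or create a fresh
 * tiered one, then allocate entries. `top_of_mem`, `id` and the size and
 * alignment values below are hypothetical.
 *
 *	struct imd imd;
 *
 *	imd_handle_init(&imd, top_of_mem);
 *	if (imd_recover(&imd) != 0)
 *		imd_create_tiered_empty(&imd, 4096, 4096, 1024, 32);
 *
 *	const struct imd_entry *e = imd_entry_find_or_add(&imd, id, 256);
 *	void *data = imd_entry_at(&imd, e);
 */
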
int imd_recover(struct imd *imd)
{
        const struct imd_entry *e;
        uintptr_t small_upper_limit;
        struct imdr *imdr;

        imdr = &imd->lg;
        if (imdr_recover(imdr) != 0)
                return -1;

        /* Determine if small region is present. */
        e = imdr_entry_find(imdr, SMALL_REGION_ID);

        if (e == NULL)
                return 0;

        small_upper_limit = (uintptr_t)imdr_entry_at(imdr, e);
        small_upper_limit += imdr_entry_size(e);

        imd->sm.limit = small_upper_limit;

        /* Tear down any changes on failure. */
        if (imdr_recover(&imd->sm) != 0) {
                imd_handle_init(imd, (void *)imd->lg.limit);
                return -1;
        }

        return 0;
}

int imd_limit_size(struct imd *imd, size_t max_size)
{
        return imdr_limit_size(&imd->lg, max_size);
}

int imd_lockdown(struct imd *imd)
{
        struct imd_root *r;

        r = imdr_root(&imd->lg);
        if (r == NULL)
                return -1;

        r->flags |= IMD_FLAG_LOCKED;

        r = imdr_root(&imd->sm);
        if (r != NULL)
                r->flags |= IMD_FLAG_LOCKED;

        return 0;
}

int imd_region_used(struct imd *imd, void **base, size_t *size)
{
        struct imd_root *r;
        struct imd_entry *e;
        void *low_addr;
        size_t sz_used;

        if (!imd->lg.limit)
                return -1;

        r = imdr_root(&imd->lg);

        if (r == NULL)
                return -1;

        /* Use last entry to obtain lowest address. */
        e = root_last_entry(r);

        low_addr = relative_pointer(r, e->start_offset);

        /* Total size used is the last entry's base up to the limit. */
        sz_used = imd->lg.limit - (uintptr_t)low_addr;

        *base = low_addr;
        *size = sz_used;

        return 0;
}
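
/*
 * Allocation policy: requests no larger than the small region's entry_align,
 * or no larger than 1/4 of the space left in the small region, are tried
 * there first; anything else, or a failed small allocation, goes to the
 * large region.
 */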
const struct imd_entry *imd_entry_add(const struct imd *imd, uint32_t id,
                                        size_t size)
{
        struct imd_root *r;
        const struct imdr *imdr;
        const struct imd_entry *e = NULL;

        /*
         * Determine if the requested size is less than 1/4 of the small data
         * region that is left.
         */
        imdr = &imd->sm;
        r = imdr_root(imdr);

        /* No small region. Use the large region. */
        if (r == NULL)
                return imdr_entry_add(&imd->lg, id, size);
        else if (size <= r->entry_align || size <= imd_root_data_left(r) / 4)
                e = imdr_entry_add(imdr, id, size);

        /* Fall back on large region allocation. */
        if (e == NULL)
                e = imdr_entry_add(&imd->lg, id, size);

        return e;
}
const struct imd_entry *imd_entry_find(const struct imd *imd, uint32_t id)
{
        const struct imd_entry *e;

        /* Many of the smaller allocations are used a lot. Therefore, try
         * the small region first. */
        e = imdr_entry_find(&imd->sm, id);

        if (e == NULL)
                e = imdr_entry_find(&imd->lg, id);

        return e;
}

const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
                                                uint32_t id, size_t size)
{
        const struct imd_entry *e;

        e = imd_entry_find(imd, id);

        if (e != NULL)
                return e;

        return imd_entry_add(imd, id, size);
}

size_t imd_entry_size(const struct imd_entry *entry)
{
        return imdr_entry_size(entry);
}

void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
{
        const struct imdr *imdr;

        imdr = imd_entry_to_imdr(imd, entry);

        if (imdr == NULL)
                return NULL;

        return imdr_entry_at(imdr, entry);
}

uint32_t imd_entry_id(const struct imd_entry *entry)
{
        return entry->id;
}

int imd_entry_remove(const struct imd *imd, const struct imd_entry *entry)
{
        struct imd_root *r;
        const struct imdr *imdr;

        imdr = imd_entry_to_imdr(imd, entry);

        if (imdr == NULL)
                return -1;

        r = imdr_root(imdr);

        if (root_is_locked(r))
                return -1;

        if (entry != root_last_entry(r))
                return -1;

        /* Don't remove entry covering root region */
        if (r->num_entries == 1)
                return -1;

        r->num_entries--;

        return 0;
}
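
/*
 * Dump every entry of one region, resolving ids to human-readable names via
 * the optional lookup table.
 */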
static void imdr_print_entries(const struct imdr *imdr, const char *indent,
                                const struct imd_lookup *lookup, size_t size)
{
        struct imd_root *r;
        size_t i;
        size_t j;

        if (imdr == NULL)
                return;

        r = imdr_root(imdr);

        for (i = 0; i < r->num_entries; i++) {
                const char *name = NULL;
                const struct imd_entry *e = &r->entries[i];

                for (j = 0; j < size; j++) {
                        if (lookup[j].id == e->id) {
                                name = lookup[j].name;
                                break;
                        }
                }

                printk(BIOS_DEBUG, "%s", indent);

                if (name == NULL)
                        printk(BIOS_DEBUG, "%08x ", e->id);
                else
                        printk(BIOS_DEBUG, "%s", name);
                printk(BIOS_DEBUG, "%2zu. ", i);
                printk(BIOS_DEBUG, "%p ", imdr_entry_at(imdr, e));
                printk(BIOS_DEBUG, "0x%08zx\n", imdr_entry_size(e));
        }
}

int imd_print_entries(const struct imd *imd, const struct imd_lookup *lookup,
                        size_t size)
{
        if (imdr_root(&imd->lg) == NULL)
                return -1;

        imdr_print_entries(&imd->lg, "", lookup, size);
        if (imdr_root(&imd->sm) != NULL) {
                printk(BIOS_DEBUG, "IMD small region:\n");
                imdr_print_entries(&imd->sm, " ", lookup, size);
        }

        return 0;
}

int imd_cursor_init(const struct imd *imd, struct imd_cursor *cursor)
{
        if (imd == NULL || cursor == NULL)
                return -1;

        memset(cursor, 0, sizeof(*cursor));

        cursor->imdr[0] = &imd->lg;
        cursor->imdr[1] = &imd->sm;

        return 0;
}
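
/*
 * Iterate over all entries: first every entry of the large region, then
 * every entry of the small region; returns NULL once both are exhausted.
 */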
const struct imd_entry *imd_cursor_next(struct imd_cursor *cursor)
{
        struct imd_root *r;
        const struct imd_entry *e;

        if (cursor->current_imdr >= ARRAY_SIZE(cursor->imdr))
                return NULL;

        r = imdr_root(cursor->imdr[cursor->current_imdr]);

        if (r == NULL)
                return NULL;

        if (cursor->current_entry >= r->num_entries) {
                /* Try next imdr. */
                cursor->current_imdr++;
                cursor->current_entry = 0;
                return imd_cursor_next(cursor);
        }

        e = &r->entries[cursor->current_entry];
        cursor->current_entry++;

        return e;
}