1 /* SPDX-License-Identifier: GPL-2.0-only */
5 #include <console/console.h>
10 #include <imd_private.h>
13 /* For more details on implementation and usage please see the imd.h header. */
/* Compute base + offset in bytes; offset may be negative. */
static void *relative_pointer(void *base, ssize_t offset)
{
	return (void *)((intptr_t)base + offset);
}
22 static bool imd_root_pointer_valid(const struct imd_root_pointer
*rp
)
24 return !!(rp
->magic
== IMD_ROOT_PTR_MAGIC
);
27 static struct imd_root
*imdr_root(const struct imdr
*imdr
)
33 * The root pointer is relative to the upper limit of the imd. i.e. It sits
34 * just below the upper limit.
36 static struct imd_root_pointer
*imdr_get_root_pointer(const struct imdr
*imdr
)
38 struct imd_root_pointer
*rp
;
40 rp
= relative_pointer((void *)imdr
->limit
, -sizeof(*rp
));
45 static void imd_link_root(struct imd_root_pointer
*rp
, struct imd_root
*r
)
47 rp
->magic
= IMD_ROOT_PTR_MAGIC
;
48 rp
->root_offset
= (int32_t)((intptr_t)r
- (intptr_t)rp
);
51 static struct imd_entry
*root_last_entry(struct imd_root
*r
)
53 return &r
->entries
[r
->num_entries
- 1];
56 static size_t root_num_entries(size_t root_size
)
60 entries_size
= root_size
;
61 entries_size
-= sizeof(struct imd_root_pointer
);
62 entries_size
-= sizeof(struct imd_root
);
64 return entries_size
/ sizeof(struct imd_entry
);
67 static size_t imd_root_data_left(struct imd_root
*r
)
69 struct imd_entry
*last_entry
;
71 last_entry
= root_last_entry(r
);
73 if (r
->max_offset
!= 0)
74 return last_entry
->start_offset
- r
->max_offset
;
79 static bool root_is_locked(const struct imd_root
*r
)
81 return !!(r
->flags
& IMD_FLAG_LOCKED
);
84 static void imd_entry_assign(struct imd_entry
*e
, uint32_t id
,
85 ssize_t offset
, size_t size
)
87 e
->magic
= IMD_ENTRY_MAGIC
;
88 e
->start_offset
= offset
;
93 static void imdr_init(struct imdr
*ir
, void *upper_limit
)
95 uintptr_t limit
= (uintptr_t)upper_limit
;
96 /* Upper limit is aligned down to 4KiB */
97 ir
->limit
= ALIGN_DOWN(limit
, LIMIT_ALIGN
);
101 static int imdr_create_empty(struct imdr
*imdr
, size_t root_size
,
104 struct imd_root_pointer
*rp
;
112 /* root_size and entry_align should be a power of 2. */
113 assert(IS_POWER_OF_2(root_size
));
114 assert(IS_POWER_OF_2(entry_align
));
117 * root_size needs to be large enough to accommodate root pointer and
118 * root book keeping structure. Furthermore, there needs to be a space
119 * for at least one entry covering root region. The caller needs to
120 * ensure there's enough room for tracking individual allocations.
122 if (root_size
< (sizeof(*rp
) + sizeof(*r
) + sizeof(*e
)))
125 /* For simplicity don't allow sizes or alignments to exceed LIMIT_ALIGN.
127 if (root_size
> LIMIT_ALIGN
|| entry_align
> LIMIT_ALIGN
)
130 /* Additionally, don't handle an entry alignment > root_size. */
131 if (entry_align
> root_size
)
134 rp
= imdr_get_root_pointer(imdr
);
136 root_offset
= -(ssize_t
)root_size
;
137 /* Set root pointer. */
138 imdr
->r
= relative_pointer((void *)imdr
->limit
, root_offset
);
140 imd_link_root(rp
, r
);
142 memset(r
, 0, sizeof(*r
));
143 r
->entry_align
= entry_align
;
145 /* Calculate size left for entries. */
146 r
->max_entries
= root_num_entries(root_size
);
148 /* Fill in first entry covering the root region. */
151 imd_entry_assign(e
, CBMEM_ID_IMD_ROOT
, 0, root_size
);
153 printk(BIOS_DEBUG
, "IMD: root @ %p %u entries.\n", r
, r
->max_entries
);
158 static int imdr_recover(struct imdr
*imdr
)
160 struct imd_root_pointer
*rp
;
168 rp
= imdr_get_root_pointer(imdr
);
170 if (!imd_root_pointer_valid(rp
))
173 r
= relative_pointer(rp
, rp
->root_offset
);
175 /* Ensure that root is just under the root pointer */
176 if ((intptr_t)rp
- (intptr_t)&r
->entries
[r
->max_entries
] > sizeof(struct imd_entry
))
179 if (r
->num_entries
> r
->max_entries
)
182 /* Entry alignment should be power of 2. */
183 if (!IS_POWER_OF_2(r
->entry_align
))
186 low_limit
= (uintptr_t)relative_pointer(r
, r
->max_offset
);
188 /* If no max_offset then lowest limit is 0. */
189 if (low_limit
== (uintptr_t)r
)
192 for (i
= 0; i
< r
->num_entries
; i
++) {
193 uintptr_t start_addr
;
194 const struct imd_entry
*e
= &r
->entries
[i
];
196 if (e
->magic
!= IMD_ENTRY_MAGIC
)
199 start_addr
= (uintptr_t)relative_pointer(r
, e
->start_offset
);
200 if (start_addr
< low_limit
)
202 if (start_addr
>= imdr
->limit
||
203 (start_addr
+ e
->size
) > imdr
->limit
)
207 /* Set root pointer. */
213 static const struct imd_entry
*imdr_entry_find(const struct imdr
*imdr
,
226 /* Skip first entry covering the root. */
227 for (i
= 1; i
< r
->num_entries
; i
++) {
228 if (id
!= r
->entries
[i
].id
)
237 static int imdr_limit_size(struct imdr
*imdr
, size_t max_size
)
247 root_size
= imdr
->limit
- (uintptr_t)r
;
249 if (max_size
< root_size
)
252 /* Take into account the root size. */
253 smax_size
= max_size
- root_size
;
254 smax_size
= -smax_size
;
256 r
->max_offset
= smax_size
;
261 static size_t imdr_entry_size(const struct imd_entry
*e
)
266 static void *imdr_entry_at(const struct imdr
*imdr
, const struct imd_entry
*e
)
268 return relative_pointer(imdr_root(imdr
), e
->start_offset
);
271 static struct imd_entry
*imd_entry_add_to_root(struct imd_root
*r
, uint32_t id
,
274 struct imd_entry
*entry
;
275 struct imd_entry
*last_entry
;
279 if (r
->num_entries
== r
->max_entries
)
282 /* Determine total size taken up by entry. */
283 used_size
= ALIGN_UP(size
, r
->entry_align
);
285 /* See if size overflows imd total size. */
286 if (used_size
> imd_root_data_left(r
))
290 * Determine if offset field overflows. All offsets should be lower
291 * than the previous one.
293 last_entry
= root_last_entry(r
);
294 e_offset
= last_entry
->start_offset
;
295 e_offset
-= (ssize_t
)used_size
;
296 if (e_offset
>= last_entry
->start_offset
)
299 entry
= root_last_entry(r
) + 1;
302 imd_entry_assign(entry
, id
, e_offset
, size
);
/* Add an entry to a region, refusing if the region is absent or locked. */
static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
					      uint32_t id, size_t size)
{
	struct imd_root *r = imdr_root(imdr);

	if (r == NULL)
		return NULL;

	if (root_is_locked(r))
		return NULL;

	return imd_entry_add_to_root(r, id, size);
}
323 static bool imdr_has_entry(const struct imdr
*imdr
, const struct imd_entry
*e
)
332 /* Determine if the entry is within this root structure. */
333 idx
= e
- &r
->entries
[0];
334 if (idx
>= r
->num_entries
)
340 static const struct imdr
*imd_entry_to_imdr(const struct imd
*imd
,
341 const struct imd_entry
*entry
)
343 if (imdr_has_entry(&imd
->lg
, entry
))
346 if (imdr_has_entry(&imd
->sm
, entry
))
352 /* Initialize imd handle. */
353 void imd_handle_init(struct imd
*imd
, void *upper_limit
)
355 imdr_init(&imd
->lg
, upper_limit
);
356 imdr_init(&imd
->sm
, NULL
);
359 void imd_handle_init_partial_recovery(struct imd
*imd
)
361 const struct imd_entry
*e
;
362 struct imd_root_pointer
*rp
;
365 if (imd
->lg
.limit
== 0)
368 imd_handle_init(imd
, (void *)imd
->lg
.limit
);
370 /* Initialize root pointer for the large regions. */
372 rp
= imdr_get_root_pointer(imdr
);
373 imdr
->r
= relative_pointer(rp
, rp
->root_offset
);
375 e
= imdr_entry_find(imdr
, SMALL_REGION_ID
);
380 imd
->sm
.limit
= (uintptr_t)imdr_entry_at(imdr
, e
);
381 imd
->sm
.limit
+= imdr_entry_size(e
);
383 rp
= imdr_get_root_pointer(imdr
);
384 imdr
->r
= relative_pointer(rp
, rp
->root_offset
);
387 int imd_create_empty(struct imd
*imd
, size_t root_size
, size_t entry_align
)
389 return imdr_create_empty(&imd
->lg
, root_size
, entry_align
);
392 int imd_create_tiered_empty(struct imd
*imd
,
393 size_t lg_root_size
, size_t lg_entry_align
,
394 size_t sm_root_size
, size_t sm_entry_align
)
396 size_t sm_region_size
;
397 const struct imd_entry
*e
;
402 if (imdr_create_empty(imdr
, lg_root_size
, lg_entry_align
) != 0)
405 /* Calculate the size of the small region to request. */
406 sm_region_size
= root_num_entries(sm_root_size
) * sm_entry_align
;
407 sm_region_size
+= sm_root_size
;
408 sm_region_size
= ALIGN_UP(sm_region_size
, lg_entry_align
);
410 /* Add a new entry to the large region to cover the root and entries. */
411 e
= imdr_entry_add(imdr
, SMALL_REGION_ID
, sm_region_size
);
416 imd
->sm
.limit
= (uintptr_t)imdr_entry_at(imdr
, e
);
417 imd
->sm
.limit
+= sm_region_size
;
419 if (imdr_create_empty(&imd
->sm
, sm_root_size
, sm_entry_align
) != 0 ||
420 imdr_limit_size(&imd
->sm
, sm_region_size
))
425 imd_handle_init(imd
, (void *)imdr
->limit
);
429 int imd_recover(struct imd
*imd
)
431 const struct imd_entry
*e
;
432 uintptr_t small_upper_limit
;
436 if (imdr_recover(imdr
) != 0)
439 /* Determine if small region is present. */
440 e
= imdr_entry_find(imdr
, SMALL_REGION_ID
);
445 small_upper_limit
= (uintptr_t)imdr_entry_at(imdr
, e
);
446 small_upper_limit
+= imdr_entry_size(e
);
448 imd
->sm
.limit
= small_upper_limit
;
450 /* Tear down any changes on failure. */
451 if (imdr_recover(&imd
->sm
) != 0) {
452 imd_handle_init(imd
, (void *)imd
->lg
.limit
);
459 int imd_limit_size(struct imd
*imd
, size_t max_size
)
461 return imdr_limit_size(&imd
->lg
, max_size
);
464 int imd_lockdown(struct imd
*imd
)
468 r
= imdr_root(&imd
->lg
);
472 r
->flags
|= IMD_FLAG_LOCKED
;
474 r
= imdr_root(&imd
->sm
);
476 r
->flags
|= IMD_FLAG_LOCKED
;
481 int imd_region_used(struct imd
*imd
, void **base
, size_t *size
)
491 r
= imdr_root(&imd
->lg
);
496 /* Use last entry to obtain lowest address. */
497 e
= root_last_entry(r
);
499 low_addr
= relative_pointer(r
, e
->start_offset
);
501 /* Total size used is the last entry's base up to the limit. */
502 sz_used
= imd
->lg
.limit
- (uintptr_t)low_addr
;
510 const struct imd_entry
*imd_entry_add(const struct imd
*imd
, uint32_t id
,
514 const struct imdr
*imdr
;
515 const struct imd_entry
*e
= NULL
;
518 * Determine if requested size is less than 1/4 of small data
524 /* No small region. Use the large region. */
526 return imdr_entry_add(&imd
->lg
, id
, size
);
527 else if (size
<= r
->entry_align
|| size
<= imd_root_data_left(r
) / 4)
528 e
= imdr_entry_add(imdr
, id
, size
);
530 /* Fall back on large region allocation. */
532 e
= imdr_entry_add(&imd
->lg
, id
, size
);
537 const struct imd_entry
*imd_entry_find(const struct imd
*imd
, uint32_t id
)
539 const struct imd_entry
*e
;
541 /* Many of the smaller allocations are used a lot. Therefore, try
542 * the small region first. */
543 e
= imdr_entry_find(&imd
->sm
, id
);
546 e
= imdr_entry_find(&imd
->lg
, id
);
/* Return an existing entry with the given id, or allocate a new one. */
const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
					      uint32_t id, size_t size)
{
	const struct imd_entry *e = imd_entry_find(imd, id);

	if (e != NULL)
		return e;

	return imd_entry_add(imd, id, size);
}
/* Public accessor for an entry's recorded size. */
size_t imd_entry_size(const struct imd_entry *entry)
{
	return imdr_entry_size(entry);
}
/* Address of the memory backing an entry; NULL if the entry is unknown. */
void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
{
	const struct imdr *owner = imd_entry_to_imdr(imd, entry);

	if (owner == NULL)
		return NULL;

	return imdr_entry_at(owner, entry);
}
581 uint32_t imd_entry_id(const struct imd_entry
*entry
)
586 int imd_entry_remove(const struct imd
*imd
, const struct imd_entry
*entry
)
589 const struct imdr
*imdr
;
591 imdr
= imd_entry_to_imdr(imd
, entry
);
598 if (root_is_locked(r
))
601 if (entry
!= root_last_entry(r
))
604 /* Don't remove entry covering root region */
605 if (r
->num_entries
== 1)
613 static void imdr_print_entries(const struct imdr
*imdr
, const char *indent
,
614 const struct imd_lookup
*lookup
, size_t size
)
625 for (i
= 0; i
< r
->num_entries
; i
++) {
626 const char *name
= NULL
;
627 const struct imd_entry
*e
= &r
->entries
[i
];
629 for (j
= 0; j
< size
; j
++) {
630 if (lookup
[j
].id
== e
->id
) {
631 name
= lookup
[j
].name
;
636 printk(BIOS_DEBUG
, "%s", indent
);
639 printk(BIOS_DEBUG
, "%08x ", e
->id
);
641 printk(BIOS_DEBUG
, "%s", name
);
642 printk(BIOS_DEBUG
, "%2zu. ", i
);
643 printk(BIOS_DEBUG
, "%p ", imdr_entry_at(imdr
, e
));
644 printk(BIOS_DEBUG
, "0x%08zx\n", imdr_entry_size(e
));
648 int imd_print_entries(const struct imd
*imd
, const struct imd_lookup
*lookup
,
651 if (imdr_root(&imd
->lg
) == NULL
)
654 imdr_print_entries(&imd
->lg
, "", lookup
, size
);
655 if (imdr_root(&imd
->sm
) != NULL
) {
656 printk(BIOS_DEBUG
, "IMD small region:\n");
657 imdr_print_entries(&imd
->sm
, " ", lookup
, size
);
663 int imd_cursor_init(const struct imd
*imd
, struct imd_cursor
*cursor
)
665 if (imd
== NULL
|| cursor
== NULL
)
668 memset(cursor
, 0, sizeof(*cursor
));
670 cursor
->imdr
[0] = &imd
->lg
;
671 cursor
->imdr
[1] = &imd
->sm
;
676 const struct imd_entry
*imd_cursor_next(struct imd_cursor
*cursor
)
679 const struct imd_entry
*e
;
681 if (cursor
->current_imdr
>= ARRAY_SIZE(cursor
->imdr
))
684 r
= imdr_root(cursor
->imdr
[cursor
->current_imdr
]);
689 if (cursor
->current_entry
>= r
->num_entries
) {
691 cursor
->current_imdr
++;
692 cursor
->current_entry
= 0;
693 return imd_cursor_next(cursor
);
696 e
= &r
->entries
[cursor
->current_entry
];
697 cursor
->current_entry
++;