2 * This file is part of the libpayload project.
4 * Copyright (C) 2008 Advanced Micro Devices, Inc.
5 * Copyright (C) 2008-2010 coresystems GmbH
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * This is a classically weak malloc() implementation. We have a relatively
33 * small and static heap, so we take the easy route with an O(N) loop
34 * through the tree for every malloc() and free(). Obviously, this doesn't
35 * scale past a few hundred KB (if that).
37 * We're also susceptible to the usual buffer overrun poisoning, though the
38 * risk is within acceptable ranges for this implementation (don't overrun
39 * your buffers, kids!).
43 #include <libpayload.h>
49 struct align_region_t
* align_regions
;
50 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
51 int magic_initialized
;
57 extern char _heap
, _eheap
; /* Defined in the ldscript. */
59 static struct memory_type default_type
=
60 { (void *)&_heap
, (void *)&_eheap
, NULL
61 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
65 static struct memory_type
*const heap
= &default_type
;
66 static struct memory_type
*dma
= &default_type
;
68 typedef u64 hdrtype_t
;
69 #define HDRSIZE (sizeof(hdrtype_t))
71 #define SIZE_BITS ((HDRSIZE << 3) - 7)
72 #define MAGIC (((hdrtype_t)0x2a) << (SIZE_BITS + 1))
73 #define FLAG_FREE (((hdrtype_t)0x01) << (SIZE_BITS + 0))
74 #define MAX_SIZE ((((hdrtype_t)0x01) << SIZE_BITS) - 1)
76 #define SIZE(_h) ((_h) & MAX_SIZE)
78 #define _HEADER(_s, _f) ((hdrtype_t) (MAGIC | (_f) | ((_s) & MAX_SIZE)))
80 #define FREE_BLOCK(_s) _HEADER(_s, FLAG_FREE)
81 #define USED_BLOCK(_s) _HEADER(_s, 0)
83 #define IS_FREE(_h) (((_h) & (MAGIC | FLAG_FREE)) == (MAGIC | FLAG_FREE))
84 #define HAS_MAGIC(_h) (((_h) & MAGIC) == MAGIC)
86 static int free_aligned(void* addr
, struct memory_type
*type
);
87 void print_malloc_map(void);
89 void init_dma_memory(void *start
, u32 size
)
91 if (dma_initialized()) {
92 printf("ERROR: %s called twice!\n", __func__
);
97 * DMA memory might not be zeroed by coreboot on stage loading, so make
98 * sure we clear the magic cookie from last boot.
100 *(hdrtype_t
*)start
= 0;
102 dma
= malloc(sizeof(*dma
));
104 dma
->end
= start
+ size
;
105 dma
->align_regions
= NULL
;
107 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
108 dma
->minimal_free
= 0;
109 dma
->magic_initialized
= 0;
112 printf("Initialized cache-coherent DMA memory at [%p:%p]\n", start
, start
+ size
);
116 int dma_initialized()
121 /* For boards that don't initialize DMA we assume all locations are coherent */
122 int dma_coherent(void *ptr
)
124 return !dma_initialized() || (dma
->start
<= ptr
&& dma
->end
> ptr
);
127 static void *alloc(int len
, struct memory_type
*type
)
130 hdrtype_t
volatile *ptr
= (hdrtype_t
volatile *)type
->start
;
132 /* Align the size. */
133 len
= ALIGN_UP(len
, HDRSIZE
);
135 if (!len
|| len
> MAX_SIZE
)
138 /* Make sure the region is setup correctly. */
139 if (!HAS_MAGIC(*ptr
)) {
140 size_t size
= (type
->end
- type
->start
) - HDRSIZE
;
141 *ptr
= FREE_BLOCK(size
);
142 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
143 type
->magic_initialized
= 1;
144 type
->minimal_free
= size
;
148 /* Find some free space. */
151 int size
= SIZE(header
);
153 if (!HAS_MAGIC(header
) || size
== 0) {
154 printf("memory allocator panic. (%s%s)\n",
155 !HAS_MAGIC(header
) ? " no magic " : "",
156 size
== 0 ? " size=0 " : "");
160 if (header
& FLAG_FREE
) {
162 hdrtype_t
volatile *nptr
= (hdrtype_t
volatile *)((uintptr_t)ptr
+ HDRSIZE
+ len
);
163 int nsize
= size
- (HDRSIZE
+ len
);
165 /* If there is still room in this block,
166 * then mark it as such otherwise account
167 * the whole space for that block.
171 /* Mark the block as used. */
172 *ptr
= USED_BLOCK(len
);
174 /* Create a new free block. */
175 *nptr
= FREE_BLOCK(nsize
);
177 /* Mark the block as used. */
178 *ptr
= USED_BLOCK(size
);
181 return (void *)((uintptr_t)ptr
+ HDRSIZE
);
185 ptr
= (hdrtype_t
volatile *)((uintptr_t)ptr
+ HDRSIZE
+ size
);
187 } while (ptr
< (hdrtype_t
*) type
->end
);
189 /* Nothing available. */
193 static void _consolidate(struct memory_type
*type
)
195 void *ptr
= type
->start
;
197 while (ptr
< type
->end
) {
199 hdrtype_t hdr
= *((hdrtype_t
*) ptr
);
200 unsigned int size
= 0;
203 ptr
+= HDRSIZE
+ SIZE(hdr
);
208 nptr
= ptr
+ HDRSIZE
+ SIZE(hdr
);
210 while (nptr
< type
->end
) {
211 hdrtype_t nhdr
= *((hdrtype_t
*) nptr
);
213 if (!(IS_FREE(nhdr
)))
216 size
+= SIZE(nhdr
) + HDRSIZE
;
218 *((hdrtype_t
*) nptr
) = 0;
220 nptr
+= (HDRSIZE
+ SIZE(nhdr
));
223 *((hdrtype_t
*) ptr
) = FREE_BLOCK(size
);
231 struct memory_type
*type
= heap
;
234 if (ptr
< type
->start
|| ptr
>= type
->end
) {
236 if (ptr
< type
->start
|| ptr
>= type
->end
)
240 if (free_aligned(ptr
, type
)) return;
243 hdr
= *((hdrtype_t
*) ptr
);
245 /* Not our header (we're probably poisoned). */
253 *((hdrtype_t
*) ptr
) = FREE_BLOCK(SIZE(hdr
));
257 void *malloc(size_t size
)
259 return alloc(size
, heap
);
262 void *dma_malloc(size_t size
)
264 return alloc(size
, dma
);
267 void *calloc(size_t nmemb
, size_t size
)
269 size_t total
= nmemb
* size
;
270 void *ptr
= alloc(total
, heap
);
273 memset(ptr
, 0, total
);
278 void *realloc(void *ptr
, size_t size
)
282 struct memory_type
*type
= heap
;
285 return alloc(size
, type
);
287 pptr
= ptr
- HDRSIZE
;
289 if (!HAS_MAGIC(*((hdrtype_t
*) pptr
)))
292 if (ptr
< type
->start
|| ptr
>= type
->end
)
295 /* Get the original size of the block. */
296 osize
= SIZE(*((hdrtype_t
*) pptr
));
299 * Free the memory to update the tables - this won't touch the actual
300 * memory, so we can still use it for the copy after we have
301 * reallocated the new space.
304 ret
= alloc(size
, type
);
307 * if ret == NULL, then doh - failure.
308 * if ret == ptr then woo-hoo! no copy needed.
310 if (ret
== NULL
|| ret
== ptr
)
313 /* Copy the memory to the new location. */
314 memcpy(ret
, ptr
, osize
> size
? size
: osize
);
/* Bookkeeping for one aligned-allocation arena. */
struct align_region_t
{
	/* If alignment is 0 then the region represents a large region which
	 * has no metadata for tracking subelements. */
	int alignment;
	/* start in memory, and size in bytes */
	void* start;
	int size;
	/* layout within a region:
	 * - num_elements bytes, 0: free, 1: used, 2: used, combines with next
	 * - padding to alignment
	 * - data section
	 * - waste space
	 *
	 * start_data points to the start of the data section
	 */
	void* start_data;
	/* number of free blocks sized "alignment" */
	int free;
	struct align_region_t *next;
};
341 static inline int region_is_large(const struct align_region_t
*r
)
343 return r
->alignment
== 0;
346 static inline int addr_in_region(const struct align_region_t
*r
, void *addr
)
348 return ((addr
>= r
->start_data
) && (addr
< r
->start_data
+ r
->size
));
351 /* num_elements == 0 indicates a large aligned region instead of a smaller
352 * region comprised of alignment-sized chunks. */
353 static struct align_region_t
*allocate_region(int alignment
, int num_elements
,
354 size_t size
, struct memory_type
*type
)
356 struct align_region_t
*r
;
359 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
360 printf("%s(old align_regions=%p, alignment=%u, num_elements=%u, size=%zu)\n",
361 __func__
, type
->align_regions
, alignment
, num_elements
, size
);
364 r
= malloc(sizeof(*r
));
369 memset(r
, 0, sizeof(*r
));
371 if (num_elements
!= 0) {
372 r
->alignment
= alignment
;
373 r
->size
= num_elements
* alignment
;
374 r
->free
= num_elements
;
375 /* Allocate enough memory for alignment requirements and
376 * metadata for each chunk. */
377 extra_space
= num_elements
;
379 /* Large aligned allocation. Set alignment = 0. */
385 r
->start
= alloc(r
->size
+ alignment
+ extra_space
, type
);
387 if (r
->start
== NULL
) {
392 r
->start_data
= (void *)ALIGN_UP((uintptr_t)r
->start
+ extra_space
,
395 /* Clear any (if requested) metadata. */
396 memset(r
->start
, 0, extra_space
);
398 /* Link the region with the rest. */
399 r
->next
= type
->align_regions
;
400 type
->align_regions
= r
;
405 static void try_free_region(struct align_region_t
**prev_link
)
407 struct align_region_t
*r
= *prev_link
;
409 /* All large regions are immediately free-able. Non-large regions
410 * need to be checked for the fully freed state. */
411 if (!region_is_large(r
)) {
412 if (r
->free
!= r
->size
/ r
->alignment
)
416 /* Unlink region from link list. */
417 *prev_link
= r
->next
;
419 /* Free the data and metadata. */
424 static int free_aligned(void* addr
, struct memory_type
*type
)
426 struct align_region_t
**prev_link
= &type
->align_regions
;
428 while (*prev_link
!= NULL
)
430 if (!addr_in_region(*prev_link
, addr
)) {
431 prev_link
= &((*prev_link
)->next
);
435 if (region_is_large(*prev_link
)) {
436 try_free_region(prev_link
);
440 int i
= (addr
-(*prev_link
)->start_data
)/(*prev_link
)->alignment
;
441 u8
*meta
= (*prev_link
)->start
;
445 (*prev_link
)->free
++;
448 (*prev_link
)->free
++;
449 try_free_region(prev_link
);
455 static void *alloc_aligned(size_t align
, size_t size
, struct memory_type
*type
)
457 /* Define a large request to be 1024 bytes for either alignment or
458 * size of allocation. */
459 const size_t large_request
= 1024;
461 if (size
== 0) return 0;
462 if (type
->align_regions
== 0) {
463 type
->align_regions
= malloc(sizeof(struct align_region_t
));
464 if (type
->align_regions
== NULL
)
466 memset(type
->align_regions
, 0, sizeof(struct align_region_t
));
468 struct align_region_t
*reg
= type
->align_regions
;
470 if (size
>= large_request
|| align
>= large_request
) {
471 reg
= allocate_region(align
, 0, size
, type
);
474 return reg
->start_data
;
480 if ((reg
->alignment
== align
) && (reg
->free
>= (size
+ align
- 1)/align
))
482 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
483 printf(" found memalign region. %x free, %x required\n", reg
->free
, (size
+ align
- 1)/align
);
491 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
492 printf(" need to allocate a new memalign region\n");
494 /* get align regions */
495 reg
= allocate_region(align
, large_request
/align
, size
, type
);
496 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
497 printf(" ... returned %p\n", reg
);
501 /* Nothing available. */
505 int i
, count
= 0, target
= (size
+align
-1)/align
;
506 for (i
= 0; i
< (reg
->size
/align
); i
++)
508 if (((u8
*)reg
->start
)[i
] == 0)
511 if (count
== target
) {
513 for (i
=0; i
<target
-1; i
++)
515 ((u8
*)reg
->start
)[count
+i
]=2;
517 ((u8
*)reg
->start
)[count
+target
-1]=1;
519 return reg
->start_data
+(align
*count
);
525 /* The free space in this region is fragmented,
526 so we will move on and try the next one: */
528 goto look_further
; // end condition is once a new region is allocated - it always has enough space
531 void *memalign(size_t align
, size_t size
)
533 return alloc_aligned(align
, size
, heap
);
536 void *dma_memalign(size_t align
, size_t size
)
538 return alloc_aligned(align
, size
, dma
);
541 /* This is for debugging purposes. */
542 #if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
543 void print_malloc_map(void)
545 struct memory_type
*type
= heap
;
553 while (ptr
< type
->end
) {
554 hdrtype_t hdr
= *((hdrtype_t
*) ptr
);
556 if (!HAS_MAGIC(hdr
)) {
557 if (type
->magic_initialized
)
558 printf("%s: Poisoned magic - we're toast\n", type
->name
);
560 printf("%s: No magic yet - going to initialize\n", type
->name
);
564 /* FIXME: Verify the size of the block. */
566 printf("%s %x: %s (%x bytes)\n", type
->name
,
567 (unsigned int)(ptr
- type
->start
),
568 hdr
& FLAG_FREE
? "FREE" : "USED", SIZE(hdr
));
571 free_memory
+= SIZE(hdr
);
573 ptr
+= HDRSIZE
+ SIZE(hdr
);
576 if (free_memory
&& (type
->minimal_free
> free_memory
))
577 type
->minimal_free
= free_memory
;
578 printf("%s: Maximum memory consumption: %u bytes\n", type
->name
,
579 (type
->end
- type
->start
) - HDRSIZE
- type
->minimal_free
);