/*
 * This file is part of the libpayload project.
 *
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 * Copyright (C) 2008-2010 coresystems GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This is a classically weak malloc() implementation. We have a relatively
 * small and static heap, so we take the easy route with an O(N) loop
 * through the block list for every malloc() and free(). Obviously, this
 * doesn't scale past a few hundred KB (if that).
 *
 * We're also susceptible to the usual buffer overrun poisoning, though the
 * risk is within acceptable ranges for this implementation (don't overrun
 * your buffers, kids!).
 */
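/*
 * Illustrative sketch (added for clarity, not part of the original code):
 * the heap is a flat sequence of blocks, each prefixed by a single
 * hdrtype_t header that encodes a magic cookie, a free/used flag and the
 * payload size, e.g.:
 *
 *   [hdr: USED, 32][32 bytes data][hdr: FREE, 64][64 bytes][hdr: ...]
 *
 * malloc() walks this sequence front to back; free() rewrites one header
 * and then merges adjacent free blocks.
 */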
#define IN_MALLOC_C
#include <libpayload.h>
#include <stdint.h>
struct memory_type {
	void *start;
	void *end;
	struct align_region_t *align_regions;
#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
	int magic_initialized;
	size_t minimal_free;
	const char *name;
#endif
};
extern char _heap, _eheap;	/* Defined in the ldscript. */
static struct memory_type default_type =
	{ (void *)&_heap, (void *)&_eheap, NULL
#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
	, 0, 0, "HEAP"
#endif
	};

static struct memory_type *const heap = &default_type;
static struct memory_type *dma = &default_type;
typedef u64 hdrtype_t;
#define HDRSIZE (sizeof(hdrtype_t))

#define SIZE_BITS ((HDRSIZE << 3) - 7)
#define MAGIC     (((hdrtype_t)0x2a) << (SIZE_BITS + 1))
#define FLAG_FREE (((hdrtype_t)0x01) << (SIZE_BITS + 0))
#define MAX_SIZE  ((((hdrtype_t)0x01) << SIZE_BITS) - 1)

#define SIZE(_h) ((_h) & MAX_SIZE)

#define _HEADER(_s, _f) ((hdrtype_t) (MAGIC | (_f) | ((_s) & MAX_SIZE)))

#define FREE_BLOCK(_s) _HEADER(_s, FLAG_FREE)
#define USED_BLOCK(_s) _HEADER(_s, 0)

#define IS_FREE(_h) (((_h) & (MAGIC | FLAG_FREE)) == (MAGIC | FLAG_FREE))
#define HAS_MAGIC(_h) (((_h) & MAGIC) == MAGIC)
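/*
 * Worked example (added for clarity, not in the original code): with a
 * 64-bit hdrtype_t, HDRSIZE is 8, so SIZE_BITS = (8 << 3) - 7 = 57.  The
 * top six bits (63..58) hold the magic pattern 0x2a, bit 57 is FLAG_FREE,
 * and bits 56..0 hold the payload size, so MAX_SIZE is 2^57 - 1.
 * FREE_BLOCK(64) therefore yields (0x2a << 58) | (1 << 57) | 64.
 */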
static int free_aligned(void *addr, struct memory_type *type);
void print_malloc_map(void);
void init_dma_memory(void *start, u32 size)
{
	if (dma_initialized()) {
		printf("ERROR: %s called twice!\n", __func__);
		return;
	}

	/*
	 * DMA memory might not be zeroed by coreboot on stage loading, so make
	 * sure we clear the magic cookie from last boot.
	 */
	*(hdrtype_t *)start = 0;

	dma = malloc(sizeof(*dma));
	dma->start = start;
	dma->end = start + size;
	dma->align_regions = NULL;

#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
	dma->minimal_free = 0;
	dma->magic_initialized = 0;
	dma->name = "DMA";

	printf("Initialized cache-coherent DMA memory at [%p:%p]\n", start, start + size);
#endif
}
int dma_initialized(void)
{
	return dma != heap;
}
/* For boards that don't initialize DMA we assume all locations are coherent. */
int dma_coherent(void *ptr)
{
	return !dma_initialized() || (dma->start <= ptr && dma->end > ptr);
}
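/*
 * Usage sketch (illustrative only): a payload driver on a platform with a
 * non-coherent default heap would rely on coreboot/libpayload setup calling
 * init_dma_memory() once with a cache-coherent window, then obtain
 * descriptor memory via dma_malloc()/dma_memalign() and check arbitrary
 * buffers with dma_coherent() before handing them to hardware.
 */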
static void *alloc(int len, struct memory_type *type)
{
	hdrtype_t header;
	hdrtype_t volatile *ptr = (hdrtype_t volatile *)type->start;

	/* Align the size. */
	len = ALIGN_UP(len, HDRSIZE);

	if (!len || len > MAX_SIZE)
		return (void *)NULL;

	/* Make sure the region is set up correctly. */
	if (!HAS_MAGIC(*ptr)) {
		size_t size = (type->end - type->start) - HDRSIZE;
		*ptr = FREE_BLOCK(size);
#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
		type->magic_initialized = 1;
		type->minimal_free = size;
#endif
	}

	/* Find some free space. */
	do {
		header = *ptr;
		int size = SIZE(header);

		if (!HAS_MAGIC(header) || size == 0) {
			printf("memory allocator panic. (%s%s)\n",
			       !HAS_MAGIC(header) ? " no magic " : "",
			       size == 0 ? " size=0 " : "");
			halt();
		}

		if (header & FLAG_FREE) {
			if (len <= size) {
				hdrtype_t volatile *nptr = (hdrtype_t volatile *)((uintptr_t)ptr + HDRSIZE + len);
				int nsize = size - (HDRSIZE + len);

				/*
				 * If there is still room left in this block,
				 * split it; otherwise account the whole
				 * space to the allocated block.
				 */
				if (nsize > 0) {
					/* Mark the block as used. */
					*ptr = USED_BLOCK(len);

					/* Create a new free block. */
					*nptr = FREE_BLOCK(nsize);
				} else {
					/* Mark the block as used. */
					*ptr = USED_BLOCK(size);
				}

				return (void *)((uintptr_t)ptr + HDRSIZE);
			}
		}

		ptr = (hdrtype_t volatile *)((uintptr_t)ptr + HDRSIZE + size);

	} while (ptr < (hdrtype_t *)type->end);

	/* Nothing available. */
	return (void *)NULL;
}
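/*
 * Worked example (illustrative): alloc(20) rounds the request up to 24
 * bytes (the next multiple of HDRSIZE).  If the first free block found
 * holds 64 bytes, it is split into a 24-byte USED block and, 8 header
 * bytes later, a new FREE block of 64 - (8 + 24) = 32 bytes.
 */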
static void _consolidate(struct memory_type *type)
{
	void *ptr = type->start;

	while (ptr < type->end) {
		void *nptr;
		hdrtype_t hdr = *((hdrtype_t *) ptr);
		unsigned int size = 0;

		if (!IS_FREE(hdr)) {
			ptr += HDRSIZE + SIZE(hdr);
			continue;
		}

		size = SIZE(hdr);
		nptr = ptr + HDRSIZE + SIZE(hdr);

		while (nptr < type->end) {
			hdrtype_t nhdr = *((hdrtype_t *) nptr);

			if (!(IS_FREE(nhdr)))
				break;

			size += SIZE(nhdr) + HDRSIZE;

			*((hdrtype_t *) nptr) = 0;

			nptr += (HDRSIZE + SIZE(nhdr));
		}

		*((hdrtype_t *) ptr) = FREE_BLOCK(size);
		ptr = nptr;
	}
}
void free(void *ptr)
{
	hdrtype_t hdr;
	struct memory_type *type = heap;

	/* Sanity check. */
	if (ptr < type->start || ptr >= type->end) {
		type = dma;
		if (ptr < type->start || ptr >= type->end)
			return;
	}

	if (free_aligned(ptr, type))
		return;

	ptr -= HDRSIZE;
	hdr = *((hdrtype_t *) ptr);

	/* Not our header (we're probably poisoned). */
	if (!HAS_MAGIC(hdr))
		return;

	/* Double free. */
	if (hdr & FLAG_FREE)
		return;

	*((hdrtype_t *) ptr) = FREE_BLOCK(SIZE(hdr));
	_consolidate(type);
}
void *malloc(size_t size)
{
	return alloc(size, heap);
}

void *dma_malloc(size_t size)
{
	return alloc(size, dma);
}

void *calloc(size_t nmemb, size_t size)
{
	size_t total = nmemb * size;
	void *ptr;

	/* Reject requests whose size computation would overflow. */
	if (size && total / size != nmemb)
		return NULL;

	ptr = alloc(total, heap);
	if (ptr)
		memset(ptr, 0, total);

	return ptr;
}
void *realloc(void *ptr, size_t size)
{
	void *ret, *pptr;
	unsigned int osize;
	struct memory_type *type = heap;

	if (ptr == NULL)
		return alloc(size, type);

	pptr = ptr - HDRSIZE;

	if (!HAS_MAGIC(*((hdrtype_t *) pptr)))
		return NULL;

	if (ptr < type->start || ptr >= type->end)
		type = dma;

	/* Get the original size of the block. */
	osize = SIZE(*((hdrtype_t *) pptr));

	/*
	 * Free the memory to update the tables - this won't touch the actual
	 * memory, so we can still use it for the copy after we have
	 * allocated the new space.
	 */
	free(ptr);
	ret = alloc(size, type);

	/*
	 * If ret == NULL, the allocation failed.
	 * If ret == ptr, no copy is needed.
	 */
	if (ret == NULL || ret == ptr)
		return ret;

	/* Copy the memory to the new location. */
	memcpy(ret, ptr, osize > size ? size : osize);

	return ret;
}
struct align_region_t
{
	/* If alignment is 0 then the region represents a large region which
	 * has no metadata for tracking subelements. */
	int alignment;
	/* start in memory, and size in bytes */
	void *start;
	int size;
	/* layout within a region:
	   - num_elements bytes, 0: free, 1: used, 2: used, combines with next
	   - padding to alignment
	   - data section
	   - waste space

	   start_data points to the start of the data section
	*/
	void *start_data;
	/* number of free blocks sized "alignment" */
	int free;
	struct align_region_t *next;
};
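/*
 * Worked example (illustrative): for a region with alignment 16 holding 64
 * elements, the backing allocation starts with 64 metadata bytes, one per
 * chunk.  A three-chunk allocation is recorded as 2, 2, 1 in consecutive
 * metadata slots; freeing it walks the 2s, clears them, and finally clears
 * the trailing 1, incrementing "free" once per chunk.
 */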
static inline int region_is_large(const struct align_region_t *r)
{
	return r->alignment == 0;
}

static inline int addr_in_region(const struct align_region_t *r, void *addr)
{
	return ((addr >= r->start_data) && (addr < r->start_data + r->size));
}
/* num_elements == 0 indicates a large aligned region instead of a smaller
 * region comprised of alignment-sized chunks. */
static struct align_region_t *allocate_region(int alignment, int num_elements,
					      size_t size, struct memory_type *type)
{
	struct align_region_t *r;
	size_t extra_space;

#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
	printf("%s(old align_regions=%p, alignment=%u, num_elements=%u, size=%zu)\n",
	       __func__, type->align_regions, alignment, num_elements, size);
#endif

	r = malloc(sizeof(*r));

	if (r == NULL)
		return NULL;

	memset(r, 0, sizeof(*r));

	if (num_elements != 0) {
		r->alignment = alignment;
		r->size = num_elements * alignment;
		r->free = num_elements;
		/* Allocate enough memory for alignment requirements and
		 * metadata for each chunk. */
		extra_space = num_elements;
	} else {
		/* Large aligned allocation. Set alignment = 0. */
		r->alignment = 0;
		r->size = size;
		extra_space = 0;
	}

	r->start = alloc(r->size + alignment + extra_space, type);

	if (r->start == NULL) {
		free(r);
		return NULL;
	}

	r->start_data = (void *)ALIGN_UP((uintptr_t)r->start + extra_space,
					 alignment);

	/* Clear the chunk metadata, if any was allocated. */
	memset(r->start, 0, extra_space);

	/* Link the region with the rest. */
	r->next = type->align_regions;
	type->align_regions = r;

	return r;
}
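/*
 * Worked example (illustrative): a small memalign(16, 64) request below ends
 * up here with 1024/16 = 64 elements, so r->size = 64 * 16 = 1024 and
 * extra_space = 64.  The backing alloc() request is therefore
 * 1024 + 16 + 64 bytes: data, worst-case alignment padding, and one
 * metadata byte per chunk.
 */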
static void try_free_region(struct align_region_t **prev_link)
{
	struct align_region_t *r = *prev_link;

	/* All large regions are immediately free-able. Non-large regions
	 * need to be checked for the fully freed state. */
	if (!region_is_large(r)) {
		if (r->free != r->size / r->alignment)
			return;
	}

	/* Unlink the region from the linked list. */
	*prev_link = r->next;

	/* Free the data and metadata. */
	free(r->start);
	free(r);
}
static int free_aligned(void *addr, struct memory_type *type)
{
	struct align_region_t **prev_link = &type->align_regions;

	while (*prev_link != NULL) {
		if (!addr_in_region(*prev_link, addr)) {
			prev_link = &((*prev_link)->next);
			continue;
		}

		if (region_is_large(*prev_link)) {
			try_free_region(prev_link);
			return 1;
		}

		int i = (addr - (*prev_link)->start_data) / (*prev_link)->alignment;
		u8 *meta = (*prev_link)->start;

		/* Clear the chain of "2" markers and the terminating "1". */
		while (meta[i] == 2) {
			meta[i++] = 0;
			(*prev_link)->free++;
		}
		meta[i] = 0;
		(*prev_link)->free++;
		try_free_region(prev_link);
		return 1;
	}
	return 0;
}
static void *alloc_aligned(size_t align, size_t size, struct memory_type *type)
{
	/* Define a large request to be 1024 bytes for either alignment or
	 * size of allocation. */
	const size_t large_request = 1024;

	if (size == 0)
		return 0;
	if (type->align_regions == 0) {
		type->align_regions = malloc(sizeof(struct align_region_t));
		if (type->align_regions == NULL)
			return NULL;
		memset(type->align_regions, 0, sizeof(struct align_region_t));
	}
	struct align_region_t *reg = type->align_regions;

	if (size >= large_request || align >= large_request) {
		reg = allocate_region(align, 0, size, type);
		if (reg == NULL)
			return NULL;
		return reg->start_data;
	}

look_further:
	while (reg != 0) {
		if ((reg->alignment == align) && (reg->free >= (size + align - 1) / align)) {
#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
			printf("  found memalign region. %x free, %x required\n",
			       reg->free, (size + align - 1) / align);
#endif
			break;
		}
		reg = reg->next;
	}
	if (reg == 0) {
#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
		printf("  need to allocate a new memalign region\n");
#endif
		/* get align regions */
		reg = allocate_region(align, large_request / align, size, type);
#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
		printf("  ... returned %p\n", reg);
#endif
	}
	if (reg == 0) {
		/* Nothing available. */
		return (void *)NULL;
	}

	int i, count = 0, target = (size + align - 1) / align;

	/* Scan the metadata for a run of "target" consecutive free chunks. */
	for (i = 0; i < (reg->size / align); i++) {
		if (((u8 *)reg->start)[i] == 0) {
			count++;

			if (count == target) {
				count = i + 1 - count;
				for (i = 0; i < target - 1; i++)
					((u8 *)reg->start)[count + i] = 2;
				((u8 *)reg->start)[count + target - 1] = 1;
				reg->free -= target;
				return reg->start_data + (align * count);
			}
		} else {
			count = 0;
		}
	}
	/* The free space in this region is fragmented, so move on and try the
	 * next one.  The loop terminates because a freshly allocated region
	 * always has enough space. */
	reg = reg->next;
	goto look_further;
}
void *memalign(size_t align, size_t size)
{
	return alloc_aligned(align, size, heap);
}

void *dma_memalign(size_t align, size_t size)
{
	return alloc_aligned(align, size, dma);
}
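/*
 * Usage sketch (illustrative only): memalign(4096, 65536) takes the "large
 * request" path above and returns a page-aligned 64 KiB buffer, while
 * dma_memalign(64, 128) carves two 64-byte chunks out of a shared,
 * cache-coherent aligned region.
 */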
/* This is for debugging purposes. */
#if IS_ENABLED(CONFIG_LP_DEBUG_MALLOC)
void print_malloc_map(void)
{
	struct memory_type *type = heap;
	void *ptr;
	int free_memory;

again:
	ptr = type->start;
	free_memory = 0;

	while (ptr < type->end) {
		hdrtype_t hdr = *((hdrtype_t *) ptr);

		if (!HAS_MAGIC(hdr)) {
			if (type->magic_initialized)
				printf("%s: Poisoned magic - we're toast\n", type->name);
			else
				printf("%s: No magic yet - going to initialize\n", type->name);
			break;
		}

		/* FIXME: Verify the size of the block. */

		printf("%s %x: %s (%x bytes)\n", type->name,
		       (unsigned int)(ptr - type->start),
		       hdr & FLAG_FREE ? "FREE" : "USED", SIZE(hdr));

		if (hdr & FLAG_FREE)
			free_memory += SIZE(hdr);

		ptr += HDRSIZE + SIZE(hdr);
	}

	if (free_memory && (type->minimal_free > free_memory))
		type->minimal_free = free_memory;

	printf("%s: Maximum memory consumption: %u bytes\n", type->name,
	       (type->end - type->start) - HDRSIZE - type->minimal_free);

	if (type != dma) {
		type = dma;
		goto again;
	}
}
#endif