/* AROS.git / rom / kernel / tlsf.c */
#include <exec/types.h>
#include <exec/memory.h>
#include <exec/memheaderext.h>
#include <proto/exec.h>
#include <string.h>

#include "tlsf.h"
#include "kernel_base.h"
#include "kernel_debug.h"

#define D(x)

#undef USE_MACROS

#include <stddef.h>
/*
 * Minimal alignment as required by AROS. Contrary to the default
 * TLSF implementation, we do not allow smaller blocks here.
 */
#define SIZE_ALIGN  AROS_WORSTALIGN
/*
 * Settings for the TLSF allocator:
 * MAX_LOG2_SLI - number of bits used for the second level list
 * MAX_FLI      - maximal allowable allocation size - 2^32 should be enough
 */
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)
#define MAX_FLI         (32)
#define FLI_OFFSET      (6)
#define SMALL_BLOCK     (2 << FLI_OFFSET)

#define REAL_FLI        (MAX_FLI - FLI_OFFSET)

#define ROUNDUP(x)      (((x) + SIZE_ALIGN - 1) & ~(SIZE_ALIGN - 1))
#define ROUNDDOWN(x)    ((x) & ~(SIZE_ALIGN - 1))
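/*
 * Worked example of the two-level mapping defined by these constants
 * (SMALL_BLOCK = 128, MAX_SLI = 32): a free block of 1000 bytes has its most
 * significant bit at position 9, so it is indexed at first level
 * 9 - FLI_OFFSET = 3 and second level (1000 >> (9 - 5)) - 32 = 30, i.e. the
 * size class [992, 1008). Blocks below SMALL_BLOCK all share fl = 0, split
 * into 128/32 = 4-byte classes.
 */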
/* Fields used in the block header length field to identify busy/free blocks */
#define THIS_FREE_MASK (IPTR)1
#define THIS_FREE      (IPTR)1
#define THIS_BUSY      (IPTR)0

#define PREV_FREE_MASK (IPTR)2
#define PREV_FREE      (IPTR)2
#define PREV_BUSY      (IPTR)0

#define SIZE_MASK      (~(THIS_FREE_MASK | PREV_FREE_MASK))

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
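/*
 * Example: a busy 256-byte block whose predecessor is free stores
 * 256 | PREV_FREE = 0x102 in header.length. GET_SIZE() strips the two low
 * bits with SIZE_MASK, which is safe because block sizes are always
 * multiples of SIZE_ALIGN and therefore never occupy those bits.
 */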
/* The free node links together all free blocks of similar size */
typedef struct free_node_s {
    struct bhdr_s *prev;
    struct bhdr_s *next;
} free_node_t;

/* Block header in front of each block - both free and busy ones */
typedef struct hdr_s {
    struct bhdr_s *prev;
    IPTR length;
} hdr_t;
/*
 * Each block is defined by a bhdr_t structure. Free blocks contain only
 * the header, which allows us to go through all memory blocks in the system.
 * Free blocks additionally contain the node which chains them in one
 * of the free block lists.
 */
typedef struct bhdr_s {
    union {
        hdr_t header;
        UBYTE __min_align[SIZE_ALIGN];
    };
    union {
        UBYTE       mem[1];
        free_node_t free_node;
    };
} bhdr_t;
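/*
 * A consequence of this layout: the payload at mem[0] begins exactly
 * SIZE_ALIGN bytes after the header, so MEM_TO_BHDR() below recovers the
 * header from a user pointer by subtracting offsetof(bhdr_t, mem), and
 * GET_NEXT_BHDR() reaches the following header by adding the block size to
 * the payload address.
 */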
/* Memory area within the TLSF pool */
typedef struct tlsf_area_s {
    struct tlsf_area_s  *next;      // Next memory area
    bhdr_t              *end;       // Pointer to "end-of-area" block header
    LONG                autogrown;  // Automatically allocated by the TLSF pool
} tlsf_area_t;
typedef struct {
    tlsf_area_t         *memory_area;

    IPTR                total_size;
    IPTR                free_size;

    ULONG               flbitmap;
    ULONG               slbitmap[REAL_FLI];

    IPTR                autogrow_puddle_size;
    APTR                autogrow_data;
    autogrow_get        autogrow_get_fn;
    autogrow_release    autogrow_release_fn;

    UBYTE               autodestroy_self;

    bhdr_t              *matrix[REAL_FLI][MAX_SLI];
} tlsf_t;
static inline __attribute__((always_inline)) int LS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return __builtin_ffs(i) - 1;
    else
        return __builtin_ffsl(i) - 1;
}

static inline __attribute__((always_inline)) int MS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return 31 - __builtin_clz(i);
    else
        return 63 - __builtin_clzl(i);
}

static inline __attribute__((always_inline)) void SetBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] |= (1 << (nr & 31));
}

static inline __attribute__((always_inline)) void ClrBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] &= ~(1 << (nr & 31));
}
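/*
 * LS()/MS() return the positions of the least and most significant set bits.
 * Together with the bitmaps they maintain the central TLSF invariant: a bit
 * set in flbitmap means slbitmap[fl] is non-zero, which in turn means
 * matrix[fl][sl] heads a non-empty free list. Two find-first-bit operations
 * are therefore enough to locate a usable list in constant time.
 */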
static inline __attribute__((always_inline)) void MAPPING_INSERT(IPTR r, int *fl, int *sl)
{
    if (r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        *fl = MS(r);
        *sl = (int)(((IPTR)r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
    }
}

static inline __attribute__((always_inline)) void MAPPING_SEARCH(IPTR *r, int *fl, int *sl)
{
    if (*r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(*r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        /* Round the request up to the next size class so any list we find is guaranteed to fit */
        IPTR tmp = ((IPTR)1 << (MS(*r) - MAX_LOG2_SLI)) - 1;
        IPTR tr = *r + tmp;

        *fl = MS(tr);
        *sl = (int)(((IPTR)tr >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
        //*r = tr & ~tmp;
    }
}
static inline __attribute__((always_inline)) bhdr_t * FIND_SUITABLE_BLOCK(tlsf_t *tlsf, int *fl, int *sl)
{
    IPTR bitmap_tmp = tlsf->slbitmap[*fl] & (~0 << *sl);
    bhdr_t *b = NULL;

    if (bitmap_tmp)
    {
        *sl = LS(bitmap_tmp);
        b = tlsf->matrix[*fl][*sl];
    }
    else
    {
        bitmap_tmp = tlsf->flbitmap & (~0 << (*fl + 1));
        if (likely(bitmap_tmp != 0))
        {
            *fl = LS(bitmap_tmp);
            *sl = LS(tlsf->slbitmap[*fl]);
            b = tlsf->matrix[*fl][*sl];
        }
    }

    return b;
}
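/*
 * Example of the good-fit search above: a request mapped to (fl = 3, sl = 30)
 * first checks lists 30 and 31 of that first level. If both are empty, the
 * lowest set bit of flbitmap above level 3 is taken, together with the lowest
 * set bit of that level's slbitmap - the smallest list guaranteed to satisfy
 * the request without walking any block chain.
 */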
#ifdef USE_MACROS

#define GET_SIZE(b) ({ IPTR size = b->header.length & SIZE_MASK; size; })
#define GET_FLAGS(b) ({ IPTR flags = b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK); flags; })
#define SET_SIZE(b, size) do{ b->header.length = GET_FLAGS(b) | (size); }while(0)
#define SET_FLAGS(b, flags) do{ b->header.length = GET_SIZE(b) | (flags); }while(0)
#define SET_SIZE_AND_FLAGS(b, size, flags) do{ b->header.length = (size) | (flags); }while(0)
#define FREE_BLOCK(b) ((b->header.length & THIS_FREE_MASK) == THIS_FREE)
#define SET_FREE_BLOCK(b) do{ b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE; }while(0)
#define SET_BUSY_BLOCK(b) do{ b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY; }while(0)
#define SET_FREE_PREV_BLOCK(b) do{ b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE; }while(0)
#define SET_BUSY_PREV_BLOCK(b) do{ b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY; }while(0)
#define FREE_PREV_BLOCK(b) ((b->header.length & PREV_FREE_MASK) == PREV_FREE)
#define GET_NEXT_BHDR(hdr, size) ({ bhdr_t * __b = (bhdr_t *)((UBYTE *)&hdr->mem[0] + (size)); __b; })
#define MEM_TO_BHDR(ptr) ({ bhdr_t * b = (bhdr_t *)((void *)(ptr) - offsetof(bhdr_t, mem)); b; })

#define REMOVE_HEADER(tlsf, b, fl, sl) do{ \
    if (b->free_node.next) \
        b->free_node.next->free_node.prev = b->free_node.prev; \
    if (b->free_node.prev) \
        b->free_node.prev->free_node.next = b->free_node.next; \
    if (tlsf->matrix[fl][sl] == b) { \
        tlsf->matrix[fl][sl] = b->free_node.next; \
        if (!tlsf->matrix[fl][sl]) \
            ClrBit(sl, &tlsf->slbitmap[fl]); \
        if (!tlsf->slbitmap[fl]) \
            ClrBit(fl, &tlsf->flbitmap); \
    } }while(0)

#define INSERT_FREE_BLOCK(tlsf, b) do{ \
    int fl, sl; MAPPING_INSERT(GET_SIZE(b), &fl, &sl); \
    b->free_node.prev = NULL; \
    b->free_node.next = tlsf->matrix[fl][sl]; \
    if (tlsf->matrix[fl][sl]) \
        tlsf->matrix[fl][sl]->free_node.prev = b; \
    tlsf->matrix[fl][sl] = b; \
    SetBit(fl, &tlsf->flbitmap); \
    SetBit(sl, &tlsf->slbitmap[fl]); }while(0)

#else
static inline __attribute__((always_inline)) IPTR GET_SIZE(bhdr_t *b)
{
    return b->header.length & SIZE_MASK;
}

static inline __attribute__((always_inline)) IPTR GET_FLAGS(bhdr_t *b)
{
    return b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK);
}

static inline __attribute__((always_inline)) void SET_SIZE(bhdr_t *b, IPTR size)
{
    b->header.length = GET_FLAGS(b) | size;
}

static inline __attribute__((always_inline)) void SET_SIZE_AND_FLAGS(bhdr_t *b, IPTR size, IPTR flags)
{
    b->header.length = size | flags;
}

static inline __attribute__((always_inline)) int FREE_BLOCK(bhdr_t *b)
{
    return ((b->header.length & THIS_FREE_MASK) == THIS_FREE);
}

static inline __attribute__((always_inline)) void SET_FREE_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE;
}

static inline __attribute__((always_inline)) void SET_BUSY_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY;
}

static inline __attribute__((always_inline)) void SET_FREE_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE;
}

static inline __attribute__((always_inline)) void SET_BUSY_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY;
}

static inline __attribute__((always_inline)) int FREE_PREV_BLOCK(bhdr_t *b)
{
    return ((b->header.length & PREV_FREE_MASK) == PREV_FREE);
}

static inline __attribute__((always_inline)) bhdr_t * GET_NEXT_BHDR(bhdr_t *hdr, IPTR size)
{
    return (bhdr_t *)((UBYTE *)&hdr->mem[0] + size);
}

static inline __attribute__((always_inline)) bhdr_t * MEM_TO_BHDR(void *ptr)
{
    return (bhdr_t *)(ptr - offsetof(bhdr_t, mem));
}

static inline __attribute__((always_inline)) void REMOVE_HEADER(tlsf_t *tlsf, bhdr_t *b, int fl, int sl)
{
    if (b->free_node.next)
        b->free_node.next->free_node.prev = b->free_node.prev;
    if (b->free_node.prev)
        b->free_node.prev->free_node.next = b->free_node.next;
    if (tlsf->matrix[fl][sl] == b)
    {
        tlsf->matrix[fl][sl] = b->free_node.next;
        if (!tlsf->matrix[fl][sl])
            ClrBit(sl, &tlsf->slbitmap[fl]);
        if (!tlsf->slbitmap[fl])
            ClrBit(fl, &tlsf->flbitmap);
    }
}

static inline __attribute__((always_inline)) void INSERT_FREE_BLOCK(tlsf_t *tlsf, bhdr_t *b)
{
    int fl, sl;

    MAPPING_INSERT(GET_SIZE(b), &fl, &sl);

    b->free_node.prev = NULL;
    b->free_node.next = tlsf->matrix[fl][sl];
    if (tlsf->matrix[fl][sl])
        tlsf->matrix[fl][sl]->free_node.prev = b;
    tlsf->matrix[fl][sl] = b;

    SetBit(fl, &tlsf->flbitmap);
    SetBit(sl, &tlsf->slbitmap[fl]);
}

#endif /* USE_MACROS */
void * tlsf_malloc(struct MemHeaderExt *mhe, IPTR size, ULONG *flags)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    int fl, sl;
    bhdr_t *b = NULL;

    size = ROUNDUP(size);

    D(nbug("tlsf_malloc(%p, %ld)\n", tlsf, size));

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Find the indices fl and sl for the given size */
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Find a block of either the right size or larger */
    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);

    D(nbug("  tlsf_malloc - adjusted size %ld\n", size));

    /* No block found? Either failure or tlsf will get more memory. */
    if (unlikely(!b))
    {
        D(nbug("tlsf_malloc out of memory\n"));

        /* Do we have the autogrow feature? */
        if (tlsf->autogrow_get_fn)
        {
            /* Increase the size of the requested block so that we can fit the headers too */
            IPTR sz = size + 3 * ROUNDUP(sizeof(hdr_t));

            /* Requested size less than the puddle size? Use the puddle size then */
            if (sz < tlsf->autogrow_puddle_size)
                sz = tlsf->autogrow_puddle_size;

            D(nbug("querying for %ld bytes\n", sz));

            /* Try to get some memory */
            void * ptr = tlsf->autogrow_get_fn(tlsf->autogrow_data, &sz);

            /* Got it? Add it to tlsf then */
            if (ptr)
            {
                tlsf_add_memory(mhe, ptr, sz);

                /* We know the newly added memory is first in the list. Set the autogrown flag there */
                tlsf->memory_area->autogrown = 1;

                /* Memory is there. Try to find the block again */
                MAPPING_SEARCH(&size, &fl, &sl);
                b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
            }
        }

        /* Still no block? FAILURE! */
        if (!b)
        {
            if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
                ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

            return NULL;
        }
    }

    /* Next header */
    bhdr_t *next = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Remove the found block from the free list */
    REMOVE_HEADER(tlsf, b, fl, sl);

    /* Is this block larger than requested? Try to split it then */
    if (likely(GET_SIZE(b) > (size + ROUNDUP(sizeof(hdr_t)))))
    {
        /* New split block */
        bhdr_t *sb = GET_NEXT_BHDR(b, size);
        sb->header.prev = b;

        /* Set the size; this block is free, the previous one is busy */
        SET_SIZE_AND_FLAGS(sb, GET_SIZE(b) - size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* The next header points to the free block now */
        next->header.prev = sb;

        /* The previous block (sb) is free */
        SET_FREE_PREV_BLOCK(next);

        /* Allocated block size truncated */
        SET_SIZE(b, size);

        D(nbug("  new split block of size %ld\n", GET_SIZE(sb)));

        /* The free block is inserted into the free list */
        INSERT_FREE_BLOCK(tlsf, sb);
    }
    else
    {
        /* The block was of the right size. Just mark it busy in the next header */
        SET_BUSY_PREV_BLOCK(next);
    }

    /* The allocated block is busy */
    SET_BUSY_BLOCK(b);

    /* Clear the pointers just in case */
    b->free_node.next = NULL;
    b->free_node.prev = NULL;

    /* Update counters */
    tlsf->free_size -= GET_SIZE(b);
    mhe->mhe_MemHeader.mh_Free = tlsf->free_size;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    if (flags && (*flags & MEMF_CLEAR))
        bzero(&b->mem[0], size);

    /* And return the memory */
    return &b->mem[0];
}
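/*
 * Illustrative sketch (not part of the allocator): tlsf_malloc() is normally
 * reached through the MemHeaderExt function table that
 * krnCreateTLSFMemHeader() fills in at the end of this file. A hypothetical
 * caller would look like this:
 */
#if 0
static APTR example_alloc_cleared(struct MemHeaderExt *mhe, IPTR size)
{
    ULONG flags = MEMF_CLEAR;   /* request zeroed memory */

    /* mhe_Alloc points to tlsf_malloc for TLSF-managed headers */
    return mhe->mhe_Alloc(mhe, size, &flags);
}
#endif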
static inline __attribute__((always_inline)) void MERGE(bhdr_t *b1, bhdr_t *b2)
{
    /* Merging adjusts the size - it's the sum of both sizes plus the size of a block header */
    SET_SIZE(b1, GET_SIZE(b1) + GET_SIZE(b2) + ROUNDUP(sizeof(hdr_t)));
}

static inline __attribute__((always_inline)) bhdr_t * MERGE_PREV(tlsf_t *tlsf, bhdr_t *block)
{
    /* Is the previous block free? */
    if (FREE_PREV_BLOCK(block))
    {
        int fl, sl;
        bhdr_t *prev = block->header.prev;

        /* Calculate the index for removal */
        MAPPING_INSERT(GET_SIZE(prev), &fl, &sl);

        /* Remove the header from the free list */
        REMOVE_HEADER(tlsf, prev, fl, sl);

        /* Merge */
        MERGE(prev, block);

        return prev;
    }
    else
        return block;
}

static inline __attribute__((always_inline)) bhdr_t * MERGE_NEXT(tlsf_t *tlsf, bhdr_t *block)
{
    bhdr_t *next = GET_NEXT_BHDR(block, GET_SIZE(block));

    /* Is the next block free? */
    if (FREE_BLOCK(next))
    {
        int fl, sl;

        /* Calculate the index for removal */
        MAPPING_INSERT(GET_SIZE(next), &fl, &sl);

        /* Remove the header from the free list */
        REMOVE_HEADER(tlsf, next, fl, sl);

        /* Merge the blocks */
        MERGE(block, next);
    }

    return block;
}
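/*
 * Example: when a busy block B between two free blocks A and C is freed,
 * MERGE_PREV() folds A and B together and MERGE_NEXT() then folds in C. The
 * resulting block has size GET_SIZE(A) + GET_SIZE(B) + GET_SIZE(C) +
 * 2 * ROUNDUP(sizeof(hdr_t)), since the two swallowed headers become payload
 * again.
 */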
void tlsf_freevec(struct MemHeaderExt *mhe, APTR ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *fb = MEM_TO_BHDR(ptr);
    bhdr_t *next;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Mark the block as free */
    SET_FREE_BLOCK(fb);

    /* Adjust the free size field of tlsf */
    tlsf->free_size += GET_SIZE(fb);

    /* Try to merge with the previous and next blocks (if free) */
    fb = MERGE_PREV(tlsf, fb);
    fb = MERGE_NEXT(tlsf, fb);

    /* Tell the next block that the previous one is free. Also update the prev link in case it changed */
    next = GET_NEXT_BHDR(fb, GET_SIZE(fb));
    SET_FREE_PREV_BLOCK(next);
    next->header.prev = fb;

    /* Insert the free block into the proper list */
    INSERT_FREE_BLOCK(tlsf, fb);

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);
}
void tlsf_freemem(struct MemHeaderExt *mhe, APTR ptr, IPTR size)
{
    /* The size is ignored: the block header already records how large the allocation is */
    (void)size;
    tlsf_freevec(mhe, ptr);
}
void * tlsf_realloc(struct MemHeaderExt *mhe, APTR ptr, IPTR new_size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *b;
    bhdr_t *bnext;
    int fl;
    int sl;

    /* NULL pointer? Just allocate the memory */
    if (unlikely(!ptr))
        return tlsf_malloc(mhe, new_size, NULL);

    /* size == 0? Free the memory */
    if (unlikely(!new_size))
    {
        tlsf_freevec(mhe, ptr);
        return NULL;
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    new_size = ROUNDUP(new_size);

    b = MEM_TO_BHDR(ptr);

    if (unlikely(new_size == GET_SIZE(b)))
    {
        /* Size unchanged: nothing to do, but drop the semaphore we may hold */
        if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
            ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

        return ptr;
    }

    bnext = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Is the new size smaller than the previous one? Try to split the block if this is the case */
    if (new_size <= (GET_SIZE(b)))
    {
        /* The new header starts right after the shrunken block b */
        bhdr_t *b1 = GET_NEXT_BHDR(b, new_size);

        /* Update pointer and size */
        b1->header.prev = b;
        SET_SIZE_AND_FLAGS(b1, GET_SIZE(b) - new_size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* The current block gets smaller */
        SET_SIZE(b, new_size);

        tlsf->free_size += GET_SIZE(b1);

        /* Try to merge with the next block */
        b1 = MERGE_NEXT(tlsf, b1);

        /* Tell the next block that the previous one is free. Also update the prev link in case it changed */
        bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
        SET_FREE_PREV_BLOCK(bnext);
        bnext->header.prev = b1;

        /* Insert the free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, b1);
    }
    else
    {
        /* Is the next block free? Is there enough free space? */
        if (FREE_BLOCK(bnext) && new_size <= GET_SIZE(b) + GET_SIZE(bnext) + ROUNDUP(sizeof(hdr_t)))
        {
            bhdr_t *b1;
            IPTR rest_size = ROUNDUP(sizeof(hdr_t)) + GET_SIZE(bnext) + GET_SIZE(b) - new_size;

            MAPPING_INSERT(GET_SIZE(bnext), &fl, &sl);

            REMOVE_HEADER(tlsf, bnext, fl, sl);

            if (rest_size > ROUNDUP(sizeof(hdr_t)))
            {
                /* Enough is left over for a separate free block */
                rest_size -= ROUNDUP(sizeof(hdr_t));

                SET_SIZE(b, new_size);

                b1 = GET_NEXT_BHDR(b, GET_SIZE(b));
                b1->header.prev = b;

                SET_SIZE_AND_FLAGS(b1, rest_size, THIS_FREE | PREV_BUSY);

                bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
                bnext->header.prev = b1;
                SET_FREE_PREV_BLOCK(bnext);

                INSERT_FREE_BLOCK(tlsf, b1);
            }
            else
            {
                /* Remainder too small to split off; absorb it into the block */
                if (rest_size)
                    SET_SIZE(b, new_size + ROUNDUP(sizeof(hdr_t)));
                else
                    SET_SIZE(b, new_size);

                bnext = GET_NEXT_BHDR(b, GET_SIZE(b));
                bnext->header.prev = b;
                SET_BUSY_PREV_BLOCK(bnext);
            }
        }
        else
        {
            /* The next block was not free. Create a new buffer and copy the old contents there */
            void * p = tlsf_malloc(mhe, new_size, NULL);
            if (p)
            {
                CopyMemQuick(ptr, p, GET_SIZE(b));
                tlsf_freevec(mhe, ptr);
                b = MEM_TO_BHDR(p);
            }
        }
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return b->mem;
}
void * tlsf_allocabs(struct MemHeaderExt *mhe, IPTR size, void *ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    UBYTE *region_start;
    UBYTE *region_end;

    int fl, sl;
    IPTR sz = ROUNDUP(size);

    D(nbug("[TLSF] allocabs(%p, %ld)\n", ptr, size));

    region_start = ptr;
    region_end = (UBYTE *)ptr + sz;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Start searching here. It doesn't make sense to go through regions which are smaller */
    MAPPING_SEARCH(&sz, &fl, &sl);

    /* Start looking now :) */
    for (; fl < MAX_FLI; fl++)
    {
        for (; sl < MAX_SLI; sl++)
        {
            bhdr_t *b0 = tlsf->matrix[fl][sl];

            /* If a block was there, check it */
            while (b0)
            {
                bhdr_t *b1 = GET_NEXT_BHDR(b0, GET_SIZE(b0));

                /* The block has to contain the _whole_ requested region; it may exceed it in size though */
                if (b0->mem <= region_start && (UBYTE *)b1 >= region_end)
                {
                    /* Block header of the requested region */
                    bhdr_t *breg = MEM_TO_BHDR(ptr);

                    /*
                     This is the block we're looking for. Unchain it from the bidirectional list of
                     free blocks now.

                     The previous entry's next will point to this block's next. If previous is NULL,
                     the matrix will be set to this block's next.
                     */
                    if (b0->free_node.prev)
                        b0->free_node.prev->free_node.next = b0->free_node.next;
                    else
                        tlsf->matrix[fl][sl] = b0->free_node.next;

                    /*
                     The next entry's prev will point to this block's previous.
                     */
                    if (b0->free_node.next)
                        b0->free_node.next->free_node.prev = b0->free_node.prev;

                    /* Empty SL list for this size class? Clear the bit */
                    if (!tlsf->matrix[fl][sl])
                    {
                        ClrBit(sl, &tlsf->slbitmap[fl]);

                        /* Entire SL bitmap empty for the given FL index? Clear that bit too */
                        if (!tlsf->slbitmap[fl])
                            ClrBit(fl, &tlsf->flbitmap);
                    }

                    b0->free_node.prev = NULL;
                    b0->free_node.next = NULL;
                    SET_BUSY_BLOCK(b0);

                    /*
                     At this point the block is removed from the free list and marked as used.
                     Now, split it if necessary...
                     */

                    /* Beginning of the block != beginning of the block header of the requested region? */
                    if (b0 != breg)
                    {
                        /*
                         Adjust the region's block header. Mark in the size field that the previous block
                         (aka b0) is free. Reduce the size of b0 as well as the size of breg.
                         */
                        breg->header.prev = b0;
                        SET_SIZE_AND_FLAGS(breg, GET_SIZE(b0) - ((IPTR)breg - (IPTR)b0), PREV_FREE | THIS_BUSY);

                        /* Update the next block. Mark in its size field that the previous block (breg) is used */
                        b1->header.prev = breg;
                        SET_BUSY_PREV_BLOCK(b1);

                        /* b0's prev state is kept. b0 itself is marked as a free block */
                        SET_FREE_BLOCK(b0);
                        SET_SIZE(b0, (IPTR)breg - (IPTR)b0->mem);

                        /* Insert b0 into the free list */
                        MAPPING_INSERT(GET_SIZE(b0), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b0);
                    }

                    /* Is it necessary to split the requested region at the end? */
                    if ((SIZE_ALIGN + GET_SIZE(breg)) > size)
                    {
                        IPTR tmp_size = GET_SIZE(breg) - size - SIZE_ALIGN;

                        /* New region header directly at the end of the requested region */
                        bhdr_t *b2 = GET_NEXT_BHDR(breg, size);

                        /* Adjust the fields */
                        b2->header.prev = breg;
                        SET_SIZE_AND_FLAGS(b2, tmp_size, PREV_BUSY | THIS_FREE);

                        /* The requested region's size is now smaller */
                        SET_SIZE(breg, size);

                        /* The next block header points to the newly created one */
                        b1->header.prev = b2;
                        SET_FREE_PREV_BLOCK(b1);

                        /* Insert the newly created block into the free list */
                        MAPPING_INSERT(GET_SIZE(b2), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b2);
                    }

                    tlsf->free_size -= GET_SIZE(breg);

                    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
                        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

                    return breg->mem;
                }

                b0 = b0->free_node.next;
            }
        }

        /* Scan the remaining first level lists from their first second level list */
        sl = 0;
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return NULL;
}
tlsf_area_t * init_memory_area(void *memory, IPTR size)
{
    bhdr_t *hdr = (bhdr_t *)memory;
    bhdr_t *b;
    bhdr_t *bend;

    tlsf_area_t *area;

    size = ROUNDDOWN(size);

    /* Prepare the first header, which protects the tlsf_area_t header */
    hdr->header.length = ROUNDUP(sizeof(tlsf_area_t)) | THIS_BUSY | PREV_BUSY;
    hdr->header.prev = NULL;

    b = GET_NEXT_BHDR(hdr, ROUNDUP(sizeof(tlsf_area_t)));
    b->header.prev = hdr;
    b->header.length = (size - 3 * ROUNDUP(sizeof(hdr_t)) - ROUNDUP(sizeof(tlsf_area_t))) | PREV_BUSY | THIS_BUSY;

    bend = GET_NEXT_BHDR(b, GET_SIZE(b));
    bend->header.length = 0 | THIS_BUSY | PREV_BUSY;
    bend->header.prev = b;

    area = (tlsf_area_t *)hdr->mem;
    area->end = bend;

    return area;
}
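/*
 * Resulting layout of a freshly initialized area:
 *
 *   hdr             b                   bend
 *   +---------------+-------------------+----------+
 *   | tlsf_area_t   | free space ...    | sentinel |
 *   +---------------+-------------------+----------+
 *
 * The first header protects the tlsf_area_t bookkeeping, the middle block is
 * handed to the free lists by tlsf_add_memory() below, and the zero-length
 * sentinel (bend) stops block walks at the end of the area.
 */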
void tlsf_add_memory(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("tlsf_add_memory(%p, %p, %ld)\n", tlsf, memory, size));

    if (memory && size > 3 * ROUNDUP(sizeof(bhdr_t)))
    {
        tlsf_area_t *area = init_memory_area(memory, size);
        bhdr_t *b;

        D(nbug("  adding memory\n"));

        area->next = tlsf->memory_area;
        tlsf->memory_area = area;

        /* User-added memory. Not autogrown */
        area->autogrown = 0;

        b = MEM_TO_BHDR(area);
        b = GET_NEXT_BHDR(b, GET_SIZE(b));

        tlsf->total_size += size;

        D(nbug("  total_size=%08lx\n", tlsf->total_size));

        /* Free the middle block prepared by init_memory_area(), putting it on the free lists */
        tlsf_freevec(mhe, b->mem);
    }
}
void tlsf_add_memory_and_merge(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_add_memory(mhe, memory, size);
    // TODO: add memory and merge...
}

#if 0
void bzero(void *ptr, IPTR len)
{
    UBYTE *p = (UBYTE *)ptr;

    while (len--)
        *p++ = 0;
}
#endif
void * tlsf_init(struct MemHeaderExt *mhe)
{
    tlsf_t *tlsf = NULL;
    void * ptr = mhe->mhe_MemHeader.mh_Lower;

    /* If the MemHeaderExt starts at the beginning of the handled memory, advance the ptr */
    if (mhe == ptr)
        ptr += ROUNDUP(sizeof(struct MemHeaderExt));

    /* Is there enough room for tlsf in the mem header itself? */
    if (mhe->mhe_MemHeader.mh_Free >= (ROUNDUP(sizeof(tlsf_t)) + 3 * ROUNDUP(sizeof(bhdr_t))))
    {
        /* tlsf will be stored inside the handled memory */
        tlsf = (tlsf_t *)ptr;

        ptr += ROUNDUP(sizeof(tlsf_t));

        bzero(tlsf, sizeof(tlsf_t));
        tlsf->autodestroy_self = 0;
    }
    else
    {
        /* No place for the tlsf header in the MemHeaderExt? Allocate it separately */
        tlsf = AllocMem(sizeof(tlsf_t), MEMF_ANY);

        if (tlsf)
        {
            bzero(tlsf, sizeof(tlsf_t));
            tlsf->autodestroy_self = 1;
        }
    }

    /* Store the tlsf pointer in the UserData field */
    mhe->mhe_UserData = tlsf;

    if (tlsf && ptr < mhe->mhe_MemHeader.mh_Upper)
    {
        tlsf_add_memory(mhe, ptr, (IPTR)mhe->mhe_MemHeader.mh_Upper - (IPTR)ptr);
    }

    return tlsf;
}
void * tlsf_init_autogrow(struct MemHeaderExt *mhe, IPTR puddle_size, autogrow_get grow_function, autogrow_release release_function, APTR autogrow_data)
{
    tlsf_t *tlsf = tlsf_init(mhe);

    if (tlsf)
    {
        if (puddle_size < 4096)
            puddle_size = 4096;

        tlsf->autogrow_puddle_size = puddle_size;
        tlsf->autogrow_data = autogrow_data;
        tlsf->autogrow_get_fn = grow_function;
        tlsf->autogrow_release_fn = release_function;
    }

    return tlsf;
}
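/*
 * Note on the autogrow contract: the grow callback receives the requested
 * size by reference and may round it up; tlsf_malloc() then hands whatever
 * amount was actually delivered to tlsf_add_memory(). See the
 * fetch_more_ram()/release_ram() pair near the end of this file for the
 * kernel's own callbacks.
 */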
void tlsf_destroy(struct MemHeaderExt *mhe)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("tlsf_destroy(%p)\n", tlsf));

    if (tlsf)
    {
        tlsf_area_t *area = tlsf->memory_area;

        if (tlsf->autogrow_release_fn)
        {
            while (area)
            {
                tlsf_area_t *next = area->next;
                bhdr_t *b;
                void *begin;
                void *end;
                IPTR size;

                /*
                 * Autogrown area? Release it here.
                 * Otherwise it's the responsibility of the add_memory_area caller.
                 */
                if (area->autogrown)
                {
                    /* Get the beginning of this area */
                    begin = MEM_TO_BHDR(area);

                    /* Get the sentinel block */
                    b = area->end;

                    /* The end of this area is the end of the sentinel block */
                    end = GET_NEXT_BHDR(b, 0);

                    /* Calculate the size of the area */
                    size = (IPTR)end - (IPTR)begin;

                    tlsf->autogrow_release_fn(tlsf->autogrow_data, begin, size);
                }

                area = next;
            }
        }

        if (tlsf->autodestroy_self)
            FreeMem(tlsf, sizeof(tlsf_t));
    }
}
IPTR tlsf_avail(struct MemHeaderExt *mhe, ULONG requirements)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    IPTR ret = 0;

    if (requirements & MEMF_TOTAL)
        ret = tlsf->total_size;
    else if (requirements & MEMF_LARGEST)
    {
        bhdr_t *b = NULL;

        if (tlsf->flbitmap)
        {
            int fl = MS(tlsf->flbitmap);

            if (tlsf->slbitmap[fl])
            {
                int sl = MS(tlsf->slbitmap[fl]);

                b = tlsf->matrix[fl][sl];
            }
        }

        while (b)
        {
            if (GET_SIZE(b) > ret)
                ret = GET_SIZE(b);

            b = b->free_node.next;
        }
    }
    else
        ret = tlsf->free_size;

    return ret;
}
BOOL tlsf_in_bounds(struct MemHeaderExt *mhe, void *begin, void *end)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    tlsf_area_t *area;

    area = tlsf->memory_area;

    D(nbug("tlsf_in_bounds(%p, %p, %p)\n", tlsf, begin, end));

    while (area)
    {
        D(nbug("    area %p\n", area));
        /*
         * Do checks only if the questioned memory ends before the end (sentinel bhdr)
         * of the area
         */
        if ((IPTR)end <= (IPTR)area->end)
        {
            D(nbug("    end <= area->end (%p <= %p)\n", end, area->end));

            /* Get the bhdr of this area */
            bhdr_t *b = MEM_TO_BHDR(area);

            /* Forward to the beginning of the memory */
            b = GET_NEXT_BHDR(b, GET_SIZE(b));

            /* The requested memory starts at or after the beginning of the area */
            if ((IPTR)begin >= (IPTR)b->mem)
            {
                D(nbug("    begin >= b->mem (%p >= %p)\n", begin, b->mem));
                return TRUE;
            }
        }

        area = area->next;
    }

    return FALSE;
}
#include "tlsf.h"
static void destroy_Pool(struct MemHeaderExt *mhe)
{
    tlsf_destroy(mhe);
}

static APTR fetch_more_ram(void *data, IPTR *size)
{
    struct MemHeaderExt *mhe = (struct MemHeaderExt *)data;

    D(nbug("[TLSF] fetch_more_ram(%p, %ld)\n", mhe, *size));

    APTR ptr = AllocMem(*size, mhe->mhe_MemHeader.mh_Attributes);
    return ptr;
}

static VOID release_ram(void *data, APTR ptr, IPTR size)
{
    D(nbug("[TLSF] release_ram(%p, %ld)\n", ptr, size));

    FreeMem(ptr, size);
}

static void * init_Pool(struct MemHeaderExt *mhe, IPTR puddleSize, IPTR initialSize)
{
    return tlsf_init_autogrow(mhe, puddleSize, fetch_more_ram, release_ram, mhe);
}
void krnCreateTLSFMemHeader(CONST_STRPTR name, BYTE pri, APTR start, IPTR size, ULONG flags)
{
    /* If the end is less than (1 << 31), MEMF_31BIT is implied */
    if (((IPTR)start + size) < (1UL << 31))
        flags |= MEMF_31BIT;
    else
        flags &= ~MEMF_31BIT;

    flags |= MEMF_MANAGED;

    struct MemHeaderExt *mhe = start;

    mhe->mhe_DestroyPool = destroy_Pool;
    mhe->mhe_InitPool    = init_Pool;

    mhe->mhe_Alloc       = tlsf_malloc;
    mhe->mhe_AllocVec    = tlsf_malloc;
    mhe->mhe_Free        = tlsf_freemem;
    mhe->mhe_FreeVec     = tlsf_freevec;
    mhe->mhe_AllocAbs    = tlsf_allocabs;
    mhe->mhe_ReAlloc     = tlsf_realloc;
    mhe->mhe_Avail       = tlsf_avail;
    mhe->mhe_InBounds    = tlsf_in_bounds;

    mhe->mhe_MemHeader.mh_Node.ln_Succ = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Pred = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Type = NT_MEMORY;
    mhe->mhe_MemHeader.mh_Node.ln_Name = (STRPTR)name;
    mhe->mhe_MemHeader.mh_Node.ln_Pri  = pri;
    mhe->mhe_MemHeader.mh_Attributes   = flags;

    /* The memory is managed by TLSF; no MemChunk list is built */
    mhe->mhe_MemHeader.mh_First = NULL;

    mhe->mhe_UserData = NULL;

    /*
     * mh_Lower and mh_Upper are informational only. Since our MemHeader resides
     * inside the region it describes, the region includes the MemHeader.
     */
    mhe->mhe_MemHeader.mh_Lower = start;
    mhe->mhe_MemHeader.mh_Upper = start + size;
    mhe->mhe_MemHeader.mh_Free  = size;

    tlsf_init(mhe);
}
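/*
 * Illustrative sketch (not part of the kernel code): creating a managed TLSF
 * MemHeader over a raw memory region. The address, size, and flags below are
 * made-up example values.
 */
#if 0
static void example_setup(void)
{
    APTR base = (APTR)0x08000000;       /* hypothetical region start */
    IPTR size = 16 * 1024 * 1024;       /* hypothetical region size  */

    krnCreateTLSFMemHeader("Fast RAM", 0, base, size, MEMF_FAST | MEMF_PUBLIC);
}
#endif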