#include <exec/types.h>
#include <exec/memory.h>
#include <exec/memheaderext.h>
#include <proto/exec.h>
#include <string.h>

#include "tlsf.h"
#include "kernel_base.h"
#include "kernel_debug.h"

#define D(x)

#undef USE_MACROS

#include <stddef.h>
/*
 * Minimal alignment as required by AROS. Contrary to the default
 * TLSF implementation, we do not allow smaller blocks here.
 * Size needs to be aligned to at least 8, see THIS_FREE_MASK comment.
 */
#define SIZE_ALIGN  AROS_WORSTALIGN
/*
 * Settings for the TLSF allocator:
 * MAX_LOG2_SLI - number of bits used for the second-level list
 * MAX_FLI      - maximal allowable allocation size - 2^32 should be enough
 */
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)
#define MAX_FLI         (32)
#define FLI_OFFSET      (6)
#define SMALL_BLOCK     (2 << FLI_OFFSET)

#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
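
/*
 * Illustrative note: with these settings SMALL_BLOCK is 128. Requests below
 * 128 bytes all share first-level index 0 and are spread linearly over
 * MAX_SLI = 32 second-level classes of 4 bytes each. Larger requests use
 * fl = MS(size) - FLI_OFFSET, i.e. one first-level index per power of two,
 * each subdivided into 32 logarithmically spaced second-level classes.
 */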
#define ROUNDUP(x)      (((x) + SIZE_ALIGN - 1) & ~(SIZE_ALIGN - 1))
#define ROUNDDOWN(x)    ((x) & ~(SIZE_ALIGN - 1))
/* Fields used in the block header length field to identify busy/free blocks */
#define THIS_FREE_MASK (IPTR)1
#define THIS_FREE      (IPTR)1
#define THIS_BUSY      (IPTR)0

#define PREV_FREE_MASK (IPTR)2
#define PREV_FREE      (IPTR)2
#define PREV_BUSY      (IPTR)0

#define SIZE_MASK      (~(THIS_FREE_MASK | PREV_FREE_MASK))
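
/*
 * Because every block length is a multiple of SIZE_ALIGN (at least 8), the
 * two least significant bits of the length field are always zero and can
 * safely carry the THIS_FREE and PREV_FREE flags; SIZE_MASK strips them
 * again whenever the real length is needed.
 */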
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* Size of additional memory needed to manage a new block */
#define HEADERS_SIZE (((3 * ROUNDUP(sizeof(hdr_t))) + ROUNDUP(sizeof(tlsf_area_t))))
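
/*
 * The three block headers accounted for here are the ones every memory area
 * carries: the block guarding the tlsf_area_t record, the header of the
 * initial free block, and the "end-of-area" sentinel - see the layout sketch
 * preceding init_memory_area().
 */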
/* free node links together all free blocks of similar size */
typedef struct free_node_s {
    struct bhdr_s * prev;
    struct bhdr_s * next;
} free_node_t;
/* block header in front of each block - both free and busy */
typedef struct hdr_s {
    struct bhdr_s * prev;
    IPTR            length;
} hdr_t;
/*
 * Each block is described by a bhdr_t structure. Busy blocks contain only
 * the header, which allows us to walk through all memory blocks in the
 * system. Free blocks additionally contain the node which chains them into
 * one of the free block lists.
 */
typedef struct bhdr_s {
    union {
        hdr_t   header;
        UBYTE   __min_align[SIZE_ALIGN];
    };
    union {
        UBYTE       mem[1];
        free_node_t free_node;
    };
} bhdr_t;
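
/*
 * Resulting in-memory layout (illustrative):
 *
 *   +------------------+----------------------------------+
 *   | header           | payload                          |
 *   | prev, length     | mem[] (busy) / free_node (free)  |
 *   +------------------+----------------------------------+
 *
 * GET_NEXT_BHDR() below relies on this: the next block header starts
 * exactly GET_SIZE(b) bytes past &b->mem[0].
 */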
/* Memory area within the TLSF pool */
typedef struct tlsf_area_s {
    struct tlsf_area_s *    next;       // Next memory area
    bhdr_t *                end;        // Pointer to "end-of-area" block header
    LONG                    autogrown;  // Automatically allocated by TLSF pool
} tlsf_area_t;
typedef struct {
    tlsf_area_t *       memory_area;

    IPTR                total_size;
    IPTR                free_size;

    ULONG               flbitmap;
    ULONG               slbitmap[REAL_FLI];

    IPTR                autogrow_puddle_size;
    ULONG               autogrow_requirements;
    APTR                autogrow_data;
    autogrow_get        autogrow_get_fn;
    autogrow_release    autogrow_release_fn;

    UBYTE               autodestroy_self;

    bhdr_t *            matrix[REAL_FLI][MAX_SLI];
} tlsf_t;
static inline __attribute__((always_inline)) int LS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return __builtin_ffs(i) - 1;
    else
        return __builtin_ffsl(i) - 1;
}

static inline __attribute__((always_inline)) int MS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return 31 - __builtin_clz(i);
    else
        return 63 - __builtin_clzl(i);
}
static inline __attribute__((always_inline)) void SetBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] |= (1 << (nr & 31));
}

static inline __attribute__((always_inline)) void ClrBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] &= ~(1 << (nr & 31));
}
static inline __attribute__((always_inline)) void MAPPING_INSERT(IPTR r, int *fl, int *sl)
{
    if (r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        *fl = MS(r);
        *sl = (int)(((IPTR)r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
    }
}
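
/*
 * Worked example: r = 480 has its most significant bit at position 8, so
 * sl = (480 >> 3) - 32 = 28 and fl = 8 - FLI_OFFSET = 2. A small request
 * such as r = 100 lands in fl = 0, sl = 100 / 4 = 25.
 */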
static inline __attribute__((always_inline)) void MAPPING_SEARCH(IPTR *r, int *fl, int *sl)
{
    if (*r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(*r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        IPTR tmp = ((IPTR)1 << (MS(*r) - MAX_LOG2_SLI)) - 1;
        IPTR tr = *r + tmp;

        *fl = MS(tr);
        *sl = (int)(((IPTR)tr >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
        *r = tr & ~tmp;
    }
}
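
/*
 * Unlike MAPPING_INSERT, this rounds the request up to the next class
 * boundary, so any block found in the resulting list is guaranteed to fit.
 * For example *r = 481 gives tmp = 7, tr = 488, hence fl = 2, sl = 29,
 * and *r is rounded up to 488.
 */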
static inline __attribute__((always_inline)) bhdr_t * FIND_SUITABLE_BLOCK(tlsf_t *tlsf, int *fl, int *sl)
{
    IPTR bitmap_tmp = tlsf->slbitmap[*fl] & (~0 << *sl);
    bhdr_t *b = NULL;

    if (bitmap_tmp)
    {
        *sl = LS(bitmap_tmp);
        b = tlsf->matrix[*fl][*sl];
    }
    else
    {
        bitmap_tmp = tlsf->flbitmap & (~0 << (*fl + 1));
        if (likely(bitmap_tmp != 0))
        {
            *fl = LS(bitmap_tmp);
            *sl = LS(tlsf->slbitmap[*fl]);
            b = tlsf->matrix[*fl][*sl];
        }
    }

    return b;
}
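
/*
 * The search costs at most two bitmap lookups: first for a second-level
 * list at index *sl or above within the current first level, and failing
 * that, for the lowest populated first level above *fl, taking its lowest
 * populated second-level list. Both lookups are O(1) via ffs/clz.
 */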
#ifdef USE_MACROS

#define GET_SIZE(b) ({ IPTR size = b->header.length & SIZE_MASK; size; })
#define GET_FLAGS(b) ({ IPTR flags = b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK); flags; })
#define SET_SIZE(b, size) do{ b->header.length = GET_FLAGS(b) | (size); }while(0)
#define SET_FLAGS(b, flags) do{ b->header.length = GET_SIZE(b) | (flags); }while(0)
#define SET_SIZE_AND_FLAGS(b, size, flags) do{ b->header.length = (size) | (flags); }while(0)
#define FREE_BLOCK(b) ((b->header.length & THIS_FREE_MASK) == THIS_FREE)
#define SET_FREE_BLOCK(b) do{ b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE; }while(0)
#define SET_BUSY_BLOCK(b) do{ b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY; }while(0)
#define SET_FREE_PREV_BLOCK(b) do{ b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE; }while(0)
#define SET_BUSY_PREV_BLOCK(b) do{ b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY; }while(0)
#define FREE_PREV_BLOCK(b) ((b->header.length & PREV_FREE_MASK) == PREV_FREE)
#define GET_NEXT_BHDR(hdr, size) ({ bhdr_t * __b = (bhdr_t *)((UBYTE *)&hdr->mem[0] + (size)); __b; })
#define MEM_TO_BHDR(ptr) ({ bhdr_t * b = (bhdr_t *)((void *)(ptr) - offsetof(bhdr_t, mem)); b; })

#define REMOVE_HEADER(tlsf, b, fl, sl) do{ \
        if (b->free_node.next) \
            b->free_node.next->free_node.prev = b->free_node.prev; \
        if (b->free_node.prev) \
            b->free_node.prev->free_node.next = b->free_node.next; \
        if (tlsf->matrix[fl][sl] == b) { \
            tlsf->matrix[fl][sl] = b->free_node.next; \
            if (!tlsf->matrix[fl][sl]) \
                ClrBit(sl, &tlsf->slbitmap[fl]); \
            if (!tlsf->slbitmap[fl]) \
                ClrBit(fl, &tlsf->flbitmap); \
        } } while(0)

#define INSERT_FREE_BLOCK(tlsf, b) do { \
        int fl, sl; MAPPING_INSERT(GET_SIZE(b), &fl, &sl); \
        b->free_node.prev = NULL; \
        b->free_node.next = tlsf->matrix[fl][sl]; \
        if (tlsf->matrix[fl][sl]) \
            tlsf->matrix[fl][sl]->free_node.prev = b; \
        tlsf->matrix[fl][sl] = b; \
        SetBit(fl, &tlsf->flbitmap); \
        SetBit(sl, &tlsf->slbitmap[fl]); } while(0)

#else
static inline __attribute__((always_inline)) IPTR GET_SIZE(bhdr_t *b)
{
    return b->header.length & SIZE_MASK;
}

static inline __attribute__((always_inline)) IPTR GET_FLAGS(bhdr_t *b)
{
    return b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK);
}

static inline __attribute__((always_inline)) void SET_SIZE(bhdr_t *b, IPTR size)
{
    b->header.length = GET_FLAGS(b) | size;
}

static inline __attribute__((always_inline)) void SET_SIZE_AND_FLAGS(bhdr_t *b, IPTR size, IPTR flags)
{
    b->header.length = size | flags;
}

static inline __attribute__((always_inline)) int FREE_BLOCK(bhdr_t *b)
{
    return ((b->header.length & THIS_FREE_MASK) == THIS_FREE);
}

static inline __attribute__((always_inline)) void SET_FREE_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE;
}

static inline __attribute__((always_inline)) void SET_BUSY_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY;
}

static inline __attribute__((always_inline)) void SET_FREE_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE;
}

static inline __attribute__((always_inline)) void SET_BUSY_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY;
}

static inline __attribute__((always_inline)) int FREE_PREV_BLOCK(bhdr_t *b)
{
    return ((b->header.length & PREV_FREE_MASK) == PREV_FREE);
}

static inline __attribute__((always_inline)) bhdr_t * GET_NEXT_BHDR(bhdr_t *hdr, IPTR size)
{
    return (bhdr_t *)((UBYTE *)&hdr->mem[0] + size);
}

static inline __attribute__((always_inline)) bhdr_t * MEM_TO_BHDR(void *ptr)
{
    return (bhdr_t *)(ptr - offsetof(bhdr_t, mem));
}

static inline __attribute__((always_inline)) void REMOVE_HEADER(tlsf_t *tlsf, bhdr_t *b, int fl, int sl)
{
    if (b->free_node.next)
        b->free_node.next->free_node.prev = b->free_node.prev;
    if (b->free_node.prev)
        b->free_node.prev->free_node.next = b->free_node.next;

    if (tlsf->matrix[fl][sl] == b)
    {
        tlsf->matrix[fl][sl] = b->free_node.next;
        if (!tlsf->matrix[fl][sl])
            ClrBit(sl, &tlsf->slbitmap[fl]);
        if (!tlsf->slbitmap[fl])
            ClrBit(fl, &tlsf->flbitmap);
    }
}

static inline __attribute__((always_inline)) void INSERT_FREE_BLOCK(tlsf_t *tlsf, bhdr_t *b)
{
    int fl, sl;

    MAPPING_INSERT(GET_SIZE(b), &fl, &sl);

    b->free_node.prev = NULL;
    b->free_node.next = tlsf->matrix[fl][sl];

    if (tlsf->matrix[fl][sl])
        tlsf->matrix[fl][sl]->free_node.prev = b;

    tlsf->matrix[fl][sl] = b;

    SetBit(fl, &tlsf->flbitmap);
    SetBit(sl, &tlsf->slbitmap[fl]);
}

#endif /* USE_MACROS */
void * tlsf_malloc(struct MemHeaderExt *mhe, IPTR size, ULONG *flags)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    int fl, sl;
    bhdr_t *b = NULL;

    size = ROUNDUP(size);

    D(nbug("tlsf_malloc(%p, %ld)\n", tlsf, size));

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Find the indices fl and sl for the given size */
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Find a block of either the right size or larger */
    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);

    D(nbug("  tlsf_malloc - adjusted size %ld\n", size));

    /* No block found? Either failure or tlsf will get more memory. */
    if (unlikely(!b))
    {
        D(nbug("tlsf_malloc out of memory\n"));

        /* Do we have the autogrow feature? */
        if (tlsf->autogrow_get_fn)
        {
            /* Increase the size of the requested block so that we can fit the headers too */
            IPTR sz = size + HEADERS_SIZE;

            /* Requested size less than the puddle size? Use the puddle size then */
            if (sz < tlsf->autogrow_puddle_size)
                sz = tlsf->autogrow_puddle_size;

            D(nbug("querying for %d bytes\n", sz));

            /* Try to get some memory */
            void * ptr = tlsf->autogrow_get_fn(tlsf->autogrow_data, &sz);

            /* Got it? Add it to tlsf then */
            if (ptr)
            {
                tlsf_add_memory(mhe, ptr, sz);

                /* We know the newly added memory is first in the list. Mark it as autogrown */
                tlsf->memory_area->autogrown = 1;

                /* Memory is there. Try to find the block again */
                MAPPING_SEARCH(&size, &fl, &sl);
                b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
            }
        }

        /* Still no block? FAILURE! */
        if (!b)
        {
            if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
                ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

            return NULL;
        }
    }

    /* Next header */
    bhdr_t *next = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Remove the found block from the free list */
    REMOVE_HEADER(tlsf, b, fl, sl);

    /* Is this block larger than requested? Try to split it then */
    if (likely(GET_SIZE(b) > (size + ROUNDUP(sizeof(hdr_t)))))
    {
        /* New split block */
        bhdr_t *sb = GET_NEXT_BHDR(b, size);
        sb->header.prev = b;

        /* Set size, this free and previous busy */
        SET_SIZE_AND_FLAGS(sb, GET_SIZE(b) - size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* The next header points to the free block now */
        next->header.prev = sb;

        /* The previous block (sb) is free */
        SET_FREE_PREV_BLOCK(next);

        /* Allocated block size truncated */
        SET_SIZE(b, size);

        D(nbug("  new split block of size %ld\n", GET_SIZE(sb)));

        /* The free block is inserted into the free list */
        INSERT_FREE_BLOCK(tlsf, sb);
    }
    else
    {
        /* The block was of the right size. Just mark it busy in the next header */
        SET_BUSY_PREV_BLOCK(next);
    }

    /* The allocated block is busy */
    SET_BUSY_BLOCK(b);

    /* Clear the pointers just in case */
    b->free_node.next = NULL;
    b->free_node.prev = NULL;

    /* Update counters */
    tlsf->free_size -= GET_SIZE(b);
    mhe->mhe_MemHeader.mh_Free = tlsf->free_size;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    if (flags && (*flags & MEMF_CLEAR))
        bzero(&b->mem[0], size);

    /* And return the memory */
    return &b->mem[0];
}
static inline __attribute__((always_inline)) void MERGE(bhdr_t *b1, bhdr_t *b2)
{
    /* Merging adjusts the size - it becomes the sum of both sizes plus the size of a block header */
    SET_SIZE(b1, GET_SIZE(b1) + GET_SIZE(b2) + ROUNDUP(sizeof(hdr_t)));
}
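
/*
 * Example: merging a 32-byte free block with a following 64-byte free block
 * yields one free block of 32 + 64 + ROUNDUP(sizeof(hdr_t)) bytes - the
 * header between the two merged blocks is reclaimed as payload.
 */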
static inline __attribute__((always_inline)) bhdr_t * MERGE_PREV(tlsf_t *tlsf, bhdr_t *block)
{
    /* Is the previous block free? */
    if (FREE_PREV_BLOCK(block))
    {
        int fl, sl;
        bhdr_t *prev = block->header.prev;

        /* Calculate the index for removal */
        MAPPING_INSERT(GET_SIZE(prev), &fl, &sl);

        /* Remove the header from the list */
        REMOVE_HEADER(tlsf, prev, fl, sl);

        /* Merge */
        MERGE(prev, block);

        return prev;
    }
    else
        return block;
}
static inline __attribute__((always_inline)) bhdr_t * MERGE_NEXT(tlsf_t *tlsf, bhdr_t *block)
{
    bhdr_t *next = GET_NEXT_BHDR(block, GET_SIZE(block));

    /* Is next block free? */
    if (FREE_BLOCK(next))
    {
        int fl, sl;

        /* Calculate index for removal */
        MAPPING_INSERT(GET_SIZE(next), &fl, &sl);

        /* Remove the header from the list */
        REMOVE_HEADER(tlsf, next, fl, sl);

        /* merge blocks */
        MERGE(block, next);
    }

    return block;
}
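
/*
 * Note that MERGE_NEXT may safely dereference the following header: every
 * area ends with a zero-length sentinel block that is permanently marked
 * THIS_BUSY (see init_memory_area()), so the walk never runs off the end
 * of an area and a sentinel is never merged.
 */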
static void tlsf_release_memory_area(struct MemHeaderExt * mhe, tlsf_area_t * area)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    /* Treat the list head as a pseudo-area so the unlink loop below works uniformly */
    tlsf_area_t *p = (tlsf_area_t *)((void *)&tlsf->memory_area - offsetof(tlsf_area_t, next));
    bhdr_t *b;
    void *begin;
    void *end;
    IPTR size;

    /* get the beginning of this area */
    begin = MEM_TO_BHDR(area);

    /* get the sentinel block */
    b = area->end;

    /* the end of this area is the end of the sentinel block */
    end = GET_NEXT_BHDR(b, 0);

    /* calculate the size of the area */
    size = (IPTR)end - (IPTR)begin;

    /* update counters */
    tlsf->total_size -= size;
    tlsf->free_size -= GET_SIZE(area->end->header.prev);

    /* remove the area from the list */
    for (; p->next != NULL; p = p->next)
    {
        if (p->next == area)
        {
            p->next = area->next;
            break;
        }
    }

    /* release */
    if (tlsf->autogrow_release_fn)
        tlsf->autogrow_release_fn(tlsf->autogrow_data, begin, size);
}
void tlsf_freevec(struct MemHeaderExt * mhe, APTR ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *fb = MEM_TO_BHDR(ptr);
    bhdr_t *next;
    tlsf_area_t * area;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Mark the block as free */
    SET_FREE_BLOCK(fb);

    /* adjust the free size field of tlsf */
    tlsf->free_size += GET_SIZE(fb);

    /* Try to merge with the previous and next blocks (if free) */
    fb = MERGE_PREV(tlsf, fb);
    fb = MERGE_NEXT(tlsf, fb);

    /* Tell the next block that the previous one is free. Also update the prev link in case it changed */
    next = GET_NEXT_BHDR(fb, GET_SIZE(fb));
    SET_FREE_PREV_BLOCK(next);
    next->header.prev = fb;

    /*
     * Check if this was the last used block of an autogrown area: the first
     * block of an area has no predecessor and its payload is the tlsf_area_t
     */
    area = fb->header.prev->header.prev == NULL ? (tlsf_area_t *)fb->header.prev->mem : NULL;
    if (area != NULL && area->end == next && area->autogrown == 1)
        tlsf_release_memory_area(mhe, area);
    else
    {
        /* Insert the free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, fb);
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);
}
void tlsf_freemem(struct MemHeaderExt * mhe, APTR ptr, IPTR size)
{
    (void)size;
    tlsf_freevec(mhe, ptr);
}
void * tlsf_realloc(struct MemHeaderExt *mhe, APTR ptr, IPTR new_size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *b;
    bhdr_t *bnext;
    int fl;
    int sl;

    /* NULL pointer? Just allocate the memory */
    if (unlikely(!ptr))
        return tlsf_malloc(mhe, new_size, NULL);

    /* size == 0? Free the memory */
    if (unlikely(!new_size))
    {
        tlsf_freevec(mhe, ptr);
        return NULL;
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    new_size = ROUNDUP(new_size);

    b = MEM_TO_BHDR(ptr);

    if (unlikely(new_size == GET_SIZE(b)))
    {
        /* Same size - release the semaphore (if held) before the early return */
        if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
            ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

        return ptr;
    }

    bnext = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Is the new size smaller than the previous one? Try to split the block if this is the case */
    if (new_size <= (GET_SIZE(b)))
    {
        /* The new header starts right after the shrunk block b */
        bhdr_t * b1 = GET_NEXT_BHDR(b, new_size);

        /* Update the pointer and size */
        b1->header.prev = b;
        SET_SIZE_AND_FLAGS(b1, GET_SIZE(b) - new_size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* The current block gets smaller */
        SET_SIZE(b, new_size);

        tlsf->free_size += GET_SIZE(b1);

        /* Try to merge with the next block */
        b1 = MERGE_NEXT(tlsf, b1);

        /* Tell the next block that its predecessor is free. Also update the prev link in case it changed */
        bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
        SET_FREE_PREV_BLOCK(bnext);
        bnext->header.prev = b1;

        /* Insert the free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, b1);
    }
    else
    {
        /* Is the next block free? Is there enough free space? */
        if (FREE_BLOCK(bnext) && new_size <= GET_SIZE(b) + GET_SIZE(bnext) + ROUNDUP(sizeof(hdr_t)))
        {
            bhdr_t *b1;
            IPTR rest_size = ROUNDUP(sizeof(hdr_t)) + GET_SIZE(bnext) + GET_SIZE(b) - new_size;

            MAPPING_INSERT(GET_SIZE(bnext), &fl, &sl);

            REMOVE_HEADER(tlsf, bnext, fl, sl);

            if (rest_size > ROUNDUP(sizeof(hdr_t)))
            {
                rest_size -= ROUNDUP(sizeof(hdr_t));

                SET_SIZE(b, new_size);

                b1 = GET_NEXT_BHDR(b, GET_SIZE(b));
                b1->header.prev = b;

                SET_SIZE_AND_FLAGS(b1, rest_size, THIS_FREE | PREV_BUSY);

                bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
                bnext->header.prev = b1;
                SET_FREE_PREV_BLOCK(bnext);

                INSERT_FREE_BLOCK(tlsf, b1);
            }
            else
            {
                if (rest_size)
                    SET_SIZE(b, new_size + ROUNDUP(sizeof(hdr_t)));
                else
                    SET_SIZE(b, new_size);

                bnext = GET_NEXT_BHDR(b, GET_SIZE(b));
                bnext->header.prev = b;
                SET_BUSY_PREV_BLOCK(bnext);
            }
        }
        else
        {
            /* The next block was not free. Create a new buffer and copy the old contents there */
            void * p = tlsf_malloc(mhe, new_size, NULL);
            if (p)
            {
                CopyMemQuick(ptr, p, GET_SIZE(b));
                tlsf_freevec(mhe, ptr);
                b = MEM_TO_BHDR(p);
            }
        }
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return b->mem;
}
void * tlsf_allocabs(struct MemHeaderExt * mhe, IPTR size, void * ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    UBYTE *region_start;
    UBYTE *region_end;

    int fl, sl;
    IPTR sz = ROUNDUP(size);

    D(nbug("[TLSF] allocabs(%p, %ld)\n", ptr, size));

    region_start = ptr;
    region_end = (UBYTE *)ptr + sz;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Start searching here. It doesn't make sense to go through regions which are smaller */
    MAPPING_SEARCH(&sz, &fl, &sl);

    /* Start looking now :) (the matrix has only REAL_FLI rows) */
    for (; fl < REAL_FLI; fl++)
    {
        for (; sl < MAX_SLI; sl++)
        {
            bhdr_t *b0 = tlsf->matrix[fl][sl];

            /* If a block is there, check it */
            while (b0)
            {
                bhdr_t *b1 = GET_NEXT_BHDR(b0, GET_SIZE(b0));

                /* The block has to contain the _whole_ requested region; it may exceed it in size, though */
                if (b0->mem <= region_start && (UBYTE *)b1 >= region_end)
                {
                    /* block header of the requested region */
                    bhdr_t *breg = MEM_TO_BHDR(ptr);

                    /*
                     * This is the block we're looking for. Unchain it from the bidirectional
                     * list of free blocks now.
                     *
                     * The previous entry's next will point to this block's next. If previous
                     * is NULL, the matrix entry will be set to this block's next.
                     */
                    if (b0->free_node.prev)
                        b0->free_node.prev->free_node.next = b0->free_node.next;
                    else
                        tlsf->matrix[fl][sl] = b0->free_node.next;

                    /*
                     * The next entry's prev will point to this block's previous.
                     */
                    if (b0->free_node.next)
                        b0->free_node.next->free_node.prev = b0->free_node.prev;

                    /* Empty SL matrix entry for this size? Clear the bit */
                    if (!tlsf->matrix[fl][sl])
                    {
                        ClrBit(sl, &tlsf->slbitmap[fl]);

                        /* Entire SL matrix for the given FL index empty? Clear that bit too */
                        if (!tlsf->slbitmap[fl])
                            ClrBit(fl, &tlsf->flbitmap);
                    }

                    b0->free_node.prev = NULL;
                    b0->free_node.next = NULL;
                    SET_BUSY_BLOCK(b0);

                    /*
                     * At this point the block is removed from the free list and marked as used.
                     * Now, split it if necessary...
                     */

                    /* Does the block begin before the block header of the requested region? */
                    if (b0 != breg)
                    {
                        /*
                         * Adjust the region's block header. Mark in its size field that the
                         * previous block (aka b0) is free. Reduce the size of b0 as well as
                         * the size of breg.
                         */
                        breg->header.prev = b0;
                        SET_SIZE_AND_FLAGS(breg, GET_SIZE(b0) - ((IPTR)breg - (IPTR)b0), PREV_FREE | THIS_BUSY);

                        /* Update the next block. Mark in its size field that the previous block (breg) is used */
                        b1->header.prev = breg;
                        SET_BUSY_PREV_BLOCK(b1);

                        /* b0's prev state is kept. b0 itself is marked as a free block */
                        SET_FREE_BLOCK(b0);
                        SET_SIZE(b0, (IPTR)breg - (IPTR)b0->mem);

                        /* Insert b0 into the free list */
                        MAPPING_INSERT(GET_SIZE(b0), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b0);
                    }

                    /* Is it necessary to split the requested region at the end? */
                    if (GET_SIZE(breg) > (size + SIZE_ALIGN))
                    {
                        IPTR tmp_size = GET_SIZE(breg) - size - SIZE_ALIGN;

                        /* New region header directly at the end of the requested region */
                        bhdr_t *b2 = GET_NEXT_BHDR(breg, size);

                        /* Adjust fields */
                        b2->header.prev = breg;
                        SET_SIZE_AND_FLAGS(b2, tmp_size, PREV_BUSY | THIS_FREE);

                        /* The requested region's size is now smaller */
                        SET_SIZE(breg, size);

                        /* The next block header points to the newly created one */
                        b1->header.prev = b2;
                        SET_FREE_PREV_BLOCK(b1);

                        /* Insert the newly created block into the free list */
                        MAPPING_INSERT(GET_SIZE(b2), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b2);
                    }

                    tlsf->free_size -= GET_SIZE(breg);

                    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
                        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

                    return breg->mem;
                }

                b0 = b0->free_node.next;
            }
        }
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return NULL;
}
/* Allocation of headers in memory:
 * hdr
 *  header      (ROUNDUP(sizeof(hdr_t)))
 *  mem         (ROUNDUP(sizeof(tlsf_area_t)))
 * b
 *  header      (ROUNDUP(sizeof(hdr_t)))
 *  free space  (size - HEADERS_SIZE)
 * bend
 *  header      (ROUNDUP(sizeof(hdr_t)))
 */
tlsf_area_t * init_memory_area(void * memory, IPTR size)
{
    bhdr_t * hdr = (bhdr_t *)memory;
    bhdr_t * b;
    bhdr_t * bend;

    tlsf_area_t * area;

    size = ROUNDDOWN(size);

    /* Prepare the first header, which protects the tlsf_area_t header */
    hdr->header.length = ROUNDUP(sizeof(tlsf_area_t)) | THIS_BUSY | PREV_BUSY;
    hdr->header.prev = NULL;

    b = GET_NEXT_BHDR(hdr, ROUNDUP(sizeof(tlsf_area_t)));
    b->header.prev = hdr;
    b->header.length = (size - HEADERS_SIZE) | PREV_BUSY | THIS_BUSY;

    bend = GET_NEXT_BHDR(b, GET_SIZE(b));
    bend->header.length = 0 | THIS_BUSY | PREV_BUSY;
    bend->header.prev = b;

    area = (tlsf_area_t *)hdr->mem;
    area->end = bend;

    return area;
}
void tlsf_add_memory(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("tlsf_add_memory(%p, %p, %d)\n", tlsf, memory, size));

    if (memory && size > HEADERS_SIZE)
    {
        tlsf_area_t *area = init_memory_area(memory, size);
        bhdr_t *b;

        D(nbug("  adding memory\n"));

        area->next = tlsf->memory_area;
        tlsf->memory_area = area;

        /* User added memory. Not autogrown */
        area->autogrown = 0;

        b = MEM_TO_BHDR(area);
        b = GET_NEXT_BHDR(b, GET_SIZE(b));

        tlsf->total_size += size;

        D(nbug("  total_size=%08x\n", tlsf->total_size));

        /* Add the initialized memory by freeing its initial (busy) block */
        tlsf_freevec(mhe, b->mem);
    }
}
void tlsf_add_memory_and_merge(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_add_memory(mhe, memory, size);
    // TODO: add memory and merge...
}
#if 0
void bzero(void *ptr, IPTR len)
{
    UBYTE *p = (UBYTE *)ptr;

    while (len--)
        *p++ = 0;
}
#endif
void * tlsf_init(struct MemHeaderExt * mhe)
{
    tlsf_t *tlsf = NULL;
    void * ptr = mhe->mhe_MemHeader.mh_Lower;

    /* if MemHeaderExt starts at the beginning of handled memory, advance the ptr */
    if (mhe == ptr)
        ptr += ROUNDUP(sizeof(struct MemHeaderExt));

    /* Is there enough room for tlsf in the mem header itself? */
    if (mhe->mhe_MemHeader.mh_Free >= (ROUNDUP(sizeof(tlsf_t)) + 3 * ROUNDUP(sizeof(bhdr_t))))
    {
        /* tlsf will be stored inside handled memory */
        tlsf = (tlsf_t *)ptr;

        ptr += ROUNDUP(sizeof(tlsf_t));

        bzero(tlsf, sizeof(tlsf_t));
        tlsf->autodestroy_self = 0;
    }
    else
    {
        /* No place for tlsf header in MemHeaderExt? Allocate it separately */
        tlsf = AllocMem(sizeof(tlsf_t), MEMF_ANY);

        if (tlsf)
        {
            bzero(tlsf, sizeof(tlsf_t));
            tlsf->autodestroy_self = 1;
        }
    }

    /* Store the tlsf pointer in UserData field */
    mhe->mhe_UserData = tlsf;

    if (tlsf && ptr < mhe->mhe_MemHeader.mh_Upper)
    {
        tlsf_add_memory(mhe, ptr, (IPTR)mhe->mhe_MemHeader.mh_Upper - (IPTR)ptr);
    }

    return tlsf;
}
static void * tlsf_init_autogrow(struct MemHeaderExt * mhe, IPTR puddle_size, ULONG requirements, autogrow_get grow_function, autogrow_release release_function, APTR autogrow_data)
{
    tlsf_t *tlsf = tlsf_init(mhe);

    if (tlsf)
    {
        if (puddle_size < 4096)
            puddle_size = 4096;

        tlsf->autogrow_puddle_size = puddle_size;
        tlsf->autogrow_requirements = requirements;
        tlsf->autogrow_data = autogrow_data;
        tlsf->autogrow_get_fn = grow_function;
        tlsf->autogrow_release_fn = release_function;
    }

    return tlsf;
}
void tlsf_destroy(struct MemHeaderExt * mhe)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("tlsf_destroy(%p)\n", tlsf));

    if (tlsf)
    {
        tlsf_area_t *area = tlsf->memory_area;

        if (tlsf->autogrow_release_fn)
        {
            while (area)
            {
                tlsf_area_t *next = area->next;

                /*
                 * Autogrown area? Release it here.
                 * Otherwise it's the responsibility of the add_memory_area caller.
                 */
                if (area->autogrown)
                    tlsf_release_memory_area(mhe, area);

                area = next;
            }
        }

        if (tlsf->autodestroy_self)
            FreeMem(tlsf, sizeof(tlsf_t));
    }
}
IPTR tlsf_avail(struct MemHeaderExt * mhe, ULONG requirements)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    IPTR ret = 0;

    if (requirements & MEMF_TOTAL)
        ret = tlsf->total_size;
    else if (requirements & MEMF_LARGEST)
    {
        bhdr_t *b = NULL;

        /* The largest free block lives in the highest populated size class */
        if (tlsf->flbitmap)
        {
            int fl = MS(tlsf->flbitmap);

            if (tlsf->slbitmap[fl])
            {
                int sl = MS(tlsf->slbitmap[fl]);

                b = tlsf->matrix[fl][sl];
            }
        }

        while (b)
        {
            if (GET_SIZE(b) > ret)
                ret = GET_SIZE(b);

            b = b->free_node.next;
        }
    }
    else
        ret = tlsf->free_size;

    return ret;
}
BOOL tlsf_in_bounds(struct MemHeaderExt * mhe, void * begin, void * end)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    tlsf_area_t *area;

    area = tlsf->memory_area;

    D(nbug("tlsf_in_bounds(%p, %p, %p)\n", tlsf, begin, end));

    while (area)
    {
        D(nbug("  area %p\n", area));
        /*
         * Do the checks only if the questioned memory ends before the end
         * (sentinel bhdr) of the area
         */
        if ((IPTR)end <= (IPTR)area->end)
        {
            D(nbug("  end <= area->end (%p <= %p)\n", end, area->end));

            /* Get the bhdr of this area */
            bhdr_t *b = MEM_TO_BHDR(area);

            /* Forward to the beginning of the memory */
            b = GET_NEXT_BHDR(b, GET_SIZE(b));

            /* does the requested memory start at or after the beginning of the area? */
            if ((IPTR)begin >= (IPTR)b->mem)
            {
                D(nbug("  begin >= b->mem (%p >= %p)\n", begin, b->mem));
                return TRUE;
            }
        }

        area = area->next;
    }

    return FALSE;
}
static void destroy_Pool(struct MemHeaderExt *mhe)
{
    tlsf_destroy(mhe);
}
static APTR fetch_more_ram(void * data, IPTR *size)
{
    struct MemHeaderExt *mhe = (struct MemHeaderExt *)data;
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[TLSF] fetch_more_ram(%p, %d)\n", mhe, *size));

    APTR ptr = AllocMem(*size, tlsf->autogrow_requirements);
    return ptr;
}

static VOID release_ram(void * data, APTR ptr, IPTR size)
{
    D(nbug("[TLSF] release_ram(%p, %d)\n", ptr, size));

    FreeMem(ptr, size);
}
static void * init_Pool(struct MemHeaderExt *mhe, IPTR puddleSize, IPTR initialSize)
{
    /* For managed headers, mh_First carries the pool's memory requirements, not a MemChunk pointer */
    return tlsf_init_autogrow(mhe, puddleSize, (ULONG)mhe->mhe_MemHeader.mh_First, fetch_more_ram, release_ram, mhe);
}
void krnCreateTLSFMemHeader(CONST_STRPTR name, BYTE pri, APTR start, IPTR size, ULONG flags)
{
    /* If the end is below (1 << 31), MEMF_31BIT is implied */
    if (((IPTR)start + size) < (1UL << 31))
        flags |= MEMF_31BIT;
    else
        flags &= ~MEMF_31BIT;

    flags |= MEMF_MANAGED;

    struct MemHeaderExt *mhe = start;

    mhe->mhe_Magic = MEMHEADER_EXT_MAGIC;

    mhe->mhe_DestroyPool = destroy_Pool;
    mhe->mhe_InitPool = init_Pool;

    mhe->mhe_Alloc = tlsf_malloc;
    mhe->mhe_AllocVec = tlsf_malloc;
    mhe->mhe_Free = tlsf_freemem;
    mhe->mhe_FreeVec = tlsf_freevec;
    mhe->mhe_AllocAbs = tlsf_allocabs;
    mhe->mhe_ReAlloc = tlsf_realloc;
    mhe->mhe_Avail = tlsf_avail;
    mhe->mhe_InBounds = tlsf_in_bounds;

    mhe->mhe_MemHeader.mh_Node.ln_Succ = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Pred = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Type = NT_MEMORY;
    mhe->mhe_MemHeader.mh_Node.ln_Name = (STRPTR)name;
    mhe->mhe_MemHeader.mh_Node.ln_Pri = pri;
    mhe->mhe_MemHeader.mh_Attributes = flags;

    /* The memory is managed by TLSF; the legacy MemChunk list stays empty */
    mhe->mhe_MemHeader.mh_First = NULL;

    mhe->mhe_UserData = NULL;

    /*
     * mh_Lower and mh_Upper are informational only. Since our MemHeader
     * resides inside the region it describes, the region includes the
     * MemHeader itself.
     */
    mhe->mhe_MemHeader.mh_Lower = start;
    mhe->mhe_MemHeader.mh_Upper = start + size;
    mhe->mhe_MemHeader.mh_Free = size;

    tlsf_init(mhe);
}
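
#if 0
/*
 * Illustrative usage sketch (not compiled): set up a TLSF-managed MemHeader
 * over a raw region and allocate through the MemHeaderExt function pointers,
 * the way exec drives MEMF_MANAGED headers. The region address and sizes
 * are hypothetical.
 */
static void tlsf_usage_example(void)
{
    APTR region = (APTR)0x01000000;     /* hypothetical memory region */
    IPTR region_size = 1024 * 1024;

    krnCreateTLSFMemHeader("example", 0, region, region_size, MEMF_FAST);

    struct MemHeaderExt *mhe = (struct MemHeaderExt *)region;
    ULONG flags = MEMF_CLEAR;

    /* Allocate 4 KiB of zeroed memory, then hand it back */
    APTR mem = mhe->mhe_Alloc(mhe, 4096, &flags);
    if (mem)
        mhe->mhe_Free(mhe, mem, 4096);
}
#endif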