kernel.resource: for AllocAbs, iterate over all higher order levels.
[AROS.git] / rom / kernel / tlsf.c
/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <exec/types.h>
#include <exec/memory.h>
#include <exec/memheaderext.h>
#include <proto/exec.h>

#include <string.h>

#include "tlsf.h"
#include "kernel_base.h"
#include "kernel_debug.h"

#define D(x)

#undef USE_MACROS

#include <stddef.h>

/*
 * Minimal alignment as required by AROS. Contrary to the default
 * TLSF implementation, we do not allow smaller blocks here.
 * Size needs to be aligned to at least 8, see THIS_FREE_MASK comment.
 */
#define SIZE_ALIGN AROS_WORSTALIGN

/*
 * Settings for the TLSF allocator:
 * MAX_LOG2_SLI - number of bits used for the second level list
 * MAX_FLI      - maximal allowable allocation size: 2^32 should be enough on
 *                32-bit systems, 64-bit systems use a 128GB limit.
 */
#define MAX_LOG2_SLI (5)
#define MAX_SLI (1 << MAX_LOG2_SLI)
#if __WORDSIZE == 64
#define MAX_FLI (32+5)
#else
#define MAX_FLI (32)
#endif
#define FLI_OFFSET (6)
#define SMALL_BLOCK (2 << FLI_OFFSET)

#define REAL_FLI (MAX_FLI - FLI_OFFSET)

#define ROUNDUP(x) (((x) + SIZE_ALIGN - 1) & ~(SIZE_ALIGN - 1))
#define ROUNDDOWN(x) ((x) & ~(SIZE_ALIGN - 1))

/* Fields used in the block header length field to identify busy/free blocks */
#define THIS_FREE_MASK (IPTR)1
#define THIS_FREE (IPTR)1
#define THIS_BUSY (IPTR)0

#define PREV_FREE_MASK (IPTR)2
#define PREV_FREE (IPTR)2
#define PREV_BUSY (IPTR)0

#define SIZE_MASK (~(THIS_FREE_MASK | PREV_FREE_MASK))

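/*
 * Illustration (not part of the allocator): a block's length field packs the
 * byte size together with the two state bits, which works because SIZE_ALIGN
 * keeps the low bits of any valid size zero. For example, a 320-byte free
 * block whose predecessor is busy carries:
 *
 *   length = 320 | THIS_FREE | PREV_BUSY;    // 0x141
 *   size   = length & SIZE_MASK;             // 320 again
 */
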
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Size of additional memory needed to manage new block */
#define HEADERS_SIZE (((3 * ROUNDUP(sizeof(hdr_t))) + ROUNDUP(sizeof(tlsf_area_t))))

/* The free node links together all free blocks of similar size */
typedef struct free_node_s {
    struct bhdr_s *prev;
    struct bhdr_s *next;
} free_node_t;

/* Block header in front of each block - both free and busy */
typedef struct hdr_s {
    struct bhdr_s *prev;
    IPTR length;
} hdr_t;

/*
 * Each block is defined by bhdr_t structure. Free blocks contain only
 * the header which allows us to go through all memory blocks in the system.
 * The free blocks additionally contain the node which chains them in one
 * of the free block lists.
 */
typedef struct bhdr_s {
    union {
        hdr_t header;
        UBYTE __min_align[SIZE_ALIGN];
    };
    union {
        UBYTE mem[1];
        free_node_t free_node;
    };
} bhdr_t;

/* Memory area within the TLSF pool */
typedef struct tlsf_area_s {
    struct tlsf_area_s *next;   // Next memory area
    bhdr_t *end;                // Pointer to "end-of-area" block header
    LONG autogrown;             // Automatically allocated by TLSF pool
} tlsf_area_t;

typedef struct {
    tlsf_area_t *memory_area;

    IPTR total_size;
    IPTR free_size;

    ULONG flbitmap;
    ULONG slbitmap[REAL_FLI];

    IPTR autogrow_puddle_size;
    ULONG autogrow_requirements;
    APTR autogrow_data;
    autogrow_get autogrow_get_fn;
    autogrow_release autogrow_release_fn;

    UBYTE autodestroy_self;

    bhdr_t *matrix[REAL_FLI][MAX_SLI];
} tlsf_t;

static inline __attribute__((always_inline)) int LS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return __builtin_ffs(i) - 1;
    else
        return __builtin_ffsl(i) - 1;
}

static inline __attribute__((always_inline)) int MS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return 31 - __builtin_clz(i);
    else
        return 63 - __builtin_clzl(i);
}

static inline __attribute__((always_inline)) void SetBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] |= (1 << (nr & 31));
}

static inline __attribute__((always_inline)) void ClrBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] &= ~(1 << (nr & 31));
}

static inline __attribute__((always_inline)) void MAPPING_INSERT(IPTR r, int *fl, int *sl)
{
    if (r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        *fl = MS(r);
        *sl = (int)(((IPTR)r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
    }
}

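/*
 * Worked example for MAPPING_INSERT (illustration only): with MAX_LOG2_SLI = 5
 * and FLI_OFFSET = 6, SMALL_BLOCK is 128. A size of 100 is below SMALL_BLOCK,
 * so fl = 0 and sl = 100 / (128 / 32) = 25. A size of 4096 gives MS(4096) = 12,
 * sl = (4096 >> 7) - 32 = 0 and fl = 12 - 6 = 6, i.e. such a block is chained
 * at matrix[6][0].
 */
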
static inline __attribute__((always_inline)) void MAPPING_SEARCH(IPTR *r, int *fl, int *sl)
{
    if (*r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(*r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        IPTR tmp = ((IPTR)1 << (MS(*r) - MAX_LOG2_SLI)) - 1;
        IPTR tr = *r + tmp;

        *fl = MS(tr);
        *sl = (int)(((IPTR)tr >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
        *r = tr & ~tmp;
    }
}

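/*
 * Worked example for MAPPING_SEARCH (illustration only): a request for 5000
 * bytes is first rounded up to the start of the next size class, so that any
 * block found in that class is guaranteed to fit: tmp = (1 << (12 - 5)) - 1 = 127,
 * tr = 5127, hence fl = 12 - 6 = 6, sl = (5127 >> 7) - 32 = 8, and the request
 * size becomes 5127 & ~127 = 5120.
 */
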
static inline __attribute__((always_inline)) bhdr_t * FIND_SUITABLE_BLOCK(tlsf_t *tlsf, int *fl, int *sl)
{
    IPTR bitmap_tmp = tlsf->slbitmap[*fl] & (~0 << *sl);
    bhdr_t *b = NULL;

    if (bitmap_tmp)
    {
        *sl = LS(bitmap_tmp);
        b = tlsf->matrix[*fl][*sl];
    }
    else
    {
        bitmap_tmp = tlsf->flbitmap & (~0 << (*fl + 1));
        if (likely(bitmap_tmp != 0))
        {
            *fl = LS(bitmap_tmp);
            *sl = LS(tlsf->slbitmap[*fl]);
            b = tlsf->matrix[*fl][*sl];
        }
    }

    return b;
}

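/*
 * Worked example (illustration only): suppose a request maps to fl = 6, sl = 8,
 * but slbitmap[6] has no bit set at position 8 or above, and the only populated
 * list is matrix[10][3]. Then flbitmap & (~0 << 7) isolates row 10, LS() yields
 * fl = 10, and LS(slbitmap[10]) yields sl = 3 - a constant-time search no matter
 * how many size classes are empty.
 */
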
#ifdef USE_MACROS

#define GET_SIZE(b) ({ IPTR size = b->header.length & SIZE_MASK; size; })
#define GET_FLAGS(b) ({ IPTR flags = b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK); flags; })
#define SET_SIZE(b, size) do{ b->header.length = GET_FLAGS(b) | (size); }while(0)
#define SET_FLAGS(b, flags) do{ b->header.length = GET_SIZE(b) | (flags); }while(0)
#define SET_SIZE_AND_FLAGS(b, size, flags) do{ b->header.length = (size) | (flags); }while(0)
#define FREE_BLOCK(b) ((b->header.length & THIS_FREE_MASK) == THIS_FREE)
#define SET_FREE_BLOCK(b) do{ b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE; }while(0)
#define SET_BUSY_BLOCK(b) do{ b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY; }while(0)
#define SET_FREE_PREV_BLOCK(b) do{ b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE; }while(0)
#define SET_BUSY_PREV_BLOCK(b) do{ b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY; }while(0)
#define FREE_PREV_BLOCK(b) ((b->header.length & PREV_FREE_MASK) == PREV_FREE)
#define GET_NEXT_BHDR(hdr, size) ({ bhdr_t * __b = (bhdr_t *)((UBYTE *)&hdr->mem[0] + (size)); __b; })
#define MEM_TO_BHDR(ptr) ({ bhdr_t * b = (bhdr_t *)((void *)(ptr) - offsetof(bhdr_t, mem)); b; })

#define REMOVE_HEADER(tlsf, b, fl, sl) do{ \
        if (b->free_node.next) \
            b->free_node.next->free_node.prev = b->free_node.prev; \
        if (b->free_node.prev) \
            b->free_node.prev->free_node.next = b->free_node.next; \
        if (tlsf->matrix[fl][sl] == b) { \
            tlsf->matrix[fl][sl] = b->free_node.next; \
            if (!tlsf->matrix[fl][sl]) \
                ClrBit(sl, &tlsf->slbitmap[fl]); \
            if (!tlsf->slbitmap[fl]) \
                ClrBit(fl, &tlsf->flbitmap); \
        } }while(0)

#define INSERT_FREE_BLOCK(tlsf, b) do { \
        int fl, sl; MAPPING_INSERT(GET_SIZE(b), &fl, &sl); \
        b->free_node.prev = NULL; \
        b->free_node.next = tlsf->matrix[fl][sl]; \
        if (tlsf->matrix[fl][sl]) \
            tlsf->matrix[fl][sl]->free_node.prev = b; \
        tlsf->matrix[fl][sl] = b; \
        SetBit(fl, &tlsf->flbitmap); \
        SetBit(sl, &tlsf->slbitmap[fl]); }while(0)

#else

static inline __attribute__((always_inline)) IPTR GET_SIZE(bhdr_t *b)
{
    return b->header.length & SIZE_MASK;
}

static inline __attribute__((always_inline)) IPTR GET_FLAGS(bhdr_t *b)
{
    return b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK);
}

static inline __attribute__((always_inline)) void SET_SIZE(bhdr_t *b, IPTR size)
{
    b->header.length = GET_FLAGS(b) | size;
}

static inline __attribute__((always_inline)) void SET_SIZE_AND_FLAGS(bhdr_t *b, IPTR size, IPTR flags)
{
    b->header.length = size | flags;
}

static inline __attribute__((always_inline)) int FREE_BLOCK(bhdr_t *b)
{
    return ((b->header.length & THIS_FREE_MASK) == THIS_FREE);
}

static inline __attribute__((always_inline)) void SET_FREE_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE;
}

static inline __attribute__((always_inline)) void SET_BUSY_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY;
}

static inline __attribute__((always_inline)) void SET_FREE_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE;
}

static inline __attribute__((always_inline)) void SET_BUSY_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY;
}

static inline __attribute__((always_inline)) int FREE_PREV_BLOCK(bhdr_t *b)
{
    return ((b->header.length & PREV_FREE_MASK) == PREV_FREE);
}

static inline __attribute__((always_inline)) bhdr_t * GET_NEXT_BHDR(bhdr_t *hdr, IPTR size)
{
    return (bhdr_t *)((UBYTE *)&hdr->mem[0] + size);
}

static inline __attribute__((always_inline)) bhdr_t * MEM_TO_BHDR(void *ptr)
{
    return (bhdr_t *)(ptr - offsetof(bhdr_t, mem));
}

static inline __attribute__((always_inline)) void REMOVE_HEADER(tlsf_t *tlsf, bhdr_t *b, int fl, int sl)
{
    if (b->free_node.next)
        b->free_node.next->free_node.prev = b->free_node.prev;
    if (b->free_node.prev)
        b->free_node.prev->free_node.next = b->free_node.next;

    if (tlsf->matrix[fl][sl] == b)
    {
        tlsf->matrix[fl][sl] = b->free_node.next;
        if (!tlsf->matrix[fl][sl])
            ClrBit(sl, &tlsf->slbitmap[fl]);
        if (!tlsf->slbitmap[fl])
            ClrBit(fl, &tlsf->flbitmap);
    }
}

static inline __attribute__((always_inline)) void INSERT_FREE_BLOCK(tlsf_t *tlsf, bhdr_t *b)
{
    int fl, sl;

    MAPPING_INSERT(GET_SIZE(b), &fl, &sl);

    b->free_node.prev = NULL;
    b->free_node.next = tlsf->matrix[fl][sl];

    if (tlsf->matrix[fl][sl])
        tlsf->matrix[fl][sl]->free_node.prev = b;

    tlsf->matrix[fl][sl] = b;

    SetBit(fl, &tlsf->flbitmap);
    SetBit(sl, &tlsf->slbitmap[fl]);
}

#endif /* USE_MACROS */

void * tlsf_malloc(struct MemHeaderExt *mhe, IPTR size, ULONG *flags)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    int fl, sl;
    bhdr_t *b = NULL;

    size = ROUNDUP(size);

    if (unlikely(!size)) return NULL;

    D(nbug("[Kernel:TLSF] %s(%p, %ld)\n", __PRETTY_FUNCTION__, tlsf, size));

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Find the indices fl and sl for given size */
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Find block of either the right size or larger */
    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);

    D(nbug("[Kernel:TLSF] %s: adjusted size %ld\n", __PRETTY_FUNCTION__, size));

    /* No block found? Either failure or tlsf will get more memory. */
    if (unlikely(!b))
    {
        D(nbug("[Kernel:TLSF] %s: out of memory\n", __PRETTY_FUNCTION__));

        /* Do we have the autogrow feature? */
        if (tlsf->autogrow_get_fn)
        {
            /* Increase the size of requested block so that we can fit the headers too */
            IPTR sz = size + HEADERS_SIZE;

            /* Requested size less than puddle size? Get puddle size then */
            if (sz < tlsf->autogrow_puddle_size)
                sz = tlsf->autogrow_puddle_size;

            D(nbug("[Kernel:TLSF] %s: querying for %u bytes\n", __PRETTY_FUNCTION__, sz));

            /* Try to get some memory */
            void * ptr = tlsf->autogrow_get_fn(tlsf->autogrow_data, &sz);

            /* Got it? Add to tlsf then */
            if (ptr)
            {
                tlsf_add_memory(mhe, ptr, sz);

                /* We know the newly added memory is first in the list. Set the autogrown feature there */
                tlsf->memory_area->autogrown = 1;

                /* Memory is there. Try to find the block again */
                MAPPING_SEARCH(&size, &fl, &sl);
                b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
            }
        }

        /* No block? FAILURE! */
        if (!b)
        {
            if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
                ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

            return NULL;
        }
    }

    /* Next header */
    bhdr_t *next = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Remove the found block from the free list */
    REMOVE_HEADER(tlsf, b, fl, sl);

    /* Is this block larger than requested? Try to split it then */
    if (likely(GET_SIZE(b) > (size + ROUNDUP(sizeof(hdr_t)))))
    {
        /* New split block */
        bhdr_t *sb = GET_NEXT_BHDR(b, size);
        sb->header.prev = b;

        /* Set size, this free and previous busy */
        SET_SIZE_AND_FLAGS(sb, GET_SIZE(b) - size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* The next header points to free block now */
        next->header.prev = sb;

        /* previous block (sb) is free */
        SET_FREE_PREV_BLOCK(next);

        /* Allocated block size truncated */
        SET_SIZE(b, size);

        D(nbug("[Kernel:TLSF] %s: block split, %ld bytes remaining\n", __PRETTY_FUNCTION__, GET_SIZE(sb)));

        /* Free block is inserted to free list */
        INSERT_FREE_BLOCK(tlsf, sb);
    }
    else
    {
        /* The block was exactly the right size. Just mark it busy in the next header */
        SET_BUSY_PREV_BLOCK(next);
    }

    /* The allocated block is busy */
    SET_BUSY_BLOCK(b);

    /* Clear the pointers just in case */
    b->free_node.next = NULL;
    b->free_node.prev = NULL;

    /* Update counters */
    tlsf->free_size -= GET_SIZE(b);
    mhe->mhe_MemHeader.mh_Free = tlsf->free_size;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    if (flags && (*flags & MEMF_CLEAR))
        bzero(&b->mem[0], size);

    /* And return memory */
    return &b->mem[0];
}

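#if 0
/* Usage sketch (illustration only, kept out of the build): this is roughly
 * how exec.library's MEMF_MANAGED path ends up calling the allocator. The
 * mhe pointer is assumed to be a MemHeaderExt prepared by
 * krnCreateTLSFMemHeader() below.
 */
static void tlsf_malloc_example(struct MemHeaderExt *mhe)
{
    ULONG flags = MEMF_CLEAR;

    /* Allocate 256 zeroed bytes; NULL means the pool (and autogrow) is exhausted */
    APTR mem = tlsf_malloc(mhe, 256, &flags);

    if (mem)
        tlsf_freevec(mhe, mem);
}
#endif
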
static inline __attribute__((always_inline)) void MERGE(bhdr_t *b1, bhdr_t *b2)
{
    /* Merging adjusts the size - it's the sum of both sizes plus the size of a block header */
    SET_SIZE(b1, GET_SIZE(b1) + GET_SIZE(b2) + ROUNDUP(sizeof(hdr_t)));
}

static inline __attribute__((always_inline)) bhdr_t * MERGE_PREV(tlsf_t *tlsf, bhdr_t *block)
{
    /* Is previous block free? */
    if (FREE_PREV_BLOCK(block))
    {
        int fl, sl;
        bhdr_t *prev = block->header.prev;

        /* Calculate index for removal */
        MAPPING_INSERT(GET_SIZE(prev), &fl, &sl);

        /* Do remove the header from the list */
        REMOVE_HEADER(tlsf, prev, fl, sl);

        /* Merge */
        MERGE(prev, block);

        return prev;
    }
    else
        return block;
}

static inline __attribute__((always_inline)) bhdr_t * MERGE_NEXT(tlsf_t *tlsf, bhdr_t *block)
{
    bhdr_t *next = GET_NEXT_BHDR(block, GET_SIZE(block));

    /* Is next block free? */
    if (FREE_BLOCK(next))
    {
        int fl, sl;

        /* Calculate index for removal */
        MAPPING_INSERT(GET_SIZE(next), &fl, &sl);

        /* Remove the header from the list */
        REMOVE_HEADER(tlsf, next, fl, sl);

        /* merge blocks */
        MERGE(block, next);
    }

    return block;
}

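/*
 * Coalescing note (illustration only): a merge absorbs the second block's
 * header, so two adjacent free blocks of 96 and 160 bytes become one free
 * block of 96 + 160 + ROUNDUP(sizeof(hdr_t)) bytes.
 */
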
static void tlsf_release_memory_area(struct MemHeaderExt * mhe, tlsf_area_t * area)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    tlsf_area_t *p = (tlsf_area_t *)(&tlsf->memory_area - offsetof(tlsf_area_t, next));
    bhdr_t *b;
    void *begin;
    void *end;
    IPTR size;

    /* get the begin of this area */
    begin = MEM_TO_BHDR(area);

    /* get sentinel block */
    b = area->end;

    /* end of this area is end of sentinel block */
    end = GET_NEXT_BHDR(b, 0);

    /* calculate the size of area */
    size = (IPTR)end - (IPTR)begin;

    /* update counters */
    tlsf->total_size -= size;
    tlsf->free_size -= GET_SIZE(area->end->header.prev);

    /* remove area from list */
    for (; p->next != NULL; p = p->next)
    {
        if (p->next == area)
        {
            p->next = area->next;
            break;
        }
    }

    /* release */
    if (tlsf->autogrow_release_fn)
        tlsf->autogrow_release_fn(tlsf->autogrow_data, begin, size);
}

void * tlsf_malloc_aligned(struct MemHeaderExt *mhe, IPTR size, IPTR align, ULONG *flags)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    void *ptr;
    bhdr_t *b;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    size = ROUNDUP(size);

    D(nbug("[Kernel:TLSF] %s(%p, %lx, %u)\n", __PRETTY_FUNCTION__, mhe, size, align));

    /* Adjust align to the top nearest power of two */
    align = 1 << MS(align);

    D(nbug("[Kernel:TLSF] %s: adjusted align = %u\n", __PRETTY_FUNCTION__, align));

    ptr = tlsf_malloc(mhe, size + align, flags);

    /* Out of memory? Release the semaphore (if any) and bail out before
       MEM_TO_BHDR() is applied to a NULL pointer */
    if (unlikely(!ptr))
    {
        if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
            ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

        return NULL;
    }

    b = MEM_TO_BHDR(ptr);

    D(nbug("[Kernel:TLSF] %s: allocated region @%p\n", __PRETTY_FUNCTION__, ptr));

    if (align > SIZE_ALIGN)
    {
        void *aligned_ptr = (void *)(((IPTR)ptr + align - 1) & ~(align - 1));
        bhdr_t *aligned_bhdr = MEM_TO_BHDR(aligned_ptr);
        IPTR diff_begin = (IPTR)aligned_bhdr - (IPTR)b;
        IPTR diff_end = (IPTR)GET_NEXT_BHDR(b, GET_SIZE(b)) - (IPTR)GET_NEXT_BHDR(aligned_bhdr, size);

        SET_SIZE(aligned_bhdr, size);

        if (aligned_ptr != ptr)
        {
            D(nbug("[Kernel:TLSF] %s: aligned ptr: %p\n", __PRETTY_FUNCTION__, aligned_ptr));
            D(nbug("[Kernel:TLSF] %s: difference begin: %d\n", __PRETTY_FUNCTION__, diff_begin));
            D(nbug("[Kernel:TLSF] %s: difference end: %d\n", __PRETTY_FUNCTION__, diff_end));

            if (diff_begin > 0)
            {
                SET_SIZE(b, diff_begin - ROUNDUP(sizeof(hdr_t)));

                tlsf->free_size += GET_SIZE(b);

                aligned_bhdr->header.prev = b;
                SET_FREE_PREV_BLOCK(aligned_bhdr);
                SET_FREE_BLOCK(b);

                b = MERGE_PREV(tlsf, b);

                D(nbug("[Kernel:TLSF] %s: block @%p, b->next %p\n", __PRETTY_FUNCTION__, b, GET_NEXT_BHDR(b, GET_SIZE(b))));

                /* Insert free block into the proper list */
                INSERT_FREE_BLOCK(tlsf, b);
            }

            ptr = &aligned_bhdr->mem[0];
        }

        if (diff_end > 0)
        {
            bhdr_t *b1 = GET_NEXT_BHDR(aligned_bhdr, GET_SIZE(aligned_bhdr));
            bhdr_t *next;

            b1->header.prev = aligned_bhdr;

            SET_SIZE(b1, diff_end - ROUNDUP(sizeof(hdr_t)));
            SET_BUSY_PREV_BLOCK(b1);
            SET_FREE_BLOCK(b1);

            next = GET_NEXT_BHDR(b1, GET_SIZE(b1));
            next->header.prev = b1;
            SET_FREE_PREV_BLOCK(next);

            b1 = MERGE_NEXT(tlsf, b1);

            INSERT_FREE_BLOCK(tlsf, b1);
        }
    }

    D({
        bhdr_t *b2 = b;
        while (b2 && GET_SIZE(b2))
        {
            nbug("[Kernel:TLSF] %s: bhdr %p, mem %p, size=%08x, flags=%x, prev=%p\n",
                 __PRETTY_FUNCTION__, b2, &b2->mem[0], GET_SIZE(b2), GET_FLAGS(b2), b2->header.prev);

            b2 = GET_NEXT_BHDR(b2, GET_SIZE(b2));
        }
    });

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return ptr;
}

void tlsf_freevec(struct MemHeaderExt * mhe, APTR ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *fb;
    bhdr_t *next;
    tlsf_area_t *area;

    if (unlikely(!ptr))
        return;

    fb = MEM_TO_BHDR(ptr);

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Mark block as free */
    SET_FREE_BLOCK(fb);

    /* adjust free size field on tlsf */
    tlsf->free_size += GET_SIZE(fb);

    /* Try to merge with previous and next blocks (if free) */
    fb = MERGE_PREV(tlsf, fb);
    fb = MERGE_NEXT(tlsf, fb);

    /* Tell next block that previous one is free. Also update the prev link in case it changed */
    next = GET_NEXT_BHDR(fb, GET_SIZE(fb));
    SET_FREE_PREV_BLOCK(next);
    next->header.prev = fb;

    /* Check if this was the last used block of an autogrown area */
    area = fb->header.prev->header.prev == NULL ? (tlsf_area_t *)fb->header.prev->mem : NULL;
    if (area != NULL && area->end == next && area->autogrown == 1)
        tlsf_release_memory_area(mhe, area);
    else
    {
        /* Insert free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, fb);
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);
}

void tlsf_freemem(struct MemHeaderExt * mhe, APTR ptr, IPTR size)
{
    (void)size;

    tlsf_freevec(mhe, ptr);
}

void * tlsf_realloc(struct MemHeaderExt *mhe, APTR ptr, IPTR new_size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *b;
    bhdr_t *bnext;
    int fl;
    int sl;

    /* NULL pointer? Just allocate the memory */
    if (unlikely(!ptr))
        return tlsf_malloc(mhe, new_size, NULL);

    /* size == 0? Free the memory */
    if (unlikely(!new_size))
    {
        tlsf_freevec(mhe, ptr);
        return NULL;
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    new_size = ROUNDUP(new_size);

    b = MEM_TO_BHDR(ptr);

    if (unlikely(new_size == GET_SIZE(b)))
    {
        /* Size unchanged. Release the semaphore (if held) before the early return */
        if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
            ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

        return ptr;
    }

    bnext = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Is the new size smaller than the previous one? Try to split the block if this is the case */
    if (new_size <= (GET_SIZE(b)))
    {
        /* New header starts right after the current block b */
        bhdr_t *b1 = GET_NEXT_BHDR(b, new_size);

        /* Update pointer and size */
        b1->header.prev = b;
        SET_SIZE_AND_FLAGS(b1, GET_SIZE(b) - new_size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* Current block gets smaller */
        SET_SIZE(b, new_size);

        tlsf->free_size += GET_SIZE(b1);

        /* Try to merge with next block */
        b1 = MERGE_NEXT(tlsf, b1);

        /* Tell next block that previous one is free. Also update the prev link in case it changed */
        bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
        SET_FREE_PREV_BLOCK(bnext);
        bnext->header.prev = b1;

        /* Insert free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, b1);
    }
    else
    {
        /* Is next block free? Is there enough free space? */
        if (FREE_BLOCK(bnext) && new_size <= GET_SIZE(b) + GET_SIZE(bnext) + ROUNDUP(sizeof(hdr_t)))
        {
            bhdr_t *b1;
            IPTR rest_size = ROUNDUP(sizeof(hdr_t)) + GET_SIZE(bnext) + GET_SIZE(b) - new_size;

            MAPPING_INSERT(GET_SIZE(bnext), &fl, &sl);

            REMOVE_HEADER(tlsf, bnext, fl, sl);

            if (rest_size > ROUNDUP(sizeof(hdr_t)))
            {
                rest_size -= ROUNDUP(sizeof(hdr_t));

                SET_SIZE(b, new_size);

                b1 = GET_NEXT_BHDR(b, GET_SIZE(b));
                b1->header.prev = b;

                SET_SIZE_AND_FLAGS(b1, rest_size, THIS_FREE | PREV_BUSY);

                bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
                bnext->header.prev = b1;
                SET_FREE_PREV_BLOCK(bnext);

                INSERT_FREE_BLOCK(tlsf, b1);
            }
            else
            {
                if (rest_size)
                    SET_SIZE(b, new_size + ROUNDUP(sizeof(hdr_t)));
                else
                    SET_SIZE(b, new_size);

                bnext = GET_NEXT_BHDR(b, GET_SIZE(b));
                bnext->header.prev = b;
                SET_BUSY_PREV_BLOCK(bnext);
            }
        }
        else
        {
            /* Next block was not free. Create a new buffer and copy the old contents there */
            void * p = tlsf_malloc(mhe, new_size, NULL);
            if (p)
            {
                CopyMemQuick(ptr, p, GET_SIZE(b));
                tlsf_freevec(mhe, ptr);
                b = MEM_TO_BHDR(p);
            }
        }
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return b->mem;
}

void * tlsf_allocabs(struct MemHeaderExt * mhe, IPTR size, void * ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    UBYTE *region_start;
    UBYTE *region_end;

    int fl, sl;
    IPTR sz = ROUNDUP(size);

    D(nbug("[Kernel:TLSF] %s(%p, %ld)\n", __PRETTY_FUNCTION__, ptr, size));

    region_start = ptr;
    region_end = (UBYTE *)ptr + sz;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Start searching here. It doesn't make sense to go through regions which are smaller */
    MAPPING_SEARCH(&sz, &fl, &sl);

    /* Start looking now :) Note that fl indexes the matrix, so it is bound
       by REAL_FLI (the matrix has only REAL_FLI rows), not by MAX_FLI */
    for (; fl < REAL_FLI; fl++)
    {
        for (; sl < MAX_SLI; sl++)
        {
            bhdr_t *b0 = tlsf->matrix[fl][sl];

            /* If block was there, check it */
            while (b0)
            {
                bhdr_t *b1 = GET_NEXT_BHDR(b0, GET_SIZE(b0));

                /* The block has to contain the _whole_ requested region; it may exceed it in size though */
                if (b0->mem <= region_start && (UBYTE *)b1 >= region_end)
                {
                    /* block header of requested region */
                    bhdr_t *breg = MEM_TO_BHDR(ptr);

                    /*
                     * This is the block we're looking for. Unchain it from the bidirectional list of
                     * free blocks now.
                     *
                     * Previous entry's next will point to this block's next. If previous is NULL, matrix
                     * will be set to block's next.
                     */
                    if (b0->free_node.prev)
                        b0->free_node.prev->free_node.next = b0->free_node.next;
                    else
                        tlsf->matrix[fl][sl] = b0->free_node.next;

                    /*
                     * Next entry's prev will point to this block's previous.
                     */
                    if (b0->free_node.next)
                        b0->free_node.next->free_node.prev = b0->free_node.prev;

                    /* Empty SL list for this size class? Clear its bit */
                    if (!tlsf->matrix[fl][sl])
                        ClrBit(sl, &tlsf->slbitmap[fl]);

                    /* Entire SL bitmap empty for given FL index? Clear that bit too */
                    if (!tlsf->slbitmap[fl])
                        ClrBit(fl, &tlsf->flbitmap);

                    b0->free_node.prev = NULL;
                    b0->free_node.next = NULL;
                    SET_BUSY_BLOCK(b0);

                    /*
                     * At this point the block is removed from free list and marked as used.
                     * Now, split it if necessary...
                     */

                    /* begin of the block != begin of the block header of requested region? */
                    if (b0 != breg)
                    {
                        /*
                         * Adjust region's block header. Mark in size that previous (aka b0) is free.
                         * Reduce the size of b0 as well as size of breg too.
                         */
                        breg->header.prev = b0;
                        SET_SIZE_AND_FLAGS(breg, GET_SIZE(b0) - ((IPTR)breg - (IPTR)b0), PREV_FREE | THIS_BUSY);

                        /* Update the next block. Mark in size that previous (breg) is used */
                        b1->header.prev = breg;
                        SET_BUSY_PREV_BLOCK(b1);

                        /* b0's prev state is kept. b0 itself is marked as free block */
                        SET_FREE_BLOCK(b0);
                        SET_SIZE(b0, (IPTR)breg - (IPTR)b0->mem);

                        /* Insert b0 to free list */
                        MAPPING_INSERT(GET_SIZE(b0), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b0);
                    }

                    /* Is it necessary to split the requested region at the end? Use the
                     * SIZE_ALIGN-rounded region size, not the raw request: block sizes
                     * must stay aligned or they would corrupt the length field's flag bits */
                    if (GET_SIZE(breg) > (IPTR)(region_end - region_start) + SIZE_ALIGN)
                    {
                        IPTR region_size = (IPTR)(region_end - region_start);
                        IPTR tmp_size = GET_SIZE(breg) - region_size - SIZE_ALIGN;

                        /* New region header directly at end of the requested region */
                        bhdr_t *b2 = GET_NEXT_BHDR(breg, region_size);

                        /* Adjust fields */
                        b2->header.prev = breg;
                        SET_SIZE_AND_FLAGS(b2, tmp_size, PREV_BUSY | THIS_FREE);

                        /* requested region's size is now smaller */
                        SET_SIZE(breg, region_size);

                        /* The next block header points to the newly created one */
                        b1->header.prev = b2;
                        SET_FREE_PREV_BLOCK(b1);

                        /* Insert newly created block to free list */
                        MAPPING_INSERT(GET_SIZE(b2), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b2);
                    }

                    tlsf->free_size -= GET_SIZE(breg);

                    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
                        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

                    return breg->mem;
                }

                b0 = b0->free_node.next;
            }
        }

        /* Iterate through the next first level row, restarting the second level from 0 */
        sl = 0;
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return NULL;
}

/* Allocation of headers in memory:
 * hdr
 *  header      (ROUNDUP(sizeof(hdr_t)))
 *  mem         (ROUNDUP(sizeof(tlsf_area_t)))
 * b
 *  header      (ROUNDUP(sizeof(hdr_t)))
 *  free space  (size - HEADERS_SIZE)
 * bend
 *  header      (ROUNDUP(sizeof(hdr_t)))
 */

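/*
 * Worked example (illustration only, assuming a 64-bit build where
 * SIZE_ALIGN is 16): sizeof(hdr_t) is two pointer-sized fields, i.e. 16
 * bytes, so each of the three headers above costs ROUNDUP(16) = 16 bytes
 * and HEADERS_SIZE = 3*16 + ROUNDUP(sizeof(tlsf_area_t)). Everything else
 * in an added area ends up in the "free space" block.
 */
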
tlsf_area_t * init_memory_area(void * memory, IPTR size)
{
    bhdr_t * hdr = (bhdr_t *)memory;
    bhdr_t * b;
    bhdr_t * bend;

    tlsf_area_t * area;

    size = ROUNDDOWN(size);

    /* Prepare first header, which protects the tlsf_area_t header */
    hdr->header.length = ROUNDUP(sizeof(tlsf_area_t)) | THIS_BUSY | PREV_BUSY;
    hdr->header.prev = NULL;

    b = GET_NEXT_BHDR(hdr, ROUNDUP(sizeof(tlsf_area_t)));
    b->header.prev = hdr;
    b->header.length = (size - HEADERS_SIZE) | PREV_BUSY | THIS_BUSY;

    bend = GET_NEXT_BHDR(b, GET_SIZE(b));
    bend->header.length = 0 | THIS_BUSY | PREV_BUSY;
    bend->header.prev = b;

    area = (tlsf_area_t *)hdr->mem;
    area->end = bend;

    return area;
}

void tlsf_add_memory(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p, %p, %u)\n", __PRETTY_FUNCTION__, tlsf, memory, size));

    if (memory && size > HEADERS_SIZE)
    {
        tlsf_area_t *area = init_memory_area(memory, size);
        bhdr_t *b;

        D(nbug("[Kernel:TLSF] %s: adding memory\n", __PRETTY_FUNCTION__));

        area->next = tlsf->memory_area;
        tlsf->memory_area = area;

        /* User added memory. Not autogrown */
        area->autogrown = 0;

        b = MEM_TO_BHDR(area);
        b = GET_NEXT_BHDR(b, GET_SIZE(b));

        tlsf->total_size += size;

        D(nbug("[Kernel:TLSF] %s: total_size=%08x\n", __PRETTY_FUNCTION__, tlsf->total_size));

        /* adjust the memheader if necessary */
        if (memory < mhe->mhe_MemHeader.mh_Lower)
        {
            if ((memory + size) >= mhe->mhe_MemHeader.mh_Lower)
                mhe->mhe_MemHeader.mh_Free += (mhe->mhe_MemHeader.mh_Lower - memory);
            else
                mhe->mhe_MemHeader.mh_Free += size;

            mhe->mhe_MemHeader.mh_Lower = memory;
        }
        else if ((memory + size) > mhe->mhe_MemHeader.mh_Upper)
        {
            if (memory <= mhe->mhe_MemHeader.mh_Upper)
                mhe->mhe_MemHeader.mh_Free += ((memory + size) - mhe->mhe_MemHeader.mh_Upper);
            else
                mhe->mhe_MemHeader.mh_Free += size;

            mhe->mhe_MemHeader.mh_Upper = memory + size;
        }

        /* Add the initialized memory */
        tlsf_freevec(mhe, b->mem);
    }
}

void tlsf_add_memory_and_merge(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_add_memory(mhe, memory, size);
    // TODO: add memory and merge...
}

#if 0
void bzero(void *ptr, IPTR len)
{
    UBYTE *p = (UBYTE *)ptr;

    while (len--)
        *p++ = 0;
}
#endif

void * tlsf_init(struct MemHeaderExt * mhe)
{
    tlsf_t *tlsf = NULL;
    void * ptr = mhe->mhe_MemHeader.mh_Lower;

    /* if the MemHeaderExt starts at the beginning of handled memory, advance the ptr */
    if (mhe == ptr)
        ptr += ROUNDUP(sizeof(struct MemHeaderExt));

    /* Is there enough room for tlsf in the mem header itself? */
    if (mhe->mhe_MemHeader.mh_Free >= (ROUNDUP(sizeof(tlsf_t)) + 3 * ROUNDUP(sizeof(bhdr_t))))
    {
        /* tlsf will be stored inside handled memory */
        tlsf = (tlsf_t *)ptr;

        ptr += ROUNDUP(sizeof(tlsf_t));

        bzero(tlsf, sizeof(tlsf_t));
        tlsf->autodestroy_self = 0;
    }
    else
    {
        /* No place for tlsf header in MemHeaderExt? Allocate it separately */
        tlsf = AllocMem(sizeof(tlsf_t), MEMF_ANY);

        if (tlsf)
        {
            bzero(tlsf, sizeof(tlsf_t));
            tlsf->autodestroy_self = 1;
        }
    }

    /* Store the tlsf pointer in UserData field */
    mhe->mhe_UserData = tlsf;

    if (tlsf && ptr < mhe->mhe_MemHeader.mh_Upper)
    {
        tlsf_add_memory(mhe, ptr, (IPTR)mhe->mhe_MemHeader.mh_Upper - (IPTR)ptr);
    }

    return tlsf;
}

static void * tlsf_init_autogrow(struct MemHeaderExt * mhe, IPTR puddle_size, ULONG requirements, autogrow_get grow_function, autogrow_release release_function, APTR autogrow_data)
{
    tlsf_t *tlsf = tlsf_init(mhe);

    if (tlsf)
    {
        if (puddle_size < 4096)
            puddle_size = 4096;

        tlsf->autogrow_puddle_size = puddle_size;
        tlsf->autogrow_requirements = requirements;
        tlsf->autogrow_data = autogrow_data;
        tlsf->autogrow_get_fn = grow_function;
        tlsf->autogrow_release_fn = release_function;
    }

    return tlsf;
}

void tlsf_destroy(struct MemHeaderExt * mhe)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p)\n", __PRETTY_FUNCTION__, tlsf));

    if (tlsf)
    {
        tlsf_area_t *area = tlsf->memory_area;

        if (tlsf->autogrow_release_fn)
        {
            while (area)
            {
                tlsf_area_t *next = area->next;

                /*
                 * Autogrown area? Release it here.
                 * Otherwise it's the responsibility of the add_memory_area caller.
                 */
                if (area->autogrown)
                    tlsf_release_memory_area(mhe, area);

                area = next;
            }
        }

        if (tlsf->autodestroy_self)
            FreeMem(tlsf, sizeof(tlsf_t));
    }
}

IPTR tlsf_avail(struct MemHeaderExt * mhe, ULONG requirements)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    IPTR ret = 0;

    if (requirements & MEMF_TOTAL)
        ret = tlsf->total_size;
    else if (requirements & MEMF_LARGEST)
    {
        bhdr_t *b = NULL;

        if (tlsf->flbitmap)
        {
            int fl = MS(tlsf->flbitmap);

            if (tlsf->slbitmap[fl])
            {
                int sl = MS(tlsf->slbitmap[fl]);

                b = tlsf->matrix[fl][sl];
            }
        }

        while (b)
        {
            if (GET_SIZE(b) > ret)
                ret = GET_SIZE(b);

            b = b->free_node.next;
        }
    }
    else
        ret = tlsf->free_size;

    return ret;
}

BOOL tlsf_in_bounds(struct MemHeaderExt * mhe, void * begin, void * end)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    tlsf_area_t *area;

    area = tlsf->memory_area;

    D(nbug("[Kernel:TLSF] %s(%p, %p, %p)\n", __PRETTY_FUNCTION__, tlsf, begin, end));

    while (area)
    {
        D(nbug("[Kernel:TLSF] %s: area %p\n", __PRETTY_FUNCTION__, area));

        /*
         * Do checks only if questioned memory ends before the end (sentinel bhdr)
         * of area
         */
        if ((IPTR)end <= (IPTR)area->end)
        {
            D(nbug("[Kernel:TLSF] %s end <= area->end (%p <= %p)\n", __PRETTY_FUNCTION__, end, area->end));

            /* Get the bhdr of this area */
            bhdr_t *b = MEM_TO_BHDR(area);

            /* Forward to the begin of the memory */
            b = GET_NEXT_BHDR(b, GET_SIZE(b));

            /* requested memory starts at begin or after begin of the area */
            if ((IPTR)begin >= (IPTR)b->mem)
            {
                D(nbug("[Kernel:TLSF] %s begin >= b->mem (%p >= %p)\n", __PRETTY_FUNCTION__, begin, b->mem));
                return TRUE;
            }
        }

        area = area->next;
    }

    return FALSE;
}

static void destroy_Pool(struct MemHeaderExt *mhe)
{
    tlsf_destroy(mhe);
}

static APTR fetch_more_ram(void * data, IPTR *size)
{
    struct MemHeaderExt *mhe = (struct MemHeaderExt *)data;
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p, %u)\n", __PRETTY_FUNCTION__, mhe, *size));

    APTR ptr = AllocMem(*size, tlsf->autogrow_requirements);
    return ptr;
}

static VOID release_ram(void * data, APTR ptr, IPTR size)
{
    D(nbug("[Kernel:TLSF] %s(%p, %u)\n", __PRETTY_FUNCTION__, ptr, size));

    FreeMem(ptr, size);
}

static void * init_Pool(struct MemHeaderExt *mhe, IPTR puddleSize, IPTR initialSize)
{
    return tlsf_init_autogrow(mhe, puddleSize, (ULONG)(IPTR)mhe->mhe_MemHeader.mh_First, fetch_more_ram, release_ram, mhe);
}

void krnCreateTLSFMemHeader(CONST_STRPTR name, BYTE pri, APTR start, IPTR size, ULONG flags)
{
    /* If the end is less than (1 << 31), MEMF_31BIT is implied */
    if (((IPTR)start + size) < (1UL << 31))
        flags |= MEMF_31BIT;
    else
        flags &= ~MEMF_31BIT;

    flags |= MEMF_MANAGED;

    struct MemHeaderExt *mhe = start;

    mhe->mhe_Magic = MEMHEADER_EXT_MAGIC;

    mhe->mhe_DestroyPool = destroy_Pool;
    mhe->mhe_InitPool = init_Pool;

    mhe->mhe_Alloc = tlsf_malloc;
    mhe->mhe_AllocVec = tlsf_malloc;
    mhe->mhe_AllocAligned = tlsf_malloc_aligned;
    mhe->mhe_AllocVecAligned = tlsf_malloc_aligned;
    mhe->mhe_Free = tlsf_freemem;
    mhe->mhe_FreeVec = tlsf_freevec;
    mhe->mhe_AllocAbs = tlsf_allocabs;
    mhe->mhe_ReAlloc = tlsf_realloc;
    mhe->mhe_Avail = tlsf_avail;
    mhe->mhe_InBounds = tlsf_in_bounds;

    mhe->mhe_MemHeader.mh_Node.ln_Succ = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Pred = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Type = NT_MEMORY;
    mhe->mhe_MemHeader.mh_Node.ln_Name = (STRPTR)name;
    mhe->mhe_MemHeader.mh_Node.ln_Pri = pri;
    mhe->mhe_MemHeader.mh_Attributes = flags;

    /* Managed memory does not use MemChunks, so the mh_First list stays empty */
    mhe->mhe_MemHeader.mh_First = NULL;

    mhe->mhe_UserData = NULL;

    /*
     * mh_Lower and mh_Upper are informational only. Since our MemHeader resides
     * inside the region it describes, the region includes the MemHeader.
     */
    mhe->mhe_MemHeader.mh_Lower = start;
    mhe->mhe_MemHeader.mh_Upper = start + size;
    mhe->mhe_MemHeader.mh_Free = size;

    D(nbug("[Kernel:TLSF] %s: 0x%p -> 0x%p\n", __PRETTY_FUNCTION__, mhe->mhe_MemHeader.mh_Lower, mhe->mhe_MemHeader.mh_Upper));

    tlsf_init(mhe);
}

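#if 0
/* Illustration only (kept out of the build): carving a TLSF-managed memory
 * header out of a raw region. The address, size, name and flags below are
 * made up for the example; the MemHeaderExt itself is placed at the start
 * of the region by krnCreateTLSFMemHeader().
 */
static void example_tlsf_region(void)
{
    APTR start = (APTR)0x08000000;      /* hypothetical free region */
    IPTR size  = 16 * 1024 * 1024;

    krnCreateTLSFMemHeader("example ram", 0, start, size, MEMF_FAST);
}
#endif
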
struct MemHeader * krnConvertMemHeaderToTLSF(struct MemHeader * source)
{
    struct MemChunk * mc = source->mh_First->mc_Next;
    APTR mh = source->mh_First;
    IPTR fsize = source->mh_First->mc_Bytes;

    if (source->mh_Attributes & MEMF_MANAGED)
        return NULL;

    /* First chunk will host the mem header */
    krnCreateTLSFMemHeader(source->mh_Node.ln_Name, source->mh_Node.ln_Pri, mh, fsize,
                           source->mh_Attributes);
    /* source->mh_First is destroyed beyond this point */

    /* Add remaining chunks */
    while (mc)
    {
        APTR p = mc->mc_Next;

        tlsf_add_memory(mh, mc, mc->mc_Bytes);
        /* mc is destroyed beyond this point */
        mc = p;
    }

    return mh;
}