/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.
*/
#include <exec/types.h>
#include <exec/memory.h>
#include <exec/memheaderext.h>
#include <proto/exec.h>

#include <stddef.h>

#include "tlsf.h"
#include "kernel_base.h"
#include "kernel_debug.h"

/* Debug output is compiled out unless D() is provided elsewhere */
#if !defined(D)
#define D(x)
#endif
/*
 * Minimal alignment as required by AROS. Contrary to the default
 * TLSF implementation, we do not allow smaller blocks here.
 * Size needs to be aligned to at least 8, see the THIS_FREE_MASK comment.
 */
#define SIZE_ALIGN  AROS_WORSTALIGN
/*
 * Settings for the TLSF allocator:
 * MAX_LOG2_SLI - number of bits used for the second level list
 * MAX_FLI      - maximal allowable allocation size; 2^32 should be enough on
 *                32-bit systems, 64-bit systems use a 128GB limit.
 */
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)
#define MAX_FLI         (32 + 5)
#define FLI_OFFSET      (6)
#define SMALL_BLOCK     (2 << FLI_OFFSET)

#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
#define ROUNDUP(x)      (((x) + SIZE_ALIGN - 1) & ~(SIZE_ALIGN - 1))
#define ROUNDDOWN(x)    ((x) & ~(SIZE_ALIGN - 1))
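/*
 * Worked example (assuming SIZE_ALIGN is 8 on this platform): ROUNDUP(13)
 * yields 16, ROUNDDOWN(13) yields 8. Both macros rely on SIZE_ALIGN being a
 * power of two, so ~(SIZE_ALIGN - 1) masks off exactly the low alignment bits.
 */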
/* Fields used in the block header length field to identify busy/free blocks */
#define THIS_FREE_MASK (IPTR)1
#define THIS_FREE   (IPTR)1
#define THIS_BUSY   (IPTR)0

#define PREV_FREE_MASK (IPTR)2
#define PREV_FREE   (IPTR)2
#define PREV_BUSY   (IPTR)0

#define SIZE_MASK   (~(THIS_FREE_MASK | PREV_FREE_MASK))
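/*
 * Because block sizes are multiples of SIZE_ALIGN, the two lowest bits of the
 * length field are free to carry the flags. Example: a free 0x140-byte block
 * that follows a busy one stores 0x140 | THIS_FREE | PREV_BUSY = 0x141 in
 * header.length, and GET_SIZE() recovers 0x140 by masking with SIZE_MASK.
 */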
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* Size of additional memory needed to manage a new block */
#define HEADERS_SIZE (((3 * ROUNDUP(sizeof(hdr_t))) + ROUNDUP(sizeof(tlsf_area_t))))
/* The free node links together all free blocks of similar size */
typedef struct free_node_s
{
    struct bhdr_s  *prev;
    struct bhdr_s  *next;
} free_node_t;
/* A block header sits in front of each block - both free and busy */
typedef struct hdr_s
{
    struct bhdr_s  *prev;
    IPTR            length;
} hdr_t;
/*
 * Each block is described by a bhdr_t structure. Busy blocks carry only the
 * header, which allows us to walk through all memory blocks in the system.
 * Free blocks additionally contain the node which chains them into one of
 * the free block lists.
 */
typedef struct bhdr_s
{
    union
    {
        hdr_t   header;
        UBYTE   __min_align[SIZE_ALIGN];
    };
    union
    {
        UBYTE       mem[1];
        free_node_t free_node;
    };
} bhdr_t;
/* Memory area within the TLSF pool */
typedef struct tlsf_area_s
{
    struct tlsf_area_s *next;       // Next memory area
    bhdr_t             *end;        // Pointer to the "end-of-area" block header
    LONG                autogrown;  // Automatically allocated by the TLSF pool
} tlsf_area_t;
typedef struct tlsf_s
{
    tlsf_area_t     *memory_area;

    IPTR             total_size;
    IPTR             free_size;

    ULONG            flbitmap;
    ULONG            slbitmap[REAL_FLI];

    IPTR             autogrow_puddle_size;
    ULONG            autogrow_requirements;
    APTR             autogrow_data;
    autogrow_get     autogrow_get_fn;
    autogrow_release autogrow_release_fn;

    UBYTE            autodestroy_self;

    bhdr_t          *matrix[REAL_FLI][MAX_SLI];
} tlsf_t;
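/*
 * The two-level bitmaps mirror the matrix: bit fl of flbitmap is set exactly
 * when slbitmap[fl] is non-zero, and bit sl of slbitmap[fl] is set exactly
 * when the list at matrix[fl][sl] is non-empty. This invariant is what lets
 * FIND_SUITABLE_BLOCK below locate a non-empty list with two bit scans
 * instead of walking the matrix.
 */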
/* Index of the least significant set bit (-1 when i is 0) */
static inline __attribute__((always_inline)) int LS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return __builtin_ffs(i) - 1;
    else
        return __builtin_ffsl(i) - 1;
}
/* Index of the most significant set bit (undefined when i is 0) */
static inline __attribute__((always_inline)) int MS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return 31 - __builtin_clz(i);
    else
        return 63 - __builtin_clzl(i);
}
static inline __attribute__((always_inline)) void SetBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] |= (1 << (nr & 31));
}
static inline __attribute__((always_inline)) void ClrBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] &= ~(1 << (nr & 31));
}
/* Map a block size to the first/second level indices used by the matrix */
static inline __attribute__((always_inline)) void MAPPING_INSERT(IPTR r, int *fl, int *sl)
{
    if (r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        *fl = MS(r);
        *sl = (int)(((IPTR)r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
    }
}
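/*
 * Worked example (the *fl -= FLI_OFFSET step above is the compression of the
 * first level into the REAL_FLI matrix rows): r = 4096 gives MS(4096) = 12,
 * sl = (4096 >> (12 - 5)) - 32 = 0 and fl = 12 - 6 = 6, i.e. matrix[6][0].
 * A small request of r = 100 maps to fl = 0, sl = 100 / (128 / 32) = 25.
 */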
static inline __attribute__((always_inline)) void MAPPING_SEARCH(IPTR *r, int *fl, int *sl)
{
    if (*r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(*r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        IPTR tmp = ((IPTR)1 << (MS(*r) - MAX_LOG2_SLI)) - 1;
        IPTR tr = *r + tmp;

        *fl = MS(tr);
        *sl = (int)(((IPTR)tr >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;

        *r = tr & ~tmp;
    }
}
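/*
 * Unlike MAPPING_INSERT, this rounds the request *up* to the next size class
 * so that any list found is guaranteed to satisfy it. Example: *r = 1000
 * gives tmp = (1 << (MS(1000) - 5)) - 1 = 15 and tr = 1015, so the request
 * becomes *r = 1015 & ~15 = 1008 and the indices are computed from tr rather
 * than from the original size.
 */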
static inline __attribute__((always_inline)) bhdr_t * FIND_SUITABLE_BLOCK(tlsf_t *tlsf, int *fl, int *sl)
{
    IPTR bitmap_tmp = tlsf->slbitmap[*fl] & (~0 << *sl);
    bhdr_t *b = NULL;

    if (bitmap_tmp)
    {
        *sl = LS(bitmap_tmp);
        b = tlsf->matrix[*fl][*sl];
    }
    else
    {
        /* Nothing in this row at or above *sl - scan the first level upwards */
        bitmap_tmp = tlsf->flbitmap & (~0 << (*fl + 1));
        if (likely(bitmap_tmp != 0))
        {
            *fl = LS(bitmap_tmp);
            *sl = LS(tlsf->slbitmap[*fl]);
            b = tlsf->matrix[*fl][*sl];
        }
    }

    return b;
}
#ifdef USE_MACROS

#define GET_SIZE(b) ({ IPTR size = b->header.length & SIZE_MASK; size; })
#define GET_FLAGS(b) ({ IPTR flags = b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK); flags; })
#define SET_SIZE(b, size) do{ b->header.length = GET_FLAGS(b) | (size); }while(0)
#define SET_FLAGS(b, flags) do{ b->header.length = GET_SIZE(b) | (flags); }while(0)
#define SET_SIZE_AND_FLAGS(b, size, flags) do{b->header.length = (size) | (flags);}while(0)
#define FREE_BLOCK(b) ((b->header.length & THIS_FREE_MASK) == THIS_FREE)
#define SET_FREE_BLOCK(b) do{b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE;}while(0)
#define SET_BUSY_BLOCK(b) do{b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY;}while(0)
#define SET_FREE_PREV_BLOCK(b) do{b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE;}while(0)
#define SET_BUSY_PREV_BLOCK(b) do{b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY;}while(0)
#define FREE_PREV_BLOCK(b) ((b->header.length & PREV_FREE_MASK) == PREV_FREE)
#define GET_NEXT_BHDR(hdr, size) ({ bhdr_t * __b = (bhdr_t *)((UBYTE *)&hdr->mem[0] + (size)); __b; })
#define MEM_TO_BHDR(ptr) ({ bhdr_t * b = (bhdr_t*)((void*)(ptr) - offsetof(bhdr_t, mem)); b; })
#define REMOVE_HEADER(tlsf, b, fl, sl) do{ \
    if (b->free_node.next) \
        b->free_node.next->free_node.prev = b->free_node.prev; \
    if (b->free_node.prev) \
        b->free_node.prev->free_node.next = b->free_node.next; \
    if (tlsf->matrix[fl][sl] == b) { \
        tlsf->matrix[fl][sl] = b->free_node.next; \
        if (!tlsf->matrix[fl][sl]) \
            ClrBit(sl, &tlsf->slbitmap[fl]); \
        if (!tlsf->slbitmap[fl]) \
            ClrBit(fl, &tlsf->flbitmap); \
    } }while(0)

#define INSERT_FREE_BLOCK(tlsf, b) do { \
    int fl, sl; MAPPING_INSERT(GET_SIZE(b), &fl, &sl); \
    b->free_node.prev = NULL; \
    b->free_node.next = tlsf->matrix[fl][sl]; \
    if (tlsf->matrix[fl][sl]) \
        tlsf->matrix[fl][sl]->free_node.prev = b; \
    tlsf->matrix[fl][sl] = b; \
    SetBit(fl, &tlsf->flbitmap); \
    SetBit(sl, &tlsf->slbitmap[fl]); }while(0)
#else

static inline __attribute__((always_inline)) IPTR GET_SIZE(bhdr_t *b)
{
    return b->header.length & SIZE_MASK;
}
static inline __attribute__((always_inline)) IPTR GET_FLAGS(bhdr_t *b)
{
    return b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK);
}
static inline __attribute__((always_inline)) void SET_SIZE(bhdr_t *b, IPTR size)
{
    b->header.length = GET_FLAGS(b) | size;
}
static inline __attribute__((always_inline)) void SET_SIZE_AND_FLAGS(bhdr_t *b, IPTR size, IPTR flags)
{
    b->header.length = size | flags;
}
static inline __attribute__((always_inline)) int FREE_BLOCK(bhdr_t *b)
{
    return ((b->header.length & THIS_FREE_MASK) == THIS_FREE);
}
static inline __attribute__((always_inline)) void SET_FREE_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE;
}
static inline __attribute__((always_inline)) void SET_BUSY_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY;
}
static inline __attribute__((always_inline)) void SET_FREE_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE;
}
static inline __attribute__((always_inline)) void SET_BUSY_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY;
}
static inline __attribute__((always_inline)) int FREE_PREV_BLOCK(bhdr_t *b)
{
    return ((b->header.length & PREV_FREE_MASK) == PREV_FREE);
}
static inline __attribute__((always_inline)) bhdr_t * GET_NEXT_BHDR(bhdr_t *hdr, IPTR size)
{
    return (bhdr_t *)((UBYTE *)&hdr->mem[0] + size);
}
static inline __attribute__((always_inline)) bhdr_t * MEM_TO_BHDR(void *ptr)
{
    return (bhdr_t *)(ptr - offsetof(bhdr_t, mem));
}
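/*
 * GET_NEXT_BHDR and MEM_TO_BHDR are inverses of the block layout: the user
 * memory starts at offsetof(bhdr_t, mem) inside the header, so for any block
 * MEM_TO_BHDR(&b->mem[0]) == b, and the header of the physically following
 * block sits at &b->mem[0] + GET_SIZE(b).
 */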
static inline __attribute__((always_inline)) void REMOVE_HEADER(tlsf_t *tlsf, bhdr_t *b, int fl, int sl)
{
    if (b->free_node.next)
        b->free_node.next->free_node.prev = b->free_node.prev;
    if (b->free_node.prev)
        b->free_node.prev->free_node.next = b->free_node.next;

    if (tlsf->matrix[fl][sl] == b)
    {
        tlsf->matrix[fl][sl] = b->free_node.next;
        if (!tlsf->matrix[fl][sl])
            ClrBit(sl, &tlsf->slbitmap[fl]);
        if (!tlsf->slbitmap[fl])
            ClrBit(fl, &tlsf->flbitmap);
    }
}
static inline __attribute__((always_inline)) void INSERT_FREE_BLOCK(tlsf_t *tlsf, bhdr_t *b)
{
    int fl, sl;

    MAPPING_INSERT(GET_SIZE(b), &fl, &sl);

    b->free_node.prev = NULL;
    b->free_node.next = tlsf->matrix[fl][sl];

    if (tlsf->matrix[fl][sl])
        tlsf->matrix[fl][sl]->free_node.prev = b;

    tlsf->matrix[fl][sl] = b;

    SetBit(fl, &tlsf->flbitmap);
    SetBit(sl, &tlsf->slbitmap[fl]);
}
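/*
 * Note the LIFO discipline: a freed block is pushed at the head of its
 * matrix[fl][sl] list, and both bitmap bits are set unconditionally, which
 * is cheaper than testing whether they were already set.
 */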
#endif /* USE_MACROS */
void * tlsf_malloc(struct MemHeaderExt *mhe, IPTR size, ULONG *flags)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    int fl, sl;
    bhdr_t *b = NULL;

    size = ROUNDUP(size);

    if (unlikely(!size)) return NULL;

    D(nbug("[Kernel:TLSF] %s(%p, %ld)\n", __PRETTY_FUNCTION__, tlsf, size));

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Find the indices fl and sl for the given size */
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Find a block of either the right size or larger */
    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);

    D(nbug("[Kernel:TLSF] %s: adjusted size %ld\n", __PRETTY_FUNCTION__, size));

    /* No block found? Either a failure, or tlsf will get more memory. */
    if (unlikely(!b))
    {
        D(nbug("[Kernel:TLSF] %s: out of memory\n", __PRETTY_FUNCTION__));

        /* Do we have the autogrow feature? */
        if (tlsf->autogrow_get_fn)
        {
            /* Increase the size of the requested block so that we can fit the headers too */
            IPTR sz = size + HEADERS_SIZE;

            /* Requested size less than the puddle size? Get the puddle size then */
            if (sz < tlsf->autogrow_puddle_size)
                sz = tlsf->autogrow_puddle_size;

            D(nbug("[Kernel:TLSF] %s: querying for %u bytes\n", __PRETTY_FUNCTION__, sz));

            /* Try to get some memory */
            void *ptr = tlsf->autogrow_get_fn(tlsf->autogrow_data, &sz);

            /* Got it? Add it to tlsf then */
            if (ptr)
            {
                tlsf_add_memory(mhe, ptr, sz);

                /* We know the newly added memory is first in the list. Set the autogrown flag there */
                tlsf->memory_area->autogrown = 1;

                /* Memory is there. Try to find the block again */
                MAPPING_SEARCH(&size, &fl, &sl);
                b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
            }
        }

        /* Still no block? FAILURE! */
        if (!b)
        {
            if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
                ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

            return NULL;
        }
    }

    /* Block header of the physically following block */
    bhdr_t *next = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Remove the found block from the free list */
    REMOVE_HEADER(tlsf, b, fl, sl);

    /* Is this block larger than requested? Try to split it then */
    if (likely(GET_SIZE(b) > (size + ROUNDUP(sizeof(hdr_t)))))
    {
        /* New split block */
        bhdr_t *sb = GET_NEXT_BHDR(b, size);

        /* Set its size; this block is free, the previous one busy */
        SET_SIZE_AND_FLAGS(sb, GET_SIZE(b) - size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* The next header points to the free block now */
        next->header.prev = sb;

        /* The previous block (sb) is free */
        SET_FREE_PREV_BLOCK(next);

        /* Allocated block size is truncated */
        SET_SIZE(b, size);

        D(nbug("[Kernel:TLSF] %s: block split, %ld bytes remaining\n", __PRETTY_FUNCTION__, GET_SIZE(sb)));

        /* The free block is inserted into the free list */
        INSERT_FREE_BLOCK(tlsf, sb);
    }
    else
    {
        /* The block was of the right size. Just mark it busy in the next header */
        SET_BUSY_PREV_BLOCK(next);
    }

    /* The allocated block is busy */
    SET_BUSY_BLOCK(b);

    /* Clear the pointers just in case */
    b->free_node.next = NULL;
    b->free_node.prev = NULL;

    /* Update counters */
    tlsf->free_size -= GET_SIZE(b);
    mhe->mhe_MemHeader.mh_Free = tlsf->free_size;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    if (flags && (*flags & MEMF_CLEAR))
        bzero(&b->mem[0], size);

    /* And return the memory */
    return &b->mem[0];
}
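/*
 * A minimal usage sketch, assuming a MemHeaderExt already set up by
 * krnCreateTLSFMemHeader() below (the function pointers are wired there):
 *
 *     ULONG flags = MEMF_CLEAR;
 *     APTR mem = mhe->mhe_Alloc(mhe, 256, &flags);   // calls tlsf_malloc()
 *     if (mem)
 *         mhe->mhe_FreeVec(mhe, mem);                // calls tlsf_freevec()
 */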
static inline __attribute__((always_inline)) void MERGE(bhdr_t *b1, bhdr_t *b2)
{
    /* Merging adjusts the size: the sum of both sizes plus the size of a block header */
    SET_SIZE(b1, GET_SIZE(b1) + GET_SIZE(b2) + ROUNDUP(sizeof(hdr_t)));
}
static inline __attribute__((always_inline)) bhdr_t * MERGE_PREV(tlsf_t *tlsf, bhdr_t *block)
{
    /* Is the previous block free? */
    if (FREE_PREV_BLOCK(block))
    {
        int fl, sl;
        bhdr_t *prev = block->header.prev;

        /* Calculate the index for removal */
        MAPPING_INSERT(GET_SIZE(prev), &fl, &sl);

        /* Do remove the header from the list */
        REMOVE_HEADER(tlsf, prev, fl, sl);

        /* Merge the previous block with this one and return the merged block */
        MERGE(prev, block);

        return prev;
    }

    return block;
}
static inline __attribute__((always_inline)) bhdr_t * MERGE_NEXT(tlsf_t *tlsf, bhdr_t *block)
{
    bhdr_t *next = GET_NEXT_BHDR(block, GET_SIZE(block));

    /* Is the next block free? */
    if (FREE_BLOCK(next))
    {
        int fl, sl;

        /* Calculate the index for removal */
        MAPPING_INSERT(GET_SIZE(next), &fl, &sl);

        /* Remove the header from the list */
        REMOVE_HEADER(tlsf, next, fl, sl);

        /* Swallow the next block into this one */
        MERGE(block, next);
    }

    return block;
}
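/*
 * Merge arithmetic example: coalescing two adjacent free blocks of 64 bytes
 * each gives GET_SIZE(b1) + GET_SIZE(b2) + ROUNDUP(sizeof(hdr_t)) = 136 bytes
 * when the rounded header is 8 bytes, because the swallowed block's header
 * ceases to exist as a separate block and becomes payload.
 */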
static void tlsf_release_memory_area(struct MemHeaderExt * mhe, tlsf_area_t * area)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    /* Treat the list head pointer as a pseudo-area; valid because next is the first member */
    tlsf_area_t *p = (tlsf_area_t *)(&tlsf->memory_area - offsetof(tlsf_area_t, next));
    bhdr_t *b;
    bhdr_t *begin;
    bhdr_t *end;
    IPTR size;

    /* Get the begin of this area */
    begin = MEM_TO_BHDR(area);

    /* Get the sentinel block */
    b = area->end;

    /* The end of this area is the end of the sentinel block */
    end = GET_NEXT_BHDR(b, 0);

    /* Calculate the size of the area */
    size = (IPTR)end - (IPTR)begin;

    /* Update the counters */
    tlsf->total_size -= size;
    tlsf->free_size -= GET_SIZE(area->end->header.prev);

    /* Remove the area from the list */
    for (; p->next != NULL; p = p->next)
    {
        if (p->next == area)
        {
            p->next = area->next;
            break;
        }
    }

    /* Release the memory */
    if (tlsf->autogrow_release_fn)
        tlsf->autogrow_release_fn(tlsf->autogrow_data, begin, size);
}
void * tlsf_malloc_aligned(struct MemHeaderExt *mhe, IPTR size, IPTR align, ULONG *flags)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    void *ptr;
    bhdr_t *b;

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    size = ROUNDUP(size);

    D(nbug("[Kernel:TLSF] %s(%p, %lx, %u)\n", __PRETTY_FUNCTION__, mhe, size, align));

    /* Adjust align to a power of two (1 << MS(align) keeps the highest set bit) */
    align = 1 << MS(align);

    D(nbug("[Kernel:TLSF] %s: adjusted align = %u\n", __PRETTY_FUNCTION__, align));

    /* Overallocate, then return an aligned address inside the block */
    ptr = tlsf_malloc(mhe, size + align, flags);
    b = MEM_TO_BHDR(ptr);

    D(nbug("[Kernel:TLSF] %s: allocated region @%p\n", __PRETTY_FUNCTION__, ptr));

    if (align > SIZE_ALIGN)
    {
        void *aligned_ptr = (void *)(((IPTR)ptr + align - 1) & ~(align - 1));
        bhdr_t *aligned_bhdr = MEM_TO_BHDR(aligned_ptr);
        IPTR diff_begin = (IPTR)aligned_bhdr - (IPTR)b;
        IPTR diff_end = (IPTR)GET_NEXT_BHDR(b, GET_SIZE(b)) - (IPTR)GET_NEXT_BHDR(aligned_bhdr, size);

        SET_SIZE(aligned_bhdr, size);

        if (aligned_ptr != ptr)
        {
            D(nbug("[Kernel:TLSF] %s: aligned ptr: %p\n", __PRETTY_FUNCTION__, aligned_ptr));
            D(nbug("[Kernel:TLSF] %s: difference begin: %d\n", __PRETTY_FUNCTION__, diff_begin));
            D(nbug("[Kernel:TLSF] %s: difference end: %d\n", __PRETTY_FUNCTION__, diff_end));

            if (diff_begin > 0)
            {
                /* Return the gap in front of the aligned block to the free pool */
                SET_SIZE(b, diff_begin - ROUNDUP(sizeof(hdr_t)));

                tlsf->free_size += GET_SIZE(b);

                aligned_bhdr->header.prev = b;
                SET_FREE_PREV_BLOCK(aligned_bhdr);
                SET_FREE_BLOCK(b);

                b = MERGE_PREV(tlsf, b);

                D(nbug("[Kernel:TLSF] %s: block @%p, b->next %p\n", __PRETTY_FUNCTION__, b, GET_NEXT_BHDR(b, GET_SIZE(b))));

                /* Insert the free block into the proper list */
                INSERT_FREE_BLOCK(tlsf, b);
            }

            ptr = &aligned_bhdr->mem[0];
        }

        if (diff_end > 0)
        {
            /* Return the gap behind the aligned block to the free pool */
            bhdr_t *b1 = GET_NEXT_BHDR(aligned_bhdr, GET_SIZE(aligned_bhdr));
            bhdr_t *next;

            b1->header.prev = aligned_bhdr;

            SET_SIZE(b1, diff_end - ROUNDUP(sizeof(hdr_t)));
            SET_BUSY_PREV_BLOCK(b1);
            SET_FREE_BLOCK(b1);

            next = GET_NEXT_BHDR(b1, GET_SIZE(b1));
            next->header.prev = b1;
            SET_FREE_PREV_BLOCK(next);

            b1 = MERGE_NEXT(tlsf, b1);

            INSERT_FREE_BLOCK(tlsf, b1);

            tlsf->free_size += GET_SIZE(b1);
        }
    }

    /* Debug dump of the block chain, starting at the returned block */
    D({
        bhdr_t *b2 = MEM_TO_BHDR(ptr);

        while (b2 && GET_SIZE(b2))
        {
            nbug("[Kernel:TLSF] %s: bhdr %p, mem %p, size=%08x, flags=%x, prev=%p\n",
                __PRETTY_FUNCTION__, b2, &b2->mem[0], GET_SIZE(b2), GET_FLAGS(b2), b2->header.prev);

            b2 = GET_NEXT_BHDR(b2, GET_SIZE(b2));
        }
    });

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return ptr;
}
void tlsf_freevec(struct MemHeaderExt * mhe, APTR ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *fb;
    bhdr_t *next;
    tlsf_area_t *area;

    if (unlikely(!ptr))
        return;

    fb = MEM_TO_BHDR(ptr);

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Mark the block as free */
    SET_FREE_BLOCK(fb);

    /* Adjust the free size field of tlsf */
    tlsf->free_size += GET_SIZE(fb);

    /* Try to merge with the previous and next blocks (if free) */
    fb = MERGE_PREV(tlsf, fb);
    fb = MERGE_NEXT(tlsf, fb);

    /* Tell the next block that the previous one is free. Also update the prev link in case it changed */
    next = GET_NEXT_BHDR(fb, GET_SIZE(fb));
    SET_FREE_PREV_BLOCK(next);
    next->header.prev = fb;

    /* Check if this was the last used block of an autogrown area */
    area = fb->header.prev->header.prev == NULL ? (tlsf_area_t *)fb->header.prev->mem : NULL;
    if (area != NULL && area->end == next && area->autogrown == 1)
        tlsf_release_memory_area(mhe, area);
    else
    {
        /* Insert the free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, fb);
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);
}
void tlsf_freemem(struct MemHeaderExt * mhe, APTR ptr, IPTR size)
{
    /* TLSF tracks block sizes itself, so the size argument is not needed */
    (void)size;
    tlsf_freevec(mhe, ptr);
}
void * tlsf_realloc(struct MemHeaderExt *mhe, APTR ptr, IPTR new_size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *b;
    bhdr_t *bnext;

    /* NULL pointer? Just allocate the memory */
    if (unlikely(!ptr))
        return tlsf_malloc(mhe, new_size, NULL);

    /* size = 0? Free the memory */
    if (unlikely(!new_size))
    {
        tlsf_freevec(mhe, ptr);
        return NULL;
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    new_size = ROUNDUP(new_size);

    b = MEM_TO_BHDR(ptr);

    /* The same size? Nothing to do then */
    if (unlikely(new_size == GET_SIZE(b)))
        goto done;

    bnext = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Is the new size smaller than the previous one? Try to split the block if this is the case */
    if (new_size <= (GET_SIZE(b)))
    {
        /* New header starts right after the current block b */
        bhdr_t * b1 = GET_NEXT_BHDR(b, new_size);

        /* Update the pointer and size */
        b1->header.prev = b;
        SET_SIZE_AND_FLAGS(b1, GET_SIZE(b) - new_size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* The current block gets smaller */
        SET_SIZE(b, new_size);

        tlsf->free_size += GET_SIZE(b1);

        /* Try to merge with the next block */
        b1 = MERGE_NEXT(tlsf, b1);

        /* Tell the next block that the previous one is free. Also update the prev link in case it changed */
        bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
        SET_FREE_PREV_BLOCK(bnext);
        bnext->header.prev = b1;

        /* Insert the free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, b1);
    }
    else
    {
        /* Is the next block free? Is there enough free space? */
        if (FREE_BLOCK(bnext) && new_size <= GET_SIZE(b) + GET_SIZE(bnext) + ROUNDUP(sizeof(hdr_t)))
        {
            IPTR rest_size = ROUNDUP(sizeof(hdr_t)) + GET_SIZE(bnext) + GET_SIZE(b) - new_size;
            bhdr_t *b1;
            int fl, sl;

            MAPPING_INSERT(GET_SIZE(bnext), &fl, &sl);

            REMOVE_HEADER(tlsf, bnext, fl, sl);

            if (rest_size > ROUNDUP(sizeof(hdr_t)))
            {
                /* Enough room is left behind the grown block to form a new free block */
                rest_size -= ROUNDUP(sizeof(hdr_t));

                SET_SIZE(b, new_size);

                b1 = GET_NEXT_BHDR(b, GET_SIZE(b));
                b1->header.prev = b;

                SET_SIZE_AND_FLAGS(b1, rest_size, THIS_FREE | PREV_BUSY);

                bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
                bnext->header.prev = b1;
                SET_FREE_PREV_BLOCK(bnext);

                INSERT_FREE_BLOCK(tlsf, b1);
            }
            else
            {
                /* The leftover is too small for a block of its own - absorb it */
                if (rest_size)
                    SET_SIZE(b, new_size + ROUNDUP(sizeof(hdr_t)));
                else
                    SET_SIZE(b, new_size);

                bnext = GET_NEXT_BHDR(b, GET_SIZE(b));
                bnext->header.prev = b;
                SET_BUSY_PREV_BLOCK(bnext);
            }
        }
        else
        {
            /* The next block was not free. Create a new buffer and copy the old contents there */
            void * p = tlsf_malloc(mhe, new_size, NULL);
            if (p)
            {
                CopyMemQuick(ptr, p, GET_SIZE(b));
                tlsf_freevec(mhe, ptr);
                ptr = p;
            }
        }
    }

done:
    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return ptr;
}
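/*
 * Summary of the cases above: an equal size is a no-op; shrinking splits the
 * tail off and coalesces it with the following block; growing is done in
 * place when the next block is free and large enough, otherwise a new block
 * is allocated, GET_SIZE(b) bytes are copied with CopyMemQuick() and the old
 * block is freed.
 */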
void * tlsf_allocabs(struct MemHeaderExt * mhe, IPTR size, void * ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    UBYTE *region_start;
    UBYTE *region_end;
    IPTR region_size;
    int fl, sl;

    D(nbug("[Kernel:TLSF] %s(%p, %ld)\n", __PRETTY_FUNCTION__, ptr, size));

    /*
     * The returned memory needs to meet two requirements:
     * a) the requested range is within the returned memory (AllocAbs definition)
     * b) the returned address is LONG aligned (needed by the TLSF implementation)
     */
    region_start = (UBYTE *)((IPTR)ptr & SIZE_MASK);
    region_size = (IPTR)ROUNDUP((IPTR)ptr - (IPTR)region_start + size);

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Start searching here. It doesn't make sense to go through regions which are smaller */
    MAPPING_SEARCH(&region_size, &fl, &sl);

    region_end = region_start + region_size; /* region_size is modified in MAPPING_SEARCH */

    /* Start looking now :) */
    for (; fl < MAX_FLI; fl++)
    {
        for (; sl < MAX_SLI; sl++)
        {
            bhdr_t *b0 = tlsf->matrix[fl][sl];

            /* If a block is there, check it */
            while (b0)
            {
                bhdr_t *b1 = GET_NEXT_BHDR(b0, GET_SIZE(b0));

                /* The block has to contain the _whole_ requested region, though it may exceed it in size */
                if (b0->mem <= region_start && (UBYTE *)b1 >= region_end)
                {
                    /* Block header of the requested region */
                    bhdr_t *breg = MEM_TO_BHDR(region_start);

                    /*
                     * This is the block we're looking for. Unchain it from the bidirectional
                     * list of free blocks now.
                     *
                     * The previous entry's next will point to this block's next. If previous
                     * is NULL, the matrix entry is set to this block's next.
                     */
                    if (b0->free_node.prev)
                        b0->free_node.prev->free_node.next = b0->free_node.next;
                    else
                        tlsf->matrix[fl][sl] = b0->free_node.next;

                    /*
                     * The next entry's prev will point to this block's previous.
                     */
                    if (b0->free_node.next)
                        b0->free_node.next->free_node.prev = b0->free_node.prev;

                    /* Empty SL matrix for this size? Clear the bit */
                    if (!tlsf->matrix[fl][sl])
                    {
                        ClrBit(sl, &tlsf->slbitmap[fl]);

                        /* Empty entire SL matrix for the given FL index? Clear that bit too */
                        if (!tlsf->slbitmap[fl])
                            ClrBit(fl, &tlsf->flbitmap);
                    }

                    b0->free_node.prev = NULL;
                    b0->free_node.next = NULL;
                    SET_BUSY_BLOCK(b0);

                    /*
                     * At this point the block is removed from the free list and marked as used.
                     * Now, split it if necessary...
                     */

                    /* Does the begin of the block differ from the begin of the requested region's header? */
                    if (b0 != breg)
                    {
                        /*
                         * Adjust the region's block header. Mark in size that previous (aka b0) is free.
                         * Reduce the size of b0 as well as the size of breg too.
                         */
                        breg->header.prev = b0;
                        SET_SIZE_AND_FLAGS(breg, GET_SIZE(b0) - ((IPTR)breg - (IPTR)b0), PREV_FREE | THIS_BUSY);

                        /* Update the next block. Mark in size that previous (breg) is used */
                        b1->header.prev = breg;
                        SET_BUSY_PREV_BLOCK(b1);

                        /* b0's prev state is kept. b0 itself is marked as a free block */
                        SET_FREE_BLOCK(b0);
                        SET_SIZE(b0, (IPTR)breg - (IPTR)b0->mem);

                        /* Insert b0 into the free list */
                        MAPPING_INSERT(GET_SIZE(b0), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b0);
                    }

                    /* Is it necessary to split the requested region at the end? */
                    if ((SIZE_ALIGN + region_size) < GET_SIZE(breg))
                    {
                        IPTR tmp_size = GET_SIZE(breg) - region_size - SIZE_ALIGN;

                        /* New region header directly at the end of the requested region */
                        bhdr_t *b2 = GET_NEXT_BHDR(breg, region_size);

                        /* Adjust the fields */
                        b2->header.prev = breg;
                        SET_SIZE_AND_FLAGS(b2, tmp_size, PREV_BUSY | THIS_FREE);

                        /* The requested region's size is now smaller */
                        SET_SIZE(breg, region_size);

                        /* The next block header points to the newly created one */
                        b1->header.prev = b2;
                        SET_FREE_PREV_BLOCK(b1);

                        /* Insert the newly created block into the free list */
                        MAPPING_INSERT(GET_SIZE(b2), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b2);
                    }

                    tlsf->free_size -= GET_SIZE(breg);

                    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
                        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

                    return breg->mem;
                }

                b0 = b0->free_node.next;
            }
        }

        /* Iterate through the next level */
        sl = 0;
    }

    if (mhe->mhe_MemHeader.mh_Attributes & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return NULL;
}
/*
 * Allocation of headers in memory:
 *
 * hdr
 *   header       (ROUNDUP(sizeof(hdr_t)))
 *   mem          (ROUNDUP(sizeof(tlsf_area_t)))
 * b
 *   header       (ROUNDUP(sizeof(hdr_t)))
 *   free space   (size - HEADERS_SIZE)
 * bend
 *   header       (ROUNDUP(sizeof(hdr_t)))
 */
static tlsf_area_t * init_memory_area(void * memory, IPTR size)
{
    bhdr_t * hdr = (bhdr_t *)memory;
    bhdr_t * b;
    bhdr_t * bend;
    tlsf_area_t * area;

    size = ROUNDDOWN(size);

    /* Prepare the first header, which protects the tlsf_area_t header */
    hdr->header.length = ROUNDUP(sizeof(tlsf_area_t)) | THIS_BUSY | PREV_BUSY;
    hdr->header.prev = NULL;

    b = GET_NEXT_BHDR(hdr, ROUNDUP(sizeof(tlsf_area_t)));
    b->header.prev = hdr;
    b->header.length = (size - HEADERS_SIZE) | PREV_BUSY | THIS_BUSY;

    bend = GET_NEXT_BHDR(b, GET_SIZE(b));
    bend->header.length = 0 | THIS_BUSY | PREV_BUSY;
    bend->header.prev = b;

    area = (tlsf_area_t *)hdr->mem;
    area->end = bend;

    return area;
}
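/*
 * Size example (illustrative, assuming 32-bit pointers and 8-byte alignment,
 * i.e. ROUNDUP(sizeof(hdr_t)) == 8 and ROUNDUP(sizeof(tlsf_area_t)) == 16):
 * a 4096-byte area spends HEADERS_SIZE = 3*8 + 16 = 40 bytes on bookkeeping,
 * leaving a single 4056-byte block between the area header and the
 * zero-length sentinel.
 */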
void tlsf_add_memory(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p, %p, %u)\n", __PRETTY_FUNCTION__, tlsf, memory, size));

    if (memory && size > HEADERS_SIZE)
    {
        tlsf_area_t *area = init_memory_area(memory, size);
        bhdr_t *b;

        D(nbug("[Kernel:TLSF] %s: adding memory\n", __PRETTY_FUNCTION__));

        area->next = tlsf->memory_area;
        tlsf->memory_area = area;

        /* User added memory. Not autogrown */
        area->autogrown = 0;

        b = MEM_TO_BHDR(area);
        b = GET_NEXT_BHDR(b, GET_SIZE(b));

        tlsf->total_size += size;

        D(nbug("[Kernel:TLSF] %s: total_size=%08x\n", __PRETTY_FUNCTION__, tlsf->total_size));

        /* Adjust the memheader if necessary */
        if (memory < mhe->mhe_MemHeader.mh_Lower)
        {
            if ((memory + size) >= mhe->mhe_MemHeader.mh_Lower)
                mhe->mhe_MemHeader.mh_Free += (mhe->mhe_MemHeader.mh_Lower - memory);
            else
                mhe->mhe_MemHeader.mh_Free += size;

            mhe->mhe_MemHeader.mh_Lower = memory;
        }
        else if ((memory + size) > mhe->mhe_MemHeader.mh_Upper)
        {
            if (memory <= mhe->mhe_MemHeader.mh_Upper)
                mhe->mhe_MemHeader.mh_Free += ((memory + size) - mhe->mhe_MemHeader.mh_Upper);
            else
                mhe->mhe_MemHeader.mh_Free += size;

            mhe->mhe_MemHeader.mh_Upper = memory + size;
        }

        /* Hand the initialized area's free block over to the allocator */
        tlsf_freevec(mhe, b->mem);
    }
}
void tlsf_add_memory_and_merge(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_add_memory(mhe, memory, size);
    // TODO: add memory and merge...
}
void bzero(void *ptr, IPTR len)
{
    UBYTE *p = (UBYTE *)ptr;

    while (len--)
        *p++ = 0;
}
void * tlsf_init(struct MemHeaderExt * mhe)
{
    tlsf_t *tlsf = NULL;
    void * ptr = mhe->mhe_MemHeader.mh_Lower;

    /* If the MemHeaderExt starts at the beginning of the handled memory, advance the ptr */
    if (ptr == (void *)mhe)
        ptr += ROUNDUP(sizeof(struct MemHeaderExt));

    /* Is there enough room for tlsf in the mem header itself? */
    if (mhe->mhe_MemHeader.mh_Free >= (ROUNDUP(sizeof(tlsf_t)) + 3 * ROUNDUP(sizeof(bhdr_t))))
    {
        /* tlsf will be stored inside the handled memory */
        tlsf = (tlsf_t *)ptr;

        ptr += ROUNDUP(sizeof(tlsf_t));

        bzero(tlsf, sizeof(tlsf_t));
        tlsf->autodestroy_self = 0;
    }
    else
    {
        /* No place for the tlsf header in the MemHeaderExt? Allocate it separately */
        tlsf = AllocMem(sizeof(tlsf_t), MEMF_ANY);

        if (tlsf)
        {
            bzero(tlsf, sizeof(tlsf_t));
            tlsf->autodestroy_self = 1;
        }
    }

    /* Store the tlsf pointer in the UserData field */
    mhe->mhe_UserData = tlsf;

    if (tlsf && ptr < mhe->mhe_MemHeader.mh_Upper)
        tlsf_add_memory(mhe, ptr, (IPTR)mhe->mhe_MemHeader.mh_Upper - (IPTR)ptr);

    return tlsf;
}
static void * tlsf_init_autogrow(struct MemHeaderExt * mhe, IPTR puddle_size, ULONG requirements, autogrow_get grow_function, autogrow_release release_function, APTR autogrow_data)
{
    tlsf_t *tlsf = tlsf_init(mhe);

    if (tlsf)
    {
        if (puddle_size < 4096)
            puddle_size = 4096;

        tlsf->autogrow_puddle_size = puddle_size;
        tlsf->autogrow_requirements = requirements;
        tlsf->autogrow_data = autogrow_data;
        tlsf->autogrow_get_fn = grow_function;
        tlsf->autogrow_release_fn = release_function;
    }

    return tlsf;
}
void tlsf_destroy(struct MemHeaderExt * mhe)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p)\n", __PRETTY_FUNCTION__, tlsf));

    if (tlsf)
    {
        tlsf_area_t *area = tlsf->memory_area;

        if (tlsf->autogrow_release_fn)
        {
            while (area)
            {
                tlsf_area_t *next = area->next;

                /*
                 * Autogrown area? Release it here.
                 * Otherwise it's the responsibility of the add_memory_area caller.
                 */
                if (area->autogrown)
                    tlsf_release_memory_area(mhe, area);

                area = next;
            }
        }

        if (tlsf->autodestroy_self)
            FreeMem(tlsf, sizeof(tlsf_t));
    }
}
IPTR tlsf_avail(struct MemHeaderExt * mhe, ULONG requirements)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    IPTR ret = 0;

    if (requirements & MEMF_TOTAL)
        ret = tlsf->total_size;
    else if (requirements & MEMF_LARGEST)
    {
        bhdr_t *b = NULL;

        /* The largest free block lives in the topmost non-empty list */
        if (tlsf->flbitmap)
        {
            int fl = MS(tlsf->flbitmap);

            if (tlsf->slbitmap[fl])
            {
                int sl = MS(tlsf->slbitmap[fl]);

                b = tlsf->matrix[fl][sl];
            }
        }

        while (b)
        {
            if (GET_SIZE(b) > ret)
                ret = GET_SIZE(b);

            b = b->free_node.next;
        }
    }
    else
        ret = tlsf->free_size;

    return ret;
}
BOOL tlsf_in_bounds(struct MemHeaderExt * mhe, void * begin, void * end)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    tlsf_area_t *area;

    area = tlsf->memory_area;

    D(nbug("[Kernel:TLSF] %s(%p, %p, %p)\n", __PRETTY_FUNCTION__, tlsf, begin, end));

    while (area)
    {
        D(nbug("[Kernel:TLSF] %s: area %p\n", __PRETTY_FUNCTION__, area));

        /*
         * Do the checks only if the questioned memory ends before the end
         * (the sentinel bhdr) of this area.
         */
        if ((IPTR)end <= (IPTR)area->end)
        {
            D(nbug("[Kernel:TLSF] %s end <= area->end (%p <= %p)\n", __PRETTY_FUNCTION__, end, area->end));

            /* Get the bhdr of this area */
            bhdr_t *b = MEM_TO_BHDR(area);

            /* Forward to the begin of the memory */
            b = GET_NEXT_BHDR(b, GET_SIZE(b));

            /* The requested memory starts at or after the begin of the area */
            if ((IPTR)begin >= (IPTR)b->mem)
            {
                D(nbug("[Kernel:TLSF] %s begin >= b->mem (%p >= %p)\n", __PRETTY_FUNCTION__, begin, b->mem));
                return TRUE;
            }
        }

        area = area->next;
    }

    return FALSE;
}
static void destroy_Pool(struct MemHeaderExt *mhe)
{
    tlsf_destroy(mhe);
}
static APTR fetch_more_ram(void * data, IPTR *size)
{
    struct MemHeaderExt *mhe = (struct MemHeaderExt *)data;
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p, %u)\n", __PRETTY_FUNCTION__, mhe, *size));

    APTR ptr = AllocMem(*size, tlsf->autogrow_requirements);
    return ptr;
}
static VOID release_ram(void * data, APTR ptr, IPTR size)
{
    D(nbug("[Kernel:TLSF] %s(%p, %u)\n", __PRETTY_FUNCTION__, ptr, size));

    FreeMem(ptr, size);
}
static void * init_Pool(struct MemHeaderExt *mhe, IPTR puddleSize, IPTR initialSize)
{
    /* The requirements flags for autogrown memory arrive via mh_First */
    return tlsf_init_autogrow(mhe, puddleSize, (ULONG)(IPTR)mhe->mhe_MemHeader.mh_First, fetch_more_ram, release_ram, mhe);
}
void krnCreateTLSFMemHeader(CONST_STRPTR name, BYTE pri, APTR start, IPTR size, ULONG flags)
{
    /* If the end is less than (1 << 31), MEMF_31BIT is implied */
    if (((IPTR)start + size) < (1UL << 31))
        flags |= MEMF_31BIT;
    else
        flags &= ~MEMF_31BIT;

    flags |= MEMF_MANAGED;

    struct MemHeaderExt *mhe = start;

    mhe->mhe_Magic = MEMHEADER_EXT_MAGIC;

    mhe->mhe_DestroyPool = destroy_Pool;
    mhe->mhe_InitPool = init_Pool;

    mhe->mhe_Alloc = tlsf_malloc;
    mhe->mhe_AllocVec = tlsf_malloc;
    mhe->mhe_AllocAligned = tlsf_malloc_aligned;
    mhe->mhe_AllocVecAligned = tlsf_malloc_aligned;
    mhe->mhe_Free = tlsf_freemem;
    mhe->mhe_FreeVec = tlsf_freevec;
    mhe->mhe_AllocAbs = tlsf_allocabs;
    mhe->mhe_ReAlloc = tlsf_realloc;
    mhe->mhe_Avail = tlsf_avail;
    mhe->mhe_InBounds = tlsf_in_bounds;

    mhe->mhe_MemHeader.mh_Node.ln_Succ = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Pred = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Type = NT_MEMORY;
    mhe->mhe_MemHeader.mh_Node.ln_Name = (STRPTR)name;
    mhe->mhe_MemHeader.mh_Node.ln_Pri = pri;
    mhe->mhe_MemHeader.mh_Attributes = flags;

    /* The MemChunk list is not used by the managed allocator */
    mhe->mhe_MemHeader.mh_First = NULL;

    mhe->mhe_UserData = NULL;

    /*
     * mh_Lower and mh_Upper are informational only. Since our MemHeader resides
     * inside the region it describes, the region includes the MemHeader.
     */
    mhe->mhe_MemHeader.mh_Lower = start;
    mhe->mhe_MemHeader.mh_Upper = start + size;
    mhe->mhe_MemHeader.mh_Free = size;

    D(nbug("[Kernel:TLSF] %s: 0x%p -> 0x%p\n", __PRETTY_FUNCTION__, mhe->mhe_MemHeader.mh_Lower, mhe->mhe_MemHeader.mh_Upper));

    /* Initialise the TLSF structures for this header */
    tlsf_init(mhe);
}
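/*
 * A minimal setup sketch, assuming a raw memory region to carve the managed
 * header out of (the region name, address and sizes are illustrative only):
 *
 *     APTR region = some_raw_memory;   // hypothetical region, >= 1MB here
 *     krnCreateTLSFMemHeader("tlsf ram", 0, region, 1024 * 1024, MEMF_FAST);
 *
 *     struct MemHeaderExt *mhe = region;
 *     APTR p = mhe->mhe_Alloc(mhe, 4096, NULL);   // allocate from the pool
 */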
1328 struct MemHeader
* krnConvertMemHeaderToTLSF(struct MemHeader
* source
)
1330 struct MemChunk
* mc
= source
->mh_First
->mc_Next
;
1331 APTR mh
= source
->mh_First
;
1332 IPTR fsize
= source
->mh_First
->mc_Bytes
;
1334 if (source
->mh_Attributes
& MEMF_MANAGED
)
1337 /* First chunk will host the mem header */
1338 krnCreateTLSFMemHeader(source
->mh_Node
.ln_Name
, source
->mh_Node
.ln_Pri
, mh
, fsize
,
1339 source
->mh_Attributes
);
1340 /* source->mh_First is destroyed beyond this point */
1342 /* Add remaining chunks */
1345 APTR p
= mc
->mc_Next
;
1346 tlsf_add_memory(mh
, mc
, mc
->mc_Bytes
);
1347 /* mc is destroyed beyond this point */