/* AROS.git: rom/kernel/tlsf.c */

/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <exec/types.h>
#include <exec/memory.h>
#include <exec/memheaderext.h>
#include <proto/exec.h>
#include <string.h>

#include "tlsf.h"
#include "kernel_base.h"
#include "kernel_debug.h"

#define D(x)

#undef USE_MACROS

#include <stddef.h>

/*
 * Minimal alignment as required by AROS. In contrast to the default
 * TLSF implementation, we do not allow smaller blocks here.
 * Size needs to be aligned to at least 8, see THIS_FREE_MASK comment.
 */
#define SIZE_ALIGN  AROS_WORSTALIGN

/*
 * Settings for the TLSF allocator:
 * MAX_LOG2_SLI - number of bits used for the second level list
 * MAX_FLI      - maximal allowed allocation size; 2^32 should be enough on
 *                32-bit systems, 64-bit systems use a 128GB limit
 */
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)
#if __WORDSIZE == 64
#define MAX_FLI         (32+5)
#else
#define MAX_FLI         (32)
#endif
#define FLI_OFFSET      (6)
#define SMALL_BLOCK     (2 << FLI_OFFSET)

#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
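
/*
 * Worked example (illustrative, 32-bit build): REAL_FLI = 32 - 6 = 26 first
 * level rows, each holding MAX_SLI = 32 second level lists, so the free list
 * matrix has 26 x 32 entries. SMALL_BLOCK = 2 << 6 = 128, i.e. all sizes
 * below 128 bytes map to first level row 0.
 */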

#define ROUNDUP(x)      (((x) + SIZE_ALIGN - 1) & ~(SIZE_ALIGN - 1))
#define ROUNDDOWN(x)    ((x) & ~(SIZE_ALIGN - 1))
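
/*
 * Example (illustrative, assuming SIZE_ALIGN == 8): ROUNDUP(13) ==
 * (13 + 7) & ~7 == 16 and ROUNDDOWN(13) == 8. SIZE_ALIGN comes from
 * AROS_WORSTALIGN and may be larger on some architectures.
 */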

/* Fields used in the block header length field to identify busy/free blocks */
#define THIS_FREE_MASK  (IPTR)1
#define THIS_FREE       (IPTR)1
#define THIS_BUSY       (IPTR)0

#define PREV_FREE_MASK  (IPTR)2
#define PREV_FREE       (IPTR)2
#define PREV_BUSY       (IPTR)0

#define SIZE_MASK       (~(THIS_FREE_MASK | PREV_FREE_MASK))
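
/*
 * Packing example (illustrative): since sizes are aligned to at least 8, the
 * two low bits of header.length can carry the flags. A free 0x140-byte block
 * following a busy one stores length = 0x140 | THIS_FREE | PREV_BUSY = 0x141;
 * GET_SIZE() recovers 0x140 by masking with SIZE_MASK.
 */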

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Size of additional memory needed to manage a new block */
#define HEADERS_SIZE (((3 * ROUNDUP(sizeof(hdr_t))) + ROUNDUP(sizeof(tlsf_area_t))))
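
/*
 * Illustrative arithmetic (assuming a 32-bit build with SIZE_ALIGN == 8):
 * sizeof(hdr_t) == 8 and sizeof(tlsf_area_t) == 12, rounded up to 16, so
 * HEADERS_SIZE = 3*8 + 16 = 40 bytes of bookkeeping per added area. The
 * exact figures depend on pointer size and alignment.
 */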

/* Free node links together all free blocks of similar size */
typedef struct free_node_s {
    struct bhdr_s *prev;
    struct bhdr_s *next;
} free_node_t;

/* Block header in front of each block - both free and busy */
typedef struct hdr_s {
    struct bhdr_s *prev;
    IPTR           length;
} hdr_t;

/*
 * Each block is defined by a bhdr_t structure. Free blocks contain only
 * the header, which allows us to go through all memory blocks in the system.
 * Free blocks additionally contain the node which chains them in one
 * of the free block lists.
 */
typedef struct bhdr_s {
    union {
        hdr_t  header;
        UBYTE  __min_align[SIZE_ALIGN];
    };
    union {
        UBYTE        mem[1];
        free_node_t  free_node;
    };
} bhdr_t;
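
/*
 * Illustrative sketch: since each header stores the block length, a whole
 * area can be walked with the accessors defined below until the zero-sized
 * sentinel block is reached. Here `first` is a hypothetical pointer to the
 * first block of an area and visit() a hypothetical callback:
 *
 *   for (bhdr_t *b = first; GET_SIZE(b) != 0; b = GET_NEXT_BHDR(b, GET_SIZE(b)))
 *       visit(b);
 */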

/* Memory area within the TLSF pool */
typedef struct tlsf_area_s {
    struct tlsf_area_s *next;       // Next memory area
    bhdr_t *            end;        // Pointer to "end-of-area" block header
    LONG                autogrown;  // Automatically allocated by TLSF pool
} tlsf_area_t;

typedef struct {
    tlsf_area_t *       memory_area;

    IPTR                total_size;
    IPTR                free_size;

    ULONG               flbitmap;
    ULONG               slbitmap[REAL_FLI];

    IPTR                autogrow_puddle_size;
    ULONG               autogrow_requirements;
    APTR                autogrow_data;
    autogrow_get        autogrow_get_fn;
    autogrow_release    autogrow_release_fn;

    UBYTE               autodestroy_self;

    bhdr_t *            matrix[REAL_FLI][MAX_SLI];
} tlsf_t;

static inline __attribute__((always_inline)) int LS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return __builtin_ffs(i) - 1;
    else
        return __builtin_ffsl(i) - 1;
}

static inline __attribute__((always_inline)) int MS(IPTR i)
{
    if (sizeof(IPTR) == 4)
        return 31 - __builtin_clz(i);
    else
        return 63 - __builtin_clzl(i);
}

static inline __attribute__((always_inline)) void SetBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] |= (1 << (nr & 31));
}

static inline __attribute__((always_inline)) void ClrBit(int nr, ULONG *ptr)
{
    ptr[nr >> 5] &= ~(1 << (nr & 31));
}

static inline __attribute__((always_inline)) void MAPPING_INSERT(IPTR r, int *fl, int *sl)
{
    if (r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        *fl = MS(r);
        *sl = (int)(((IPTR)r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
    }
}
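
/*
 * Worked example (illustrative): for r = 476, MS(476) = 8, so
 * *sl = (476 >> (8 - 5)) - 32 = 27 and *fl = 8 - 6 = 2, i.e. the block is
 * filed under matrix[2][27]. For r = 100 (below SMALL_BLOCK), *fl = 0 and
 * *sl = 100 / (128 / 32) = 25.
 */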

static inline __attribute__((always_inline)) void MAPPING_SEARCH(IPTR *r, int *fl, int *sl)
{
    if (*r < SMALL_BLOCK)
    {
        *fl = 0;
        *sl = (int)(*r / (SMALL_BLOCK / MAX_SLI));
    }
    else
    {
        IPTR tmp = ((IPTR)1 << (MS(*r) - MAX_LOG2_SLI)) - 1;
        IPTR tr = *r + tmp;

        *fl = MS(tr);
        *sl = (int)(((IPTR)tr >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        *fl -= FLI_OFFSET;
        *r = tr & ~tmp;
    }
}
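
/*
 * Worked example (illustrative): searching for *r = 476 first rounds the
 * request up to the next list boundary: tmp = (1 << (8 - 5)) - 1 = 7,
 * tr = 483, giving *fl = 2, *sl = (483 >> 3) - 32 = 28 and *r = 480. The
 * round-up guarantees that any block found in the chosen list is large
 * enough for the request.
 */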

static inline __attribute__((always_inline)) bhdr_t * FIND_SUITABLE_BLOCK(tlsf_t *tlsf, int *fl, int *sl)
{
    IPTR bitmap_tmp = tlsf->slbitmap[*fl] & (~0 << *sl);
    bhdr_t *b = NULL;

    if (bitmap_tmp)
    {
        *sl = LS(bitmap_tmp);
        b = tlsf->matrix[*fl][*sl];
    }
    else
    {
        bitmap_tmp = tlsf->flbitmap & (~0 << (*fl + 1));
        if (likely(bitmap_tmp != 0))
        {
            *fl = LS(bitmap_tmp);
            *sl = LS(tlsf->slbitmap[*fl]);
            b = tlsf->matrix[*fl][*sl];
        }
    }

    return b;
}

#ifdef USE_MACROS

#define GET_SIZE(b) ({ IPTR size = b->header.length & SIZE_MASK; size; })
#define GET_FLAGS(b) ({ IPTR flags = b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK); flags; })
#define SET_SIZE(b, size) do{ b->header.length = GET_FLAGS(b) | (size); }while(0)
#define SET_FLAGS(b, flags) do{ b->header.length = GET_SIZE(b) | (flags); }while(0)
#define SET_SIZE_AND_FLAGS(b, size, flags) do{b->header.length = (size) | (flags);}while(0)
#define FREE_BLOCK(b) ((b->header.length & THIS_FREE_MASK) == THIS_FREE)
#define SET_FREE_BLOCK(b) do{b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE;}while(0)
#define SET_BUSY_BLOCK(b) do{b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY;}while(0)
#define SET_FREE_PREV_BLOCK(b) do{b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE;}while(0)
#define SET_BUSY_PREV_BLOCK(b) do{b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY;}while(0)
#define FREE_PREV_BLOCK(b) ((b->header.length & PREV_FREE_MASK) == PREV_FREE)
#define GET_NEXT_BHDR(hdr, size) ({ bhdr_t * __b = (bhdr_t *)((UBYTE *)&hdr->mem[0] + (size)); __b; })
#define MEM_TO_BHDR(ptr) ({ bhdr_t * b = (bhdr_t*)((void*)(ptr) - offsetof(bhdr_t, mem)); b; })

#define REMOVE_HEADER(tlsf, b, fl, sl) do{ \
        if (b->free_node.next) \
            b->free_node.next->free_node.prev = b->free_node.prev; \
        if (b->free_node.prev) \
            b->free_node.prev->free_node.next = b->free_node.next; \
        if (tlsf->matrix[fl][sl] == b) { \
            tlsf->matrix[fl][sl] = b->free_node.next; \
            if (!tlsf->matrix[fl][sl]) \
                ClrBit(sl, &tlsf->slbitmap[fl]); \
            if (!tlsf->slbitmap[fl]) \
                ClrBit(fl, &tlsf->flbitmap); \
        } } while(0)

#define INSERT_FREE_BLOCK(tlsf, b) do { \
        int fl, sl; MAPPING_INSERT(GET_SIZE(b), &fl, &sl); \
        b->free_node.prev = NULL; \
        b->free_node.next = tlsf->matrix[fl][sl]; \
        if (tlsf->matrix[fl][sl]) \
            tlsf->matrix[fl][sl]->free_node.prev = b; \
        tlsf->matrix[fl][sl] = b; \
        SetBit(fl, &tlsf->flbitmap); \
        SetBit(sl, &tlsf->slbitmap[fl]); }while(0)

#else

static inline __attribute__((always_inline)) IPTR GET_SIZE(bhdr_t *b)
{
    return b->header.length & SIZE_MASK;
}

static inline __attribute__((always_inline)) IPTR GET_FLAGS(bhdr_t *b)
{
    return b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK);
}

static inline __attribute__((always_inline)) void SET_SIZE(bhdr_t *b, IPTR size)
{
    b->header.length = GET_FLAGS(b) | size;
}

static inline __attribute__((always_inline)) void SET_SIZE_AND_FLAGS(bhdr_t *b, IPTR size, IPTR flags)
{
    b->header.length = size | flags;
}

static inline __attribute__((always_inline)) int FREE_BLOCK(bhdr_t *b)
{
    return ((b->header.length & THIS_FREE_MASK) == THIS_FREE);
}

static inline __attribute__((always_inline)) void SET_FREE_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE;
}

static inline __attribute__((always_inline)) void SET_BUSY_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY;
}

static inline __attribute__((always_inline)) void SET_FREE_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE;
}

static inline __attribute__((always_inline)) void SET_BUSY_PREV_BLOCK(bhdr_t *b)
{
    b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY;
}

static inline __attribute__((always_inline)) int FREE_PREV_BLOCK(bhdr_t *b)
{
    return ((b->header.length & PREV_FREE_MASK) == PREV_FREE);
}

static inline __attribute__((always_inline)) bhdr_t * GET_NEXT_BHDR(bhdr_t *hdr, IPTR size)
{
    return (bhdr_t *)((UBYTE *)&hdr->mem[0] + size);
}

static inline __attribute__((always_inline)) bhdr_t * MEM_TO_BHDR(void *ptr)
{
    return (bhdr_t *)(ptr - offsetof(bhdr_t, mem));
}

static inline __attribute__((always_inline)) void REMOVE_HEADER(tlsf_t *tlsf, bhdr_t *b, int fl, int sl)
{
    if (b->free_node.next)
        b->free_node.next->free_node.prev = b->free_node.prev;
    if (b->free_node.prev)
        b->free_node.prev->free_node.next = b->free_node.next;

    if (tlsf->matrix[fl][sl] == b)
    {
        tlsf->matrix[fl][sl] = b->free_node.next;
        if (!tlsf->matrix[fl][sl])
            ClrBit(sl, &tlsf->slbitmap[fl]);
        if (!tlsf->slbitmap[fl])
            ClrBit(fl, &tlsf->flbitmap);
    }
}

static inline __attribute__((always_inline)) void INSERT_FREE_BLOCK(tlsf_t *tlsf, bhdr_t *b)
{
    int fl, sl;

    MAPPING_INSERT(GET_SIZE(b), &fl, &sl);

    b->free_node.prev = NULL;
    b->free_node.next = tlsf->matrix[fl][sl];

    if (tlsf->matrix[fl][sl])
        tlsf->matrix[fl][sl]->free_node.prev = b;

    tlsf->matrix[fl][sl] = b;

    SetBit(fl, &tlsf->flbitmap);
    SetBit(sl, &tlsf->slbitmap[fl]);
}

#endif /* USE_MACROS */

void * tlsf_malloc(struct MemHeaderExt *mhe, IPTR size, ULONG *flags)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    int fl, sl;
    bhdr_t *b = NULL;

    size = ROUNDUP(size);

    if (unlikely(!size)) return NULL;

    D(nbug("[Kernel:TLSF] %s(%p, %ld)\n", __PRETTY_FUNCTION__, tlsf, size));

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Find the indices fl and sl for given size */
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Find block of either the right size or larger */
    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);

    D(nbug("[Kernel:TLSF] %s: adjusted size %ld\n", __PRETTY_FUNCTION__, size));

    /* No block found? Either failure or tlsf will get more memory. */
    if (unlikely(!b))
    {
        D(nbug("[Kernel:TLSF] %s: out of memory\n", __PRETTY_FUNCTION__));

        /* Do we have the autogrow feature? */
        if (tlsf->autogrow_get_fn)
        {
            /* Increase the size of the requested block so that we can fit the headers too */
            IPTR sz = size + HEADERS_SIZE;

            /* Requested size less than puddle size? Get puddle size then */
            if (sz < tlsf->autogrow_puddle_size)
                sz = tlsf->autogrow_puddle_size;

            D(nbug("[Kernel:TLSF] %s: querying for %u bytes\n", __PRETTY_FUNCTION__, sz));

            /* Try to get some memory */
            void * ptr = tlsf->autogrow_get_fn(tlsf->autogrow_data, &sz);

            /* Got it? Add to tlsf then */
            if (ptr)
            {
                tlsf_add_memory(mhe, ptr, sz);

                /* We know the newly added memory is first in the list. Set the autogrown flag there */
                tlsf->memory_area->autogrown = 1;

                /* Memory is there. Try to find the block again */
                MAPPING_SEARCH(&size, &fl, &sl);
                b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
            }
        }

        /* Still no block? FAILURE! */
        if (!b)
        {
            if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
                ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

            return NULL;
        }
    }

    /* Next header */
    bhdr_t *next = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Remove the found block from the free list */
    REMOVE_HEADER(tlsf, b, fl, sl);

    /* Is this block larger than requested? Try to split it then */
    if (likely(GET_SIZE(b) > (size + ROUNDUP(sizeof(hdr_t)))))
    {
        /* New split block */
        bhdr_t *sb = GET_NEXT_BHDR(b, size);
        sb->header.prev = b;

        /* Set size, this free and previous busy */
        SET_SIZE_AND_FLAGS(sb, GET_SIZE(b) - size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* The next header points to the free block now */
        next->header.prev = sb;

        /* Previous block (sb) is free */
        SET_FREE_PREV_BLOCK(next);

        /* Allocated block size truncated */
        SET_SIZE(b, size);

        D(nbug("[Kernel:TLSF] %s: block split, %ld bytes remaining\n", __PRETTY_FUNCTION__, GET_SIZE(sb)));
        /* The free remainder is inserted into the free list */
        INSERT_FREE_BLOCK(tlsf, sb);
    }
    else
    {
        /* The block was exactly the right size. Just mark it busy in the next header */
        SET_BUSY_PREV_BLOCK(next);
    }

    /* The allocated block is busy */
    SET_BUSY_BLOCK(b);

    /* Clear the pointers just in case */
    b->free_node.next = NULL;
    b->free_node.prev = NULL;

    /* Update counters */
    tlsf->free_size -= GET_SIZE(b);
    mhe->mhe_MemHeader.mh_Free = tlsf->free_size;

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    if (flags && (*flags & MEMF_CLEAR))
        bzero(&b->mem[0], size);

    /* And return memory */
    return &b->mem[0];
}

static inline __attribute__((always_inline)) void MERGE(bhdr_t *b1, bhdr_t *b2)
{
    /* Merging adjusts the size - it's the sum of both sizes plus the size of a block header */
    SET_SIZE(b1, GET_SIZE(b1) + GET_SIZE(b2) + ROUNDUP(sizeof(hdr_t)));
}
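
/*
 * Illustrative example: merging a free 64-byte block with an adjacent free
 * 32-byte block (8-byte headers assumed) yields 64 + 32 + 8 = 104 bytes,
 * because the second block's header becomes part of the payload.
 */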

static inline __attribute__((always_inline)) bhdr_t * MERGE_PREV(tlsf_t *tlsf, bhdr_t *block)
{
    /* Is previous block free? */
    if (FREE_PREV_BLOCK(block))
    {
        int fl, sl;
        bhdr_t *prev = block->header.prev;

        /* Calculate index for removal */
        MAPPING_INSERT(GET_SIZE(prev), &fl, &sl);

        /* Do remove the header from the list */
        REMOVE_HEADER(tlsf, prev, fl, sl);

        /* Merge */
        MERGE(prev, block);

        return prev;
    }
    else
        return block;
}

static inline __attribute__((always_inline)) bhdr_t * MERGE_NEXT(tlsf_t *tlsf, bhdr_t *block)
{
    bhdr_t *next = GET_NEXT_BHDR(block, GET_SIZE(block));

    /* Is next block free? */
    if (FREE_BLOCK(next))
    {
        int fl, sl;

        /* Calculate index for removal */
        MAPPING_INSERT(GET_SIZE(next), &fl, &sl);

        /* Remove the header from the list */
        REMOVE_HEADER(tlsf, next, fl, sl);

        /* Merge blocks */
        MERGE(block, next);
    }

    return block;
}

static void tlsf_release_memory_area(struct MemHeaderExt * mhe, tlsf_area_t * area)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    tlsf_area_t *p = (tlsf_area_t *)(&tlsf->memory_area - offsetof(tlsf_area_t, next));
    bhdr_t *b;
    void *begin;
    void *end;
    IPTR size;

    /* Get the beginning of this area */
    begin = MEM_TO_BHDR(area);

    /* Get the sentinel block */
    b = area->end;

    /* The end of this area is the end of the sentinel block */
    end = GET_NEXT_BHDR(b, 0);

    /* Calculate the size of the area */
    size = (IPTR)end - (IPTR)begin;

    /* Update counters */
    tlsf->total_size -= size;
    tlsf->free_size -= GET_SIZE(area->end->header.prev);

    /* Remove the area from the list */
    for (; p->next != NULL; p = p->next)
        if (p->next == area)
        {
            p->next = area->next;
            break;
        }

    /* Release */
    if (tlsf->autogrow_release_fn)
        tlsf->autogrow_release_fn(tlsf->autogrow_data, begin, size);
}

void * tlsf_malloc_aligned(struct MemHeaderExt *mhe, IPTR size, IPTR align, ULONG *flags)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    void * ptr;
    bhdr_t *b;

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    size = ROUNDUP(size);

    D(nbug("[Kernel:TLSF] %s(%p, %lx, %u)\n", __PRETTY_FUNCTION__, mhe, size, align));

    /* Adjust align to a power of two (keep only the highest set bit) */
    align = 1 << MS(align);

    D(nbug("[Kernel:TLSF] %s: adjusted align = %u\n", __PRETTY_FUNCTION__, align));

    ptr = tlsf_malloc(mhe, size + align, flags);

    if (!ptr)
    {
        if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
            ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

        return ptr;
    }

    b = MEM_TO_BHDR(ptr);

    D(nbug("[Kernel:TLSF] %s: allocated region @%p\n", __PRETTY_FUNCTION__, ptr));

    if (align > SIZE_ALIGN)
    {
        void *aligned_ptr = (void *)(((IPTR)ptr + align - 1) & ~(align - 1));
        bhdr_t *aligned_bhdr = MEM_TO_BHDR(aligned_ptr);
        IPTR diff_begin = (IPTR)aligned_bhdr - (IPTR)b;
        IPTR diff_end = (IPTR)GET_NEXT_BHDR(b, GET_SIZE(b)) - (IPTR)GET_NEXT_BHDR(aligned_bhdr, size);

        SET_SIZE(aligned_bhdr, size);

        if (aligned_ptr != ptr)
        {
            D(nbug("[Kernel:TLSF] %s: aligned ptr: %p\n", __PRETTY_FUNCTION__, aligned_ptr));
            D(nbug("[Kernel:TLSF] %s: difference begin: %d\n", __PRETTY_FUNCTION__, diff_begin));
            D(nbug("[Kernel:TLSF] %s: difference end: %d\n", __PRETTY_FUNCTION__, diff_end));

            if (diff_begin > 0)
            {
                SET_SIZE(b, diff_begin - ROUNDUP(sizeof(hdr_t)));

                tlsf->free_size += GET_SIZE(b);

                aligned_bhdr->header.prev = b;
                SET_FREE_PREV_BLOCK(aligned_bhdr);
                SET_FREE_BLOCK(b);

                b = MERGE_PREV(tlsf, b);

                D(nbug("[Kernel:TLSF] %s: block @%p, b->next %p\n", __PRETTY_FUNCTION__, b, GET_NEXT_BHDR(b, GET_SIZE(b))));

                /* Insert the free block into the proper list */
                INSERT_FREE_BLOCK(tlsf, b);
            }

            ptr = &aligned_bhdr->mem[0];
        }

        if (diff_end > 0)
        {
            bhdr_t *b1 = GET_NEXT_BHDR(aligned_bhdr, GET_SIZE(aligned_bhdr));
            bhdr_t *next;

            b1->header.prev = aligned_bhdr;

            SET_SIZE(b1, diff_end - ROUNDUP(sizeof(hdr_t)));
            SET_BUSY_PREV_BLOCK(b1);
            SET_FREE_BLOCK(b1);

            next = GET_NEXT_BHDR(b1, GET_SIZE(b1));
            next->header.prev = b1;
            SET_FREE_PREV_BLOCK(next);

            b1 = MERGE_NEXT(tlsf, b1);

            INSERT_FREE_BLOCK(tlsf, b1);
        }
    }

    D({
        bhdr_t *b2 = b;
        while (b2 && GET_SIZE(b2))
        {
            nbug("[Kernel:TLSF] %s: bhdr %p, mem %p, size=%08x, flags=%x, prev=%p\n",
                 __PRETTY_FUNCTION__, b2, &b2->mem[0], GET_SIZE(b2), GET_FLAGS(b2), b2->header.prev);

            b2 = GET_NEXT_BHDR(b2, GET_SIZE(b2));
        }
    });

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return ptr;
}

void tlsf_freevec(struct MemHeaderExt * mhe, APTR ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *fb;
    bhdr_t *next;
    tlsf_area_t * area;

    if (unlikely(!ptr))
        return;

    fb = MEM_TO_BHDR(ptr);

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Mark block as free */
    SET_FREE_BLOCK(fb);

    /* Adjust the free size field of tlsf */
    tlsf->free_size += GET_SIZE(fb);

    /* Try to merge with previous and next blocks (if free) */
    fb = MERGE_PREV(tlsf, fb);
    fb = MERGE_NEXT(tlsf, fb);

    /* Tell the next block that the previous one is free. Also update the prev link in case it changed */
    next = GET_NEXT_BHDR(fb, GET_SIZE(fb));
    SET_FREE_PREV_BLOCK(next);
    next->header.prev = fb;

    /* Check if this was the last used block of an autogrown area */
    area = fb->header.prev->header.prev == NULL ? (tlsf_area_t *)fb->header.prev->mem : NULL;
    if (area != NULL && area->end == next && area->autogrown == 1)
        tlsf_release_memory_area(mhe, area);
    else
    {
        /* Insert the free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, fb);
    }

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);
}

void tlsf_freemem(struct MemHeaderExt * mhe, APTR ptr, IPTR size)
{
    (void)size;
    tlsf_freevec(mhe, ptr);
}

void * tlsf_realloc(struct MemHeaderExt *mhe, APTR ptr, IPTR new_size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    bhdr_t *b;
    bhdr_t *bnext;
    int fl;
    int sl;

    /* NULL pointer? Just allocate the memory */
    if (unlikely(!ptr))
        return tlsf_malloc(mhe, new_size, NULL);

    /* size == 0? Free the memory */
    if (unlikely(!new_size))
    {
        tlsf_freevec(mhe, ptr);
        return NULL;
    }

    new_size = ROUNDUP(new_size);

    b = MEM_TO_BHDR(ptr);

    if (unlikely(new_size == GET_SIZE(b)))
        return ptr;

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    bnext = GET_NEXT_BHDR(b, GET_SIZE(b));

    /* Is the new size smaller than the previous one? Try to split the block if this is the case */
    if (new_size <= (GET_SIZE(b)))
    {
        /* The new header starts right after the shrunken block b */
        bhdr_t * b1 = GET_NEXT_BHDR(b, new_size);

        /* Update pointer and size */
        b1->header.prev = b;
        SET_SIZE_AND_FLAGS(b1, GET_SIZE(b) - new_size - ROUNDUP(sizeof(hdr_t)), THIS_FREE | PREV_BUSY);

        /* Current block gets smaller */
        SET_SIZE(b, new_size);

        tlsf->free_size += GET_SIZE(b1);

        /* Try to merge with the next block */
        b1 = MERGE_NEXT(tlsf, b1);

        /* Tell the next block that the previous one is free. Also update the prev link in case it changed */
        bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
        SET_FREE_PREV_BLOCK(bnext);
        bnext->header.prev = b1;

        /* Insert the free block into the proper list */
        INSERT_FREE_BLOCK(tlsf, b1);
    }
    else
    {
        /* Is the next block free? Is there enough free space? */
        if (FREE_BLOCK(bnext) && new_size <= GET_SIZE(b) + GET_SIZE(bnext) + ROUNDUP(sizeof(hdr_t)))
        {
            bhdr_t *b1;
            IPTR rest_size = ROUNDUP(sizeof(hdr_t)) + GET_SIZE(bnext) + GET_SIZE(b) - new_size;

            MAPPING_INSERT(GET_SIZE(bnext), &fl, &sl);

            REMOVE_HEADER(tlsf, bnext, fl, sl);

            if (rest_size > ROUNDUP(sizeof(hdr_t)))
            {
                rest_size -= ROUNDUP(sizeof(hdr_t));

                SET_SIZE(b, new_size);

                b1 = GET_NEXT_BHDR(b, GET_SIZE(b));
                b1->header.prev = b;

                SET_SIZE_AND_FLAGS(b1, rest_size, THIS_FREE | PREV_BUSY);

                bnext = GET_NEXT_BHDR(b1, GET_SIZE(b1));
                bnext->header.prev = b1;
                SET_FREE_PREV_BLOCK(bnext);

                INSERT_FREE_BLOCK(tlsf, b1);
            }
            else
            {
                if (rest_size)
                    SET_SIZE(b, new_size + ROUNDUP(sizeof(hdr_t)));
                else
                    SET_SIZE(b, new_size);

                bnext = GET_NEXT_BHDR(b, GET_SIZE(b));
                bnext->header.prev = b;
                SET_BUSY_PREV_BLOCK(bnext);
            }
        }
        else
        {
            /* The next block was not free. Create a new buffer and copy the old contents there */
            void * p = tlsf_malloc(mhe, new_size, NULL);
            if (p)
            {
                CopyMemQuick(ptr, p, GET_SIZE(b));
                tlsf_freevec(mhe, ptr);
                b = MEM_TO_BHDR(p);
            }
        }
    }

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return b->mem;
}

void * tlsf_allocabs(struct MemHeaderExt * mhe, IPTR size, void * ptr)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    UBYTE *region_start;
    UBYTE *region_end;
    IPTR region_size;

    int fl, sl;

    D(nbug("[Kernel:TLSF] %s(%p, %ld)\n", __PRETTY_FUNCTION__, ptr, size));

    /*
        Returned memory needs to meet two requirements:
        a) the requested range is within the returned memory (AllocAbs definition)
        b) the returned address is LONG aligned (needed by the TLSF implementation)
    */
    region_start = (UBYTE *)((IPTR)ptr & SIZE_MASK);
    region_size = (IPTR)ROUNDUP((IPTR)ptr - (IPTR)region_start + size);

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ObtainSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    /* Start searching here. It doesn't make sense to go through regions which are smaller */
    MAPPING_SEARCH(&region_size, &fl, &sl);

    region_end = region_start + region_size; /* region_size is modified in MAPPING_SEARCH */

    /* Start looking now :) */
    for (; fl < MAX_FLI; fl++)
    {
        for (; sl < MAX_SLI; sl++)
        {
            bhdr_t *b0 = tlsf->matrix[fl][sl];

            /* If a block was there, check it */
            while (b0)
            {
                bhdr_t *b1 = GET_NEXT_BHDR(b0, GET_SIZE(b0));

                /* The block has to contain the _whole_ requested region; it may exceed it in size, though */
                if (b0->mem <= region_start && (UBYTE *)b1 >= region_end)
                {
                    /* Block header of the requested region */
                    bhdr_t *breg = MEM_TO_BHDR(region_start);

                    /*
                        This is the block we're looking for. Unchain it from the bidirectional list of
                        free blocks now.

                        The previous entry's next will point to this block's next. If previous is NULL,
                        the matrix entry will be set to this block's next.
                    */
                    if (b0->free_node.prev)
                        b0->free_node.prev->free_node.next = b0->free_node.next;
                    else
                        tlsf->matrix[fl][sl] = b0->free_node.next;

                    /*
                        The next entry's prev will point to this block's previous.
                    */
                    if (b0->free_node.next)
                        b0->free_node.next->free_node.prev = b0->free_node.prev;

                    /* Empty SL matrix entry for this size? Clear the bit */
                    if (!tlsf->matrix[fl][sl])
                    {
                        ClrBit(sl, &tlsf->slbitmap[fl]);

                        /* Entire SL matrix empty for this FL index? Clear that bit too */
                        if (!tlsf->slbitmap[fl])
                            ClrBit(fl, &tlsf->flbitmap);
                    }

                    b0->free_node.prev = NULL;
                    b0->free_node.next = NULL;
                    SET_BUSY_BLOCK(b0);

                    /*
                        At this point the block is removed from the free list and marked as used.
                        Now, split it if necessary...
                    */

                    /* Begin of the block != begin of the block header of the requested region? */
                    if (b0 != breg)
                    {
                        /*
                            Adjust the region's block header. Mark in size that the previous block
                            (aka b0) is free. Reduce the size of b0 as well as the size of breg.
                        */
                        breg->header.prev = b0;
                        SET_SIZE_AND_FLAGS(breg, GET_SIZE(b0)-((IPTR)breg - (IPTR)b0), PREV_FREE | THIS_BUSY);

                        /* Update the next block. Mark in size that the previous block (breg) is used */
                        b1->header.prev = breg;
                        SET_BUSY_PREV_BLOCK(b1);

                        /* b0's prev state is kept. b0 itself is marked as a free block */
                        SET_FREE_BLOCK(b0);
                        SET_SIZE(b0, (IPTR)breg - (IPTR)b0->mem);

                        /* Insert b0 into the free list */
                        MAPPING_INSERT(GET_SIZE(b0), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b0);
                    }

                    /* Is it necessary to split the requested region at the end? */
                    if ((SIZE_ALIGN + GET_SIZE(breg)) > region_size)
                    {
                        IPTR tmp_size = GET_SIZE(breg) - region_size - SIZE_ALIGN;

                        /* New region header directly at the end of the requested region */
                        bhdr_t *b2 = GET_NEXT_BHDR(breg, region_size);

                        /* Adjust fields */
                        b2->header.prev = breg;
                        SET_SIZE_AND_FLAGS(b2, tmp_size, PREV_BUSY | THIS_FREE);

                        /* The requested region's size is now smaller */
                        SET_SIZE(breg, region_size);

                        /* The next block header points to the newly created one */
                        b1->header.prev = b2;
                        SET_FREE_PREV_BLOCK(b1);

                        /* Insert the newly created block into the free list */
                        MAPPING_INSERT(GET_SIZE(b2), &fl, &sl);
                        INSERT_FREE_BLOCK(tlsf, b2);
                    }

                    tlsf->free_size -= GET_SIZE(breg);

                    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
                        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

                    return breg->mem;
                }

                b0 = b0->free_node.next;
            }
        }

        /* Iterate through the next level */
        sl = 0;
    }

    if (((ULONG)(IPTR)mhe->mhe_MemHeader.mh_First) & MEMF_SEM_PROTECTED)
        ReleaseSemaphore((struct SignalSemaphore *)mhe->mhe_MemHeader.mh_Node.ln_Name);

    return NULL;
}

/* Allocation of headers in memory:
 * hdr
 *  header      (ROUNDUP(sizeof(hdr_t)))
 *  mem         (ROUNDUP(sizeof(tlsf_area_t)))
 * b
 *  header      (ROUNDUP(sizeof(hdr_t)))
 *  free space  (size - HEADERS_SIZE)
 * bend
 *  header      (ROUNDUP(sizeof(hdr_t)))
 */
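
/*
 * Illustrative figures (assuming a 32-bit build where ROUNDUP(sizeof(hdr_t))
 * == 8 and ROUNDUP(sizeof(tlsf_area_t)) == 16): handing a 4096-byte area to
 * init_memory_area() leaves 4096 - HEADERS_SIZE = 4056 bytes of free space
 * between the area header and the zero-sized sentinel block.
 */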

tlsf_area_t * init_memory_area(void * memory, IPTR size)
{
    bhdr_t * hdr = (bhdr_t *)memory;
    bhdr_t * b;
    bhdr_t * bend;

    tlsf_area_t * area;

    size = ROUNDDOWN(size);

    /* Prepare the first header, which protects the tlsf_area_t header */
    hdr->header.length = ROUNDUP(sizeof(tlsf_area_t)) | THIS_BUSY | PREV_BUSY;
    hdr->header.prev = NULL;

    b = GET_NEXT_BHDR(hdr, ROUNDUP(sizeof(tlsf_area_t)));
    b->header.prev = hdr;
    b->header.length = (size - HEADERS_SIZE) | PREV_BUSY | THIS_BUSY;

    bend = GET_NEXT_BHDR(b, GET_SIZE(b));
    bend->header.length = 0 | THIS_BUSY | PREV_BUSY;
    bend->header.prev = b;

    area = (tlsf_area_t *)hdr->mem;
    area->end = bend;

    return area;
}

void tlsf_add_memory(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p, %p, %u)\n", __PRETTY_FUNCTION__, tlsf, memory, size));

    if (memory && size > HEADERS_SIZE)
    {
        tlsf_area_t *area = init_memory_area(memory, size);
        bhdr_t *b;

        D(nbug("[Kernel:TLSF] %s: adding memory\n", __PRETTY_FUNCTION__));

        area->next = tlsf->memory_area;
        tlsf->memory_area = area;

        /* User added memory. Not autogrown */
        area->autogrown = 0;

        b = MEM_TO_BHDR(area);
        b = GET_NEXT_BHDR(b, GET_SIZE(b));

        tlsf->total_size += size;

        D(nbug("[Kernel:TLSF] %s: total_size=%08x\n", __PRETTY_FUNCTION__, tlsf->total_size));

        /* Adjust the memheader if necessary */
#if 0
        if (memory < mhe->mhe_MemHeader.mh_Lower)
        {
            if ((memory + size) >= mhe->mhe_MemHeader.mh_Lower)
                mhe->mhe_MemHeader.mh_Free += (mhe->mhe_MemHeader.mh_Lower - memory);
            else
                mhe->mhe_MemHeader.mh_Free += size;
            mhe->mhe_MemHeader.mh_Lower = memory;
        }
        else if ((memory + size) > mhe->mhe_MemHeader.mh_Upper)
        {
            if (memory <= mhe->mhe_MemHeader.mh_Upper)
                mhe->mhe_MemHeader.mh_Free += ((memory + size) - mhe->mhe_MemHeader.mh_Upper);
            else
                mhe->mhe_MemHeader.mh_Free += size;
            mhe->mhe_MemHeader.mh_Upper = memory + size;
        }
#endif

        mhe->mhe_MemHeader.mh_Free += size;

        /* Add the initialized memory */
        tlsf_freevec(mhe, b->mem);
    }
}

void tlsf_add_memory_and_merge(struct MemHeaderExt *mhe, void *memory, IPTR size)
{
    tlsf_add_memory(mhe, memory, size);
    // TODO: add memory and merge...
}

#if 0
void bzero(void *ptr, IPTR len)
{
    UBYTE *p = (UBYTE *)ptr;

    while (len--)
        *p++ = 0;
}
#endif

void * tlsf_init(struct MemHeaderExt * mhe)
{
    tlsf_t *tlsf = NULL;
    void * ptr = mhe->mhe_MemHeader.mh_Lower;

    /* If the MemHeaderExt starts at the beginning of handled memory, advance the ptr */
    if (mhe == ptr)
        ptr += ROUNDUP(sizeof(struct MemHeaderExt));

    /* Is there enough room for tlsf in the mem header itself? */
    if (mhe->mhe_MemHeader.mh_Free >= (ROUNDUP(sizeof(tlsf_t)) + 3 * ROUNDUP(sizeof(bhdr_t))))
    {
        /* tlsf will be stored inside handled memory */
        tlsf = (tlsf_t *)ptr;

        ptr += ROUNDUP(sizeof(tlsf_t));

        bzero(tlsf, sizeof(tlsf_t));
        tlsf->autodestroy_self = 0;
    }
    else
    {
        /* No place for the tlsf header in the MemHeaderExt? Allocate it separately */
        tlsf = AllocMem(sizeof(tlsf_t), MEMF_ANY);

        if (tlsf)
        {
            bzero(tlsf, sizeof(tlsf_t));
            tlsf->autodestroy_self = 1;
        }
    }

    /* Store the tlsf pointer in the UserData field */
    mhe->mhe_UserData = tlsf;

    if (tlsf && ptr < mhe->mhe_MemHeader.mh_Upper)
    {
        tlsf_add_memory(mhe, ptr, (IPTR)mhe->mhe_MemHeader.mh_Upper - (IPTR)ptr);
    }

    return tlsf;
}

static void * tlsf_init_autogrow(struct MemHeaderExt * mhe, IPTR puddle_size, ULONG requirements, autogrow_get grow_function, autogrow_release release_function, APTR autogrow_data)
{
    tlsf_t *tlsf = tlsf_init(mhe);

    if (tlsf)
    {
        if (puddle_size < 4096)
            puddle_size = 4096;

        tlsf->autogrow_puddle_size = puddle_size;
        tlsf->autogrow_requirements = requirements;
        tlsf->autogrow_data = autogrow_data;
        tlsf->autogrow_get_fn = grow_function;
        tlsf->autogrow_release_fn = release_function;
    }

    return tlsf;
}

void tlsf_destroy(struct MemHeaderExt * mhe)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p)\n", __PRETTY_FUNCTION__, tlsf));

    if (tlsf)
    {
        tlsf_area_t *area = tlsf->memory_area;

        if (tlsf->autogrow_release_fn)
        {
            while (area)
            {
                tlsf_area_t *next = area->next;

                /*
                    Autogrown area? Release it here.
                    Otherwise it's the responsibility of the add_memory_area caller.
                */
                if (area->autogrown)
                    tlsf_release_memory_area(mhe, area);

                area = next;
            }
        }

        if (tlsf->autodestroy_self)
            FreeMem(tlsf, sizeof(tlsf_t));
    }
}

IPTR tlsf_avail(struct MemHeaderExt * mhe, ULONG requirements)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    IPTR ret = 0;

    if (requirements & MEMF_TOTAL)
        ret = tlsf->total_size;
    else if (requirements & MEMF_LARGEST)
    {
        bhdr_t *b = NULL;

        if (tlsf->flbitmap)
        {
            int fl = MS(tlsf->flbitmap);

            if (tlsf->slbitmap[fl])
            {
                int sl = MS(tlsf->slbitmap[fl]);

                b = tlsf->matrix[fl][sl];
            }
        }

        while (b)
        {
            if (GET_SIZE(b) > ret)
                ret = GET_SIZE(b);

            b = b->free_node.next;
        }
    }
    else
        ret = tlsf->free_size;

    return ret;
}

BOOL tlsf_in_bounds(struct MemHeaderExt * mhe, void * begin, void * end)
{
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;
    tlsf_area_t *area;

    area = tlsf->memory_area;

    D(nbug("[Kernel:TLSF] %s(%p, %p, %p)\n", __PRETTY_FUNCTION__, tlsf, begin, end));

    while (area)
    {
        D(nbug("[Kernel:TLSF] %s: area %p\n", __PRETTY_FUNCTION__, area));
        /*
         * Do checks only if the questioned memory ends before the end (sentinel bhdr)
         * of the area
         */
        if ((IPTR)end <= (IPTR)area->end)
        {
            D(nbug("[Kernel:TLSF] %s end <= area->end (%p <= %p)\n", __PRETTY_FUNCTION__, end, area->end));

            /* Get the bhdr of this area */
            bhdr_t *b = MEM_TO_BHDR(area);

            /* Forward to the beginning of the memory */
            b = GET_NEXT_BHDR(b, GET_SIZE(b));

            /* Requested memory starts at, or after, the beginning of the area */
            if ((IPTR)begin >= (IPTR)b->mem)
            {
                D(nbug("[Kernel:TLSF] %s begin >= b->mem (%p >= %p)\n", __PRETTY_FUNCTION__, begin, b->mem));
                return TRUE;
            }
        }

        area = area->next;
    }

    return FALSE;
}

static void destroy_Pool(struct MemHeaderExt *mhe)
{
    tlsf_destroy(mhe);
}

static APTR fetch_more_ram(void * data, IPTR *size)
{
    struct MemHeaderExt *mhe = (struct MemHeaderExt *)data;
    tlsf_t *tlsf = (tlsf_t *)mhe->mhe_UserData;

    D(nbug("[Kernel:TLSF] %s(%p, %u)\n", __PRETTY_FUNCTION__, mhe, *size));

    APTR ptr = AllocMem(*size, tlsf->autogrow_requirements);
    return ptr;
}

static VOID release_ram(void * data, APTR ptr, IPTR size)
{
    D(nbug("[Kernel:TLSF] %s(%p, %u)\n", __PRETTY_FUNCTION__, ptr, size));

    FreeMem(ptr, size);
}

static void * init_Pool(struct MemHeaderExt *mhe, IPTR puddleSize, IPTR initialSize)
{
    return tlsf_init_autogrow(mhe, puddleSize, (ULONG)(IPTR)mhe->mhe_MemHeader.mh_First, fetch_more_ram, release_ram, mhe);
}

void krnCreateTLSFMemHeader(CONST_STRPTR name, BYTE pri, APTR start, IPTR size, ULONG flags)
{
    /* If the end is less than (1 << 31), MEMF_31BIT is implied */
    if (((IPTR)start + size) < (1UL << 31))
        flags |= MEMF_31BIT;
    else
        flags &= ~MEMF_31BIT;

    flags |= MEMF_MANAGED;

    struct MemHeaderExt *mhe = start;

    mhe->mhe_Magic = MEMHEADER_EXT_MAGIC;

    mhe->mhe_DestroyPool = destroy_Pool;
    mhe->mhe_InitPool = init_Pool;

    mhe->mhe_Alloc           = tlsf_malloc;
    mhe->mhe_AllocVec        = tlsf_malloc;
    mhe->mhe_AllocAligned    = tlsf_malloc_aligned;
    mhe->mhe_AllocVecAligned = tlsf_malloc_aligned;
    mhe->mhe_Free            = tlsf_freemem;
    mhe->mhe_FreeVec         = tlsf_freevec;
    mhe->mhe_AllocAbs        = tlsf_allocabs;
    mhe->mhe_ReAlloc         = tlsf_realloc;
    mhe->mhe_Avail           = tlsf_avail;
    mhe->mhe_InBounds        = tlsf_in_bounds;

    mhe->mhe_MemHeader.mh_Node.ln_Succ = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Pred = NULL;
    mhe->mhe_MemHeader.mh_Node.ln_Type = NT_MEMORY;
    mhe->mhe_MemHeader.mh_Node.ln_Name = (STRPTR)name;
    mhe->mhe_MemHeader.mh_Node.ln_Pri  = pri;
    mhe->mhe_MemHeader.mh_Attributes   = flags;
    /* mh_First is not valid. In the current implementation it is used to transport pool requirements */
    mhe->mhe_MemHeader.mh_First        = NULL;

    mhe->mhe_UserData                  = NULL;

    /*
     * mh_Lower and mh_Upper are informational only. Since our MemHeader resides
     * inside the region it describes, the region includes the MemHeader.
     */
    mhe->mhe_MemHeader.mh_Lower        = start;
    mhe->mhe_MemHeader.mh_Upper        = start + size;
    mhe->mhe_MemHeader.mh_Free         = size;

    D(nbug("[Kernel:TLSF] %s: 0x%p -> 0x%p\n", __PRETTY_FUNCTION__, mhe->mhe_MemHeader.mh_Lower, mhe->mhe_MemHeader.mh_Upper));

    tlsf_init(mhe);
}

struct MemHeader * krnConvertMemHeaderToTLSF(struct MemHeader * source)
{
    struct MemChunk * mc = source->mh_First->mc_Next;
    APTR mh = source->mh_First;
    IPTR fsize = source->mh_First->mc_Bytes;
    APTR mhUpper = source->mh_Upper; // Cache the mh_Upper value

    if (source->mh_Attributes & MEMF_MANAGED)
        return NULL;

    /* The first chunk will host the mem header */
    krnCreateTLSFMemHeader(source->mh_Node.ln_Name, source->mh_Node.ln_Pri, mh, fsize,
            source->mh_Attributes);

    /* Restore the cached mh_Upper value (informational field only) */
    ((struct MemHeaderExt *)mh)->mhe_MemHeader.mh_Upper = mhUpper;

    /* source->mh_First is destroyed beyond this point */

    /* Add the remaining chunks */
    while (mc)
    {
        APTR p = mc->mc_Next;
        tlsf_add_memory(mh, mc, mc->mc_Bytes);
        /* mc is destroyed beyond this point */
        mc = p;
    }

    return mh;
}