1 #include <exec/types.h>
2 #include <exec/memory.h>
3 #include <exec/memheaderext.h>
4 #include <proto/exec.h>
8 #include "kernel_base.h"
9 #include "kernel_debug.h"
 * Minimal alignment as required by AROS. Contrary to the default
 * TLSF implementation, we do not allow smaller blocks here.
#define SIZE_ALIGN  AROS_WORSTALIGN

/*
 * Settings for TLSF allocator:
 * MAX_LOG2_SLI - amount of bits used for the second level list
 * MAX_FLI - maximal allowable allocation size - 2^32 should be enough
 *
 * NOTE(review): the #define of MAX_FLI itself is absent from this damaged
 * extract; it must be provided before REAL_FLI is used.
 */
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)
#define FLI_OFFSET      (6)
#define SMALL_BLOCK     (2 << FLI_OFFSET)
#define REAL_FLI        (MAX_FLI - FLI_OFFSET)

/* Round up/down to the allocator granularity (SIZE_ALIGN is a power of two) */
#define ROUNDUP(x)      (((x) + SIZE_ALIGN - 1) & ~(SIZE_ALIGN - 1))
#define ROUNDDOWN(x)    ((x) & ~(SIZE_ALIGN - 1))

/* Fields used in the block header length field to identify busy/free blocks */
#define THIS_FREE_MASK (IPTR)1
#define THIS_FREE   (IPTR)1
#define THIS_BUSY   (IPTR)0

#define PREV_FREE_MASK (IPTR)2
#define PREV_FREE   (IPTR)2
#define PREV_BUSY   (IPTR)0

#define SIZE_MASK   (~(THIS_FREE_MASK | PREV_FREE_MASK))

/* Branch-prediction hints */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* free node links together all free blocks if similar size */
struct bhdr_s;
typedef struct free_node_s
{
    /*
     * NOTE(review): members were missing from the damaged source;
     * reconstructed from usage (b->free_node.prev/.next hold bhdr pointers).
     */
    struct bhdr_s *prev;    /* previous free block in the same [fl][sl] list */
    struct bhdr_s *next;    /* next free block in the same [fl][sl] list */
} free_node_t;
58 /* block header in front of each block - both free and busy */
59 typedef struct hdr_s
{
65 * Each block is defined by bhdr_t structure. Free blocks contain only
66 * the header which allows us to go through all memory blocks in the system.
67 * The free blocks contain additionally the node which chains them in one
68 * of the free block lists
70 typedef struct bhdr_s
{
73 UBYTE __min_align
[SIZE_ALIGN
];
77 free_node_t free_node
;
81 /* Memory area within the TLSF pool */
82 typedef struct tlsf_area_s
{
83 struct tlsf_area_s
* next
; // Next memory area
84 bhdr_t
* end
; // Pointer to "end-of-area" block header
85 LONG autogrown
; // Automatically allocated by TLSF pool
89 tlsf_area_t
* memory_area
;
95 ULONG slbitmap
[REAL_FLI
];
97 IPTR autogrow_puddle_size
;
99 autogrow_get autogrow_get_fn
;
100 autogrow_release autogrow_release_fn
;
102 UBYTE autodestroy_self
;
104 bhdr_t
* matrix
[REAL_FLI
][MAX_SLI
];
107 static inline __attribute__((always_inline
)) int LS(IPTR i
)
109 if (sizeof(IPTR
) == 4)
110 return __builtin_ffs(i
) - 1;
112 return __builtin_ffsl(i
) - 1;
115 static inline __attribute__((always_inline
)) int MS(IPTR i
)
117 if (sizeof(IPTR
) == 4)
118 return 31 - __builtin_clz(i
);
120 return 63 - __builtin_clzl(i
);
123 static inline __attribute__((always_inline
)) void SetBit(int nr
, ULONG
*ptr
)
125 ptr
[nr
>> 5] |= (1 << (nr
& 31));
128 static inline __attribute__((always_inline
)) void ClrBit(int nr
, ULONG
*ptr
)
130 ptr
[nr
>> 5] &= ~(1 << (nr
& 31));
133 static inline __attribute__((always_inline
)) void MAPPING_INSERT(IPTR r
, int *fl
, int *sl
)
138 *sl
= (int)(r
/ (SMALL_BLOCK
/ MAX_SLI
));
143 *sl
= (int)(((IPTR
)r
>> (*fl
- MAX_LOG2_SLI
)) - MAX_SLI
);
148 static inline __attribute__((always_inline
)) void MAPPING_SEARCH(IPTR
*r
, int *fl
, int *sl
)
150 if (*r
< SMALL_BLOCK
)
153 *sl
= (int)(*r
/ (SMALL_BLOCK
/ MAX_SLI
));
157 IPTR tmp
= ((IPTR
)1 << (MS(*r
) - MAX_LOG2_SLI
)) - 1;
161 *sl
= (int)(((IPTR
)tr
>> (*fl
- MAX_LOG2_SLI
)) - MAX_SLI
);
167 static inline __attribute__((always_inline
)) bhdr_t
* FIND_SUITABLE_BLOCK(tlsf_t
*tlsf
, int *fl
, int *sl
)
169 IPTR bitmap_tmp
= tlsf
->slbitmap
[*fl
] & (~0 << *sl
);
174 *sl
= LS(bitmap_tmp
);
175 b
= tlsf
->matrix
[*fl
][*sl
];
179 bitmap_tmp
= tlsf
->flbitmap
& (~0 << (*fl
+ 1));
180 if (likely(bitmap_tmp
!= 0))
182 *fl
= LS(bitmap_tmp
);
183 *sl
= LS(tlsf
->slbitmap
[*fl
]);
184 b
= tlsf
->matrix
[*fl
][*sl
];
/*
 * Size/flag accessors and free-list manipulation, macro flavour.
 * NOTE(review): this region is presumably the USE_MACROS branch guarded by
 * the #endif /+ USE_MACROS +/ that follows the inline-function flavour; the
 * #ifdef/#else lines are absent from this damaged extract.
 */
#define GET_SIZE(b) ({ IPTR size = b->header.length & SIZE_MASK; size; })
#define GET_FLAGS(b) ({ IPTR flags = b->header.length & (THIS_FREE_MASK | PREV_FREE_MASK); flags; })
#define SET_SIZE(b, size) do{ b->header.length = GET_FLAGS(b) | (size); }while(0)
#define SET_FLAGS(b, flags) do{ b->header.length = GET_SIZE(b) | (flags); }while(0)
#define SET_SIZE_AND_FLAGS(b, size, flags) do{b->header.length = (size) | (flags);}while(0)
#define FREE_BLOCK(b) ((b->header.length & THIS_FREE_MASK) == THIS_FREE)
#define SET_FREE_BLOCK(b) do{b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_FREE;}while(0)
#define SET_BUSY_BLOCK(b) do{b->header.length = (b->header.length & ~THIS_FREE_MASK) | THIS_BUSY;}while(0)
#define SET_FREE_PREV_BLOCK(b) do{b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_FREE;}while(0)
#define SET_BUSY_PREV_BLOCK(b) do{b->header.length = (b->header.length & ~PREV_FREE_MASK) | PREV_BUSY;}while(0)
#define FREE_PREV_BLOCK(b) ((b->header.length & PREV_FREE_MASK) == PREV_FREE)
#define GET_NEXT_BHDR(hdr, size) ({ bhdr_t * __b = (bhdr_t *)((UBYTE *)&hdr->mem[0] + (size)); __b; })
#define MEM_TO_BHDR(ptr) ({ bhdr_t * b = (bhdr_t*)((void*)(ptr) - offsetof(bhdr_t, mem)); b; })

/*
 * Unlink free block b from the [fl][sl] list, keeping both bitmaps in sync.
 * FIX(review): the damaged source lost the closing "} \" and "}while(0)"
 * lines, which spliced the next #define into this macro - restored.
 */
#define REMOVE_HEADER(tlsf, b, fl, sl) do{ \
    if (b->free_node.next) \
        b->free_node.next->free_node.prev = b->free_node.prev; \
    if (b->free_node.prev) \
        b->free_node.prev->free_node.next = b->free_node.next; \
    if (tlsf->matrix[fl][sl] == b) { \
        tlsf->matrix[fl][sl] = b->free_node.next; \
        if (!tlsf->matrix[fl][sl]) \
            ClrBit(sl, &tlsf->slbitmap[fl]); \
        if (!tlsf->slbitmap[fl]) \
            ClrBit(fl, &tlsf->flbitmap); \
    } \
}while(0)

/* Push free block b onto the head of its [fl][sl] list, setting both bitmap bits. */
#define INSERT_FREE_BLOCK(tlsf, b) do { \
    int fl, sl; MAPPING_INSERT(GET_SIZE(b), &fl, &sl); \
    b->free_node.prev = NULL; \
    b->free_node.next = tlsf->matrix[fl][sl]; \
    if (tlsf->matrix[fl][sl]) \
        tlsf->matrix[fl][sl]->free_node.prev = b; \
    tlsf->matrix[fl][sl] = b; \
    SetBit(fl, &tlsf->flbitmap); \
    SetBit(sl, &tlsf->slbitmap[fl]); }while(0)
233 static inline __attribute__((always_inline
)) IPTR
GET_SIZE(bhdr_t
*b
)
235 return b
->header
.length
& SIZE_MASK
;
238 static inline __attribute__((always_inline
)) IPTR
GET_FLAGS(bhdr_t
*b
)
240 return b
->header
.length
& (THIS_FREE_MASK
| PREV_FREE_MASK
);
243 static inline __attribute__((always_inline
)) void SET_SIZE(bhdr_t
*b
, IPTR size
)
245 b
->header
.length
= GET_FLAGS(b
) | size
;
248 static inline __attribute__((always_inline
)) void SET_SIZE_AND_FLAGS(bhdr_t
*b
, IPTR size
, IPTR flags
)
250 b
->header
.length
= size
| flags
;
253 static inline __attribute__((always_inline
)) int FREE_BLOCK(bhdr_t
*b
)
255 return ((b
->header
.length
& THIS_FREE_MASK
) == THIS_FREE
);
258 static inline __attribute__((always_inline
)) void SET_FREE_BLOCK(bhdr_t
*b
)
260 b
->header
.length
= (b
->header
.length
& ~THIS_FREE_MASK
) | THIS_FREE
;
263 static inline __attribute__((always_inline
)) void SET_BUSY_BLOCK(bhdr_t
*b
)
265 b
->header
.length
= (b
->header
.length
& ~THIS_FREE_MASK
) | THIS_BUSY
;
268 static inline __attribute__((always_inline
)) void SET_FREE_PREV_BLOCK(bhdr_t
*b
)
270 b
->header
.length
= (b
->header
.length
& ~PREV_FREE_MASK
) | PREV_FREE
;
273 static inline __attribute__((always_inline
)) void SET_BUSY_PREV_BLOCK(bhdr_t
*b
)
275 b
->header
.length
= (b
->header
.length
& ~PREV_FREE_MASK
) | PREV_BUSY
;
278 static inline __attribute__((always_inline
)) int FREE_PREV_BLOCK(bhdr_t
*b
)
280 return ((b
->header
.length
& PREV_FREE_MASK
) == PREV_FREE
);
283 static inline __attribute__((always_inline
)) bhdr_t
* GET_NEXT_BHDR(bhdr_t
*hdr
, IPTR size
)
285 return (bhdr_t
*)((UBYTE
*)&hdr
->mem
[0] + size
);
288 static inline __attribute__((always_inline
)) bhdr_t
* MEM_TO_BHDR(void *ptr
)
290 return (bhdr_t
*)(ptr
- offsetof(bhdr_t
, mem
));
293 static inline __attribute__((always_inline
)) void REMOVE_HEADER(tlsf_t
*tlsf
, bhdr_t
*b
, int fl
, int sl
)
295 if (b
->free_node
.next
)
296 b
->free_node
.next
->free_node
.prev
= b
->free_node
.prev
;
297 if (b
->free_node
.prev
)
298 b
->free_node
.prev
->free_node
.next
= b
->free_node
.next
;
300 if (tlsf
->matrix
[fl
][sl
] == b
)
302 tlsf
->matrix
[fl
][sl
] = b
->free_node
.next
;
303 if (!tlsf
->matrix
[fl
][sl
])
304 ClrBit(sl
, &tlsf
->slbitmap
[fl
]);
305 if (!tlsf
->slbitmap
[fl
])
306 ClrBit(fl
, &tlsf
->flbitmap
);
310 static inline __attribute__((always_inline
)) void INSERT_FREE_BLOCK(tlsf_t
*tlsf
, bhdr_t
*b
)
314 MAPPING_INSERT(GET_SIZE(b
), &fl
, &sl
);
316 b
->free_node
.prev
= NULL
;
317 b
->free_node
.next
= tlsf
->matrix
[fl
][sl
];
319 if (tlsf
->matrix
[fl
][sl
])
320 tlsf
->matrix
[fl
][sl
]->free_node
.prev
= b
;
322 tlsf
->matrix
[fl
][sl
] = b
;
324 SetBit(fl
, &tlsf
->flbitmap
);
325 SetBit(sl
, &tlsf
->slbitmap
[fl
]);
328 #endif /* USE_MACROS */
330 void * tlsf_malloc(struct MemHeaderExt
*mhe
, IPTR size
, ULONG
*flags
)
332 tlsf_t
*tlsf
= (tlsf_t
*)mhe
->mhe_UserData
;
336 size
= ROUNDUP(size
);
338 D(nbug("tlsf_malloc(%p, %ld)\n", tlsf
, size
));
340 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
341 ObtainSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
343 /* Find the indices fl and sl for given size */
344 MAPPING_SEARCH(&size
, &fl
, &sl
);
346 /* Find block of either the right size or larger */
347 b
= FIND_SUITABLE_BLOCK(tlsf
, &fl
, &sl
);
349 D(nbug(" tlsf_malloc - adjusted size %ld\n", size
));
351 /* No block found? Either failure or tlsf will get more memory. */
354 D(nbug("tlsf_malloc out of memory\n"));
356 /* Do we have the autogrow feature? */
357 if (tlsf
->autogrow_get_fn
)
359 /* increase the size of requested block so that we can fit the headers too */
360 IPTR sz
= size
+ 3 * ROUNDUP(sizeof(hdr_t
));
362 /* Requested size less than puddle size? Get puddle size then */
363 if (sz
< tlsf
->autogrow_puddle_size
)
364 sz
= tlsf
->autogrow_puddle_size
;
366 D(nbug("querying for %d bytes\n"));
368 /* Try to get some memory */
369 void * ptr
= tlsf
->autogrow_get_fn(tlsf
->autogrow_data
, &sz
);
371 /* Got it? Add to tlsf then */
374 tlsf_add_memory(mhe
, ptr
, sz
);
376 /* We know the newly added memory is first in the list. Set the autogrown feature there */
377 tlsf
->memory_area
->autogrown
= 1;
379 /* Memory is there. Try to find the block again */
380 MAPPING_SEARCH(&size
, &fl
, &sl
);
381 b
= FIND_SUITABLE_BLOCK(tlsf
, &fl
, &sl
);
385 /* No block? FAILURE! */
388 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
389 ReleaseSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
396 bhdr_t
*next
= GET_NEXT_BHDR(b
, GET_SIZE(b
));
398 /* Remove the found block from the free list */
399 REMOVE_HEADER(tlsf
, b
, fl
, sl
);
401 /* Is this block larger then requested? Try to split it then */
402 if (likely(GET_SIZE(b
) > (size
+ ROUNDUP(sizeof(hdr_t
)))))
404 /* New split block */
405 bhdr_t
*sb
= GET_NEXT_BHDR(b
, size
);
408 /* Set size, this free and previous busy */
409 SET_SIZE_AND_FLAGS(sb
, GET_SIZE(b
) - size
- ROUNDUP(sizeof(hdr_t
)), THIS_FREE
| PREV_BUSY
);
411 /* The next header points to free block now */
412 next
->header
.prev
= sb
;
414 /* previous block (sb) is free */
415 SET_FREE_PREV_BLOCK(next
);
417 /* Allocated block size truncated */
420 D(nbug(" new splitted block of size %ld\n", GET_SIZE(sb
)));
421 /* Free block is inserted to free list */
422 INSERT_FREE_BLOCK(tlsf
, sb
);
426 /* The block was of right size. Set it just busy in next pointer */
427 SET_BUSY_PREV_BLOCK(next
);
430 /* The allocated block is busy */
433 /* Clear the pointers just in case */
434 b
->free_node
.next
= NULL
;
435 b
->free_node
.prev
= NULL
;
437 /* Update counters */
438 tlsf
->free_size
-= GET_SIZE(b
);
439 mhe
->mhe_MemHeader
.mh_Free
= tlsf
->free_size
;
441 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
442 ReleaseSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
444 if (flags
&& (*flags
& MEMF_CLEAR
))
445 bzero(&b
->mem
[0], size
);
447 /* And return memory */
451 static inline __attribute__((always_inline
)) void MERGE(bhdr_t
*b1
, bhdr_t
*b2
)
453 /* Merging adjusts the size - it's sum of both sizes plus size of block header */
454 SET_SIZE(b1
, GET_SIZE(b1
) + GET_SIZE(b2
) + ROUNDUP(sizeof(hdr_t
)));
457 static inline __attribute__((always_inline
)) bhdr_t
* MERGE_PREV(tlsf_t
*tlsf
, bhdr_t
*block
)
459 /* Is previous block free? */
460 if (FREE_PREV_BLOCK(block
))
463 bhdr_t
*prev
= block
->header
.prev
;
465 /* Calculate index for removal */
466 MAPPING_INSERT(GET_SIZE(prev
), &fl
, &sl
);
468 /* Do remove the header from the list */
469 REMOVE_HEADER(tlsf
, prev
, fl
, sl
);
480 static inline __attribute__((always_inline
)) bhdr_t
* MERGE_NEXT(tlsf_t
*tlsf
, bhdr_t
*block
)
482 bhdr_t
*next
= GET_NEXT_BHDR(block
, GET_SIZE(block
));
484 /* Is next block free? */
485 if (FREE_BLOCK(next
))
489 /* Calculate index for removal */
490 MAPPING_INSERT(GET_SIZE(next
), &fl
, &sl
);
492 /* Remove the header from the list */
493 REMOVE_HEADER(tlsf
, next
, fl
, sl
);
502 void tlsf_freevec(struct MemHeaderExt
* mhe
, APTR ptr
)
504 tlsf_t
*tlsf
= (tlsf_t
*)mhe
->mhe_UserData
;
505 bhdr_t
*fb
= MEM_TO_BHDR(ptr
);
508 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
509 ObtainSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
511 /* Mark block as free */
514 /* adjust free size field on tlsf */
515 tlsf
->free_size
+= GET_SIZE(fb
);
517 /* Try to merge with previous and next blocks (if free) */
518 fb
= MERGE_PREV(tlsf
, fb
);
519 fb
= MERGE_NEXT(tlsf
, fb
);
521 /* Tell next block that previous one is free. Also update the prev link in case it changed */
522 next
= GET_NEXT_BHDR(fb
, GET_SIZE(fb
));
523 SET_FREE_PREV_BLOCK(next
);
524 next
->header
.prev
= fb
;
526 /* Insert free block into the proper list */
527 INSERT_FREE_BLOCK(tlsf
, fb
);
529 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
530 ReleaseSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
533 void tlsf_freemem(struct MemHeaderExt
* mhe
, APTR ptr
, IPTR size
)
536 tlsf_freevec(mhe
, ptr
);
539 void * tlsf_realloc(struct MemHeaderExt
*mhe
, APTR ptr
, IPTR new_size
)
541 tlsf_t
*tlsf
= (tlsf_t
*)mhe
->mhe_UserData
;
547 /* NULL pointer? just allocate the memory */
549 return tlsf_malloc(mhe
, new_size
, NULL
);
551 /* size = 0? free memory */
552 if (unlikely(!new_size
))
554 tlsf_freevec(mhe
, ptr
);
558 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
559 ObtainSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
561 new_size
= ROUNDUP(new_size
);
563 b
= MEM_TO_BHDR(ptr
);
565 if (unlikely(new_size
== GET_SIZE(b
)))
568 bnext
= GET_NEXT_BHDR(b
, GET_SIZE(b
));
570 /* Is new size smaller than the previous one? Try to split the block if this is the case */
571 if (new_size
<= (GET_SIZE(b
)))
573 /* New header starts right after the current block b */
574 bhdr_t
* b1
= GET_NEXT_BHDR(b
, new_size
);
576 /* Update pointer and size */
578 SET_SIZE_AND_FLAGS(b1
, GET_SIZE(b
) - new_size
- ROUNDUP(sizeof(hdr_t
)), THIS_FREE
| PREV_BUSY
);
580 /* Current block gets smaller */
581 SET_SIZE(b
, new_size
);
583 tlsf
->free_size
+= GET_SIZE(b1
);
585 /* Try to merge with next block */
586 b1
= MERGE_NEXT(tlsf
, b1
);
588 /* Tell next block that previous one is free. Also update the prev link in case it changed */
589 bnext
= GET_NEXT_BHDR(b1
, GET_SIZE(b1
));
590 SET_FREE_PREV_BLOCK(bnext
);
591 bnext
->header
.prev
= b1
;
593 /* Insert free block into the proper list */
594 INSERT_FREE_BLOCK(tlsf
, b1
);
598 /* Is next block free? Is there enough free space? */
599 if (FREE_BLOCK(bnext
) && new_size
<= GET_SIZE(b
) + GET_SIZE(bnext
) + ROUNDUP(sizeof(hdr_t
)))
602 IPTR rest_size
= ROUNDUP(sizeof(hdr_t
)) + GET_SIZE(bnext
) + GET_SIZE(b
) - new_size
;
604 MAPPING_INSERT(GET_SIZE(bnext
), &fl
, &sl
);
606 REMOVE_HEADER(tlsf
, bnext
, fl
, sl
);
608 if (rest_size
> ROUNDUP(sizeof(hdr_t
)))
610 rest_size
-= ROUNDUP(sizeof(hdr_t
));
612 SET_SIZE(b
, new_size
);
614 b1
= GET_NEXT_BHDR(b
, GET_SIZE(b
));
617 SET_SIZE_AND_FLAGS(b1
, rest_size
, THIS_FREE
| PREV_BUSY
);
619 bnext
= GET_NEXT_BHDR(b1
, GET_SIZE(b1
));
620 bnext
->header
.prev
= b1
;
621 SET_FREE_PREV_BLOCK(bnext
);
623 INSERT_FREE_BLOCK(tlsf
, b1
);
628 SET_SIZE(b
, new_size
+ ROUNDUP(sizeof(hdr_t
)));
630 SET_SIZE(b
, new_size
);
632 bnext
= GET_NEXT_BHDR(b
, GET_SIZE(b
));
633 bnext
->header
.prev
= b
;
634 SET_BUSY_PREV_BLOCK(bnext
);
639 /* Next block was not free. Create new buffer and copy old contents there */
640 void * p
= tlsf_malloc(mhe
, new_size
, NULL
);
643 CopyMemQuick(ptr
, p
, GET_SIZE(b
));
644 tlsf_freevec(mhe
, ptr
);
650 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
651 ReleaseSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
657 void * tlsf_allocabs(struct MemHeaderExt
* mhe
, IPTR size
, void * ptr
)
659 tlsf_t
*tlsf
= (tlsf_t
*)mhe
->mhe_UserData
;
664 IPTR sz
= ROUNDUP(size
);
666 D(nbug("[TLSF] allocabs(%p, %ld)\n", ptr
, size
));
669 region_end
= (UBYTE
*)ptr
+ sz
;
671 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
672 ObtainSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
674 /* Start searching here. It doesn't make sense to go through regions which are smaller */
675 MAPPING_SEARCH(&sz
, &fl
, &sl
);
677 /* Start looking now :) */
678 for (; fl
< MAX_FLI
; fl
++)
680 for (; sl
< MAX_SLI
; sl
++)
682 bhdr_t
*b0
= tlsf
->matrix
[fl
][sl
];
684 /* If block was there, check it */
687 bhdr_t
*b1
= GET_NEXT_BHDR(b0
, GET_SIZE(b0
));
689 /* The block has to contain _whole_ requested region, max exceed it in size though */
690 if (b0
->mem
<= region_start
&& (UBYTE
*)b1
>= region_end
)
692 /* block header of requested region */
693 bhdr_t
*breg
= MEM_TO_BHDR(ptr
);
696 This is the block we're looking for. Unchain it from the bidirectional list of
699 Previous entry's next will point to this block's next. If previous is NULL, matrix
700 will be set to block's next
702 if (b0
->free_node
.prev
)
703 b0
->free_node
.prev
->free_node
.next
= b0
->free_node
.next
;
705 tlsf
->matrix
[fl
][sl
] = b0
->free_node
.next
;
708 Next entry's prev will point to this block's previous.
710 if (b0
->free_node
.next
)
711 b0
->free_node
.next
->free_node
.prev
= b0
->free_node
.prev
;
713 /* Empty SL matrix for size j? Clear bit */
714 if (!tlsf
->matrix
[fl
][sl
])
716 ClrBit(sl
, &tlsf
->slbitmap
[fl
]);
718 /* Empty entire SL matrix for given FL index? clear that bit too */
719 if (tlsf
->slbitmap
[fl
])
720 ClrBit(fl
, &tlsf
->flbitmap
);
723 b0
->free_node
.prev
= NULL
;
724 b0
->free_node
.next
= NULL
;
728 At this point the block is removed from free list and marked as used.
729 Now, split it if necessary...
732 /* begin of the block != begin of the block header of requested region? */
736 Adjust region's block header. Mark in size that previous (aka b0) is free.
737 Reduce the size of b0 as well as size of breg too.
739 breg
->header
.prev
= b0
;
740 SET_SIZE_AND_FLAGS(breg
, GET_SIZE(b0
)-((IPTR
)breg
- (IPTR
)b0
), PREV_FREE
| THIS_BUSY
);
742 /* Update the next block. Mark in size that previous (breg) is used */
743 b1
->header
.prev
= breg
;
744 SET_BUSY_PREV_BLOCK(b1
);
746 /* b0's prev state is keept. b0 itself is marked as free block */
748 SET_SIZE(b0
, (IPTR
)breg
- (IPTR
)b0
->mem
);
750 /* Insert b0 to free list */
751 MAPPING_INSERT(GET_SIZE(b0
), &fl
, &sl
);
752 INSERT_FREE_BLOCK(tlsf
, b0
);
755 /* Is it necessary to split the requested region at the end? */
756 if ((SIZE_ALIGN
+ GET_SIZE(breg
)) > size
)
758 IPTR tmp_size
= GET_SIZE(breg
) - size
- SIZE_ALIGN
;
760 /* New region header directly at end of the requested region */
761 bhdr_t
*b2
= GET_NEXT_BHDR(breg
, size
);
764 b2
->header
.prev
= breg
;
765 SET_SIZE_AND_FLAGS(b2
, tmp_size
, PREV_BUSY
| THIS_FREE
);
767 /* requested region's size is now smaller */
768 SET_SIZE(breg
, size
);
770 /* The next block header point to newly created one */
771 b1
->header
.prev
= b2
;
772 SET_FREE_PREV_BLOCK(b1
);
774 /* Insert newly created block to free list */
775 MAPPING_INSERT(GET_SIZE(b2
), &fl
, &sl
);
776 INSERT_FREE_BLOCK(tlsf
, b2
);
779 tlsf
->free_size
-= GET_SIZE(breg
);
782 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
783 ReleaseSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
788 b0
= b0
->free_node
.next
;
793 if (mhe
->mhe_MemHeader
.mh_Attributes
& MEMF_SEM_PROTECTED
)
794 ReleaseSemaphore((struct SignalSemaphore
*)mhe
->mhe_MemHeader
.mh_Node
.ln_Name
);
799 tlsf_area_t
* init_memory_area(void * memory
, IPTR size
)
801 bhdr_t
* hdr
= (bhdr_t
*)memory
;
807 size
= ROUNDDOWN(size
);
809 /* Prepare first header, which protects the tlst_area_t header */
810 hdr
->header
.length
= ROUNDUP(sizeof(tlsf_area_t
)) | THIS_BUSY
| PREV_BUSY
;
811 hdr
->header
.prev
= NULL
;
813 b
= GET_NEXT_BHDR(hdr
, ROUNDUP(sizeof(tlsf_area_t
)));
814 b
->header
.prev
= hdr
;
815 b
->header
.length
= (size
- 3*ROUNDUP(sizeof(hdr_t
)) - ROUNDUP(sizeof(tlsf_area_t
))) | PREV_BUSY
| THIS_BUSY
;
817 bend
= GET_NEXT_BHDR(b
, GET_SIZE(b
));
818 bend
->header
.length
= 0 | THIS_BUSY
| PREV_BUSY
;
819 bend
->header
.prev
= b
;
821 area
= (tlsf_area_t
*)hdr
->mem
;
827 void tlsf_add_memory(struct MemHeaderExt
*mhe
, void *memory
, IPTR size
)
829 tlsf_t
*tlsf
= (tlsf_t
*)mhe
->mhe_UserData
;
831 D(nbug("tlsf_add_memory(%p, %p, %d)\n", tlsf
, memory
, size
));
833 if (memory
&& size
> 3*ROUNDUP(sizeof(bhdr_t
)))
835 tlsf_area_t
*area
= init_memory_area(memory
, size
);
838 D(nbug(" adding memory\n"));
840 area
->next
= tlsf
->memory_area
;
841 tlsf
->memory_area
= area
;
843 /* User added memory. Not autogrown */
846 b
= MEM_TO_BHDR(area
);
847 b
= GET_NEXT_BHDR(b
, GET_SIZE(b
));
849 tlsf
->total_size
+= size
;
851 D(nbug(" total_size=%08x\n", tlsf
->total_size
));
853 tlsf_freevec(mhe
, b
->mem
);
857 void tlsf_add_memory_and_merge(struct MemHeaderExt
*mhe
, void *memory
, IPTR size
)
859 tlsf_add_memory(mhe
, memory
, size
);
860 // TODO: add memory and merge...
864 void bzero(void *ptr
, IPTR len
)
866 UBYTE
*p
= (UBYTE
*)ptr
;
873 void * tlsf_init(struct MemHeaderExt
* mhe
)
876 void * ptr
= mhe
->mhe_MemHeader
.mh_Lower
;
878 /* if MemHeaderExt starts at the beginning of handled memory, advance the ptr */
880 ptr
+= ROUNDUP(sizeof(struct MemHeaderExt
));
882 /* Is there enough room for tlsf in the mem header itself? */
883 if (mhe
->mhe_MemHeader
.mh_Free
>= (ROUNDUP(sizeof(tlsf_t
)) + 3 * ROUNDUP(sizeof(bhdr_t
))))
885 /* tlsf will be stored inside handled memory */
886 tlsf
= (tlsf_t
*)ptr
;
888 ptr
+= ROUNDUP(sizeof(tlsf_t
));
890 bzero(tlsf
, sizeof(tlsf_t
));
891 tlsf
->autodestroy_self
= 0;
895 /* No place for tlsf header in MemHeaderExt? Allocate it separately */
896 tlsf
= AllocMem(sizeof(tlsf_t
), MEMF_ANY
);
900 bzero(tlsf
, sizeof(tlsf_t
));
901 tlsf
->autodestroy_self
= 1;
905 /* Store the tlsf pointer in UserData field */
906 mhe
->mhe_UserData
= tlsf
;
908 if (tlsf
&& ptr
< mhe
->mhe_MemHeader
.mh_Upper
)
910 tlsf_add_memory(mhe
, ptr
, (IPTR
)mhe
->mhe_MemHeader
.mh_Upper
- (IPTR
)ptr
);
916 void * tlsf_init_autogrow(struct MemHeaderExt
* mhe
, IPTR puddle_size
, autogrow_get grow_function
, autogrow_release release_function
, APTR autogrow_data
)
918 tlsf_t
*tlsf
= tlsf_init(mhe
);
922 if (puddle_size
< 4096)
925 tlsf
->autogrow_puddle_size
= puddle_size
;
926 tlsf
->autogrow_data
= autogrow_data
;
927 tlsf
->autogrow_get_fn
= grow_function
;
928 tlsf
->autogrow_release_fn
= release_function
;
934 void tlsf_destroy(struct MemHeaderExt
* mhe
)
936 tlsf_t
*tlsf
= (tlsf_t
*)mhe
->mhe_UserData
;
938 D(nbug("tlsf_destroy(%p)\n", tlsf
));
942 tlsf_area_t
*area
= tlsf
->memory_area
;
944 if (tlsf
->autogrow_release_fn
)
948 tlsf_area_t
*next
= area
->next
;
955 Autogrown area? Release it here.
956 Otherwise it's the responsibility of add_memory_area caller
960 /* get the begin of this area */
961 begin
= MEM_TO_BHDR(area
);
963 /* get sentinel block */
966 /* end of this area is end of sentinel block */
967 end
= GET_NEXT_BHDR(b
, 0);
969 /* calculate the size of area */
970 size
= (IPTR
)end
- (IPTR
)begin
;
972 if (tlsf
->autogrow_release_fn
)
973 tlsf
->autogrow_release_fn(tlsf
->autogrow_data
, begin
, size
);
980 if (tlsf
->autodestroy_self
)
981 FreeMem(tlsf
, sizeof(tlsf_t
));
985 IPTR
tlsf_avail(struct MemHeaderExt
* mhe
, ULONG requirements
)
987 tlsf_t
*tlsf
= (tlsf_t
*)mhe
->mhe_UserData
;
990 if (requirements
& MEMF_TOTAL
)
991 ret
= tlsf
->total_size
;
992 else if (requirements
& MEMF_LARGEST
)
998 int fl
= MS(tlsf
->flbitmap
);
1000 if (tlsf
->slbitmap
[fl
])
1002 int sl
= MS(tlsf
->slbitmap
[fl
]);
1004 b
= tlsf
->matrix
[fl
][sl
];
1010 if (GET_SIZE(b
) > ret
)
1013 b
= b
->free_node
.next
;
1017 ret
= tlsf
->free_size
;
1022 BOOL
tlsf_in_bounds(struct MemHeaderExt
* mhe
, void * begin
, void * end
)
1024 tlsf_t
*tlsf
= (tlsf_t
*)mhe
->mhe_UserData
;
1027 area
= tlsf
->memory_area
;
1029 D(nbug("tlsf_in_bounds(%p, %p, %p)\n", tlsf
, begin
, end
));
1033 D(nbug(" area %p\n"));
1035 * Do checks only if questioned memory ends before the end (sentinel bhdr)
1038 if ((IPTR
)end
<= (IPTR
)area
->end
)
1040 D(nbug(" end <= area->end (%p <= %p)\n", end
, area
->end
));
1042 /* Get the bhdr of this area */
1043 bhdr_t
*b
= MEM_TO_BHDR(area
);
1045 /* Forward to the begin of the memory */
1046 b
= GET_NEXT_BHDR(b
, GET_SIZE(b
));
1048 /* requested memory starts at begin or after begin of the area */
1049 if ((IPTR
)begin
>= (IPTR
)b
->mem
)
1051 D(nbug(" begin >= b->mem (%p >= %p)\n", begin
, b
->mem
));
/*
 * MemHeaderExt mhe_DestroyPool entry point.
 * NOTE(review): the body was missing from the damaged source; forwarding to
 * tlsf_destroy() matches the init_Pool/tlsf_init_autogrow pairing - confirm
 * against the reference source.
 */
static void destroy_Pool(struct MemHeaderExt *mhe)
{
    tlsf_destroy(mhe);
}
1068 static APTR
fetch_more_ram(void * data
, IPTR
*size
)
1070 struct MemHeaderExt
*mhe
= (struct MemHeaderExt
*)data
;
1072 D(nbug("[TLSF] fetch_more_ram(%p, %d)\n", mhe
, *size
));
1074 APTR ptr
= AllocMem(*size
, mhe
->mhe_MemHeader
.mh_Attributes
);
1078 static VOID
release_ram(void * data
, APTR ptr
, IPTR size
)
1080 D(nbug("[TLSF] release_ram(%p, %d)\n", ptr
, size
));
1085 static void * init_Pool(struct MemHeaderExt
*mhe
, IPTR puddleSize
, IPTR initialSize
)
1087 return tlsf_init_autogrow(mhe
, puddleSize
, fetch_more_ram
, release_ram
, mhe
);
1090 void krnCreateTLSFMemHeader(CONST_STRPTR name
, BYTE pri
, APTR start
, IPTR size
, ULONG flags
)
1092 /* If the end is less than (1 << 31), MEMF_31BIT is implied */
1093 if (((IPTR
)start
+size
) < (1UL << 31))
1094 flags
|= MEMF_31BIT
;
1096 flags
&= ~MEMF_31BIT
;
1098 flags
|= MEMF_MANAGED
;
1100 struct MemHeaderExt
*mhe
= start
;
1102 mhe
->mhe_Magic
= MEMHEADER_EXT_MAGIC
;
1104 mhe
->mhe_DestroyPool
= destroy_Pool
;
1105 mhe
->mhe_InitPool
= init_Pool
;
1107 mhe
->mhe_Alloc
= tlsf_malloc
;
1108 mhe
->mhe_AllocVec
= tlsf_malloc
;
1109 mhe
->mhe_Free
= tlsf_freemem
;
1110 mhe
->mhe_FreeVec
= tlsf_freevec
;
1111 mhe
->mhe_AllocAbs
= tlsf_allocabs
;
1112 mhe
->mhe_ReAlloc
= tlsf_realloc
;
1113 mhe
->mhe_Avail
= tlsf_avail
;
1114 mhe
->mhe_InBounds
= tlsf_in_bounds
;
1116 mhe
->mhe_MemHeader
.mh_Node
.ln_Succ
= NULL
;
1117 mhe
->mhe_MemHeader
.mh_Node
.ln_Pred
= NULL
;
1118 mhe
->mhe_MemHeader
.mh_Node
.ln_Type
= NT_MEMORY
;
1119 mhe
->mhe_MemHeader
.mh_Node
.ln_Name
= (STRPTR
)name
;
1120 mhe
->mhe_MemHeader
.mh_Node
.ln_Pri
= pri
;
1121 mhe
->mhe_MemHeader
.mh_Attributes
= flags
;
1122 /* The first MemChunk needs to be aligned. We do it by adding MEMHEADER_TOTAL. */
1123 mhe
->mhe_MemHeader
.mh_First
= NULL
;
1125 mhe
->mhe_UserData
= NULL
;
1128 * mh_Lower and mh_Upper are informational only. Since our MemHeader resides
1129 * inside the region it describes, the region includes MemHeader.
1131 mhe
->mhe_MemHeader
.mh_Lower
= start
;
1132 mhe
->mhe_MemHeader
.mh_Upper
= start
+ size
;
1133 mhe
->mhe_MemHeader
.mh_Free
= size
;