/*
 * netsniff-ng - the packet sniffing beast
 * By Daniel Borkmann <daniel@netsniff-ng.org>
 * Copyright 2011 Daniel Borkmann.
 * [...]
 * Two Levels Segregate Fit memory allocator (TLSF), Version 2.4.6
 * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
 * Thanks to Ismael Ripoll for his suggestions and reviews
 * Copyright (C) 2008, 2007, 2006, 2005, 2004
 * This code is released using a dual license strategy: GPL/LGPL
 * You can choose the licence that better fits your requirements.
 * Released under the terms of the GNU General Public License Version 2.0
 * Released under the terms of the GNU Lesser General Public License Version 2.1
 * [...]
 * (Jul 28 2007) Herman ten Brugge <hermantenbrugge@home.nl>:
 * - Add 64 bit support. It now runs on x86_64 and solaris64.
 * - I also tested this on vxworks/32 and solaris/32 and i386/32 processors.
 * - Remove assembly code. I could not measure any performance difference
 * on my core2 processor. This also makes the code more portable.
 * - Moved defines/typedefs from tlsf.h to tlsf.c
 * - Changed MIN_BLOCK_SIZE to sizeof (free_ptr_t) and BHDR_OVERHEAD to
 * (sizeof (bhdr_t) - MIN_BLOCK_SIZE). This does not change the fact
 * that the minimum size is still sizeof [...]
 * - Changed all C++ comment style to C style. (// -> /.* ... *./)
 * - Used ls_bit instead of ffs and ms_bit instead of fls. I did this to
 * avoid confusion with the standard ffs function which returns [...]
 * - Created set_bit/clear_bit functions because they are not present [...]
 * - Added locking support + extra file target.h to show how to use it.
 * - Added get_used_size function (REMOVED in 2.4)
 * - Added rtl_realloc and rtl_calloc function
 * - Implemented realloc clever support.
 * - Added some test code in the example directory.
 * - Bug fixed (discovered by the rockbox project: www.rockbox.org).
 * (Oct 23 2006) Adam Scislowicz:
 * - Support for ARMv5 implemented
 */
/*
 * Build-time configuration.  Every knob can be overridden from the
 * compiler command line; the defaults below follow TLSF 2.4.6.
 * NOTE(review): this region came from a damaged listing -- the
 * #ifndef/#else/#endif guard lines were not visible and were restored
 * to the canonical TLSF 2.4.6 layout; verify against upstream.
 */
/*#define USE_SBRK (0) */
/*#define USE_MMAP (0) */

#ifndef USE_PRINTF
#define USE_PRINTF	(1)	/* route PRINT_MSG/ERROR_MSG to stdio */
#endif

#include <string.h>
#include <stdio.h>

#ifndef TLSF_USE_LOCKS
#define TLSF_USE_LOCKS	(1)	/* serialise the default pool with a mutex */
#endif

#ifndef TLSF_STATISTIC
#define TLSF_STATISTIC	(0)	/* maintain used_size/max_size accounting */
#endif

#ifndef USE_MMAP
#define USE_MMAP	(1)	/* grow the default pool via mmap() */
#endif

#ifndef USE_SBRK
#define USE_SBRK	(0)	/* grow the default pool via sbrk() */
#endif

#ifndef CHECK_DOUBLE_FREE
#define CHECK_DOUBLE_FREE (0)	/* detect freeing an already-free block */
#endif

#if TLSF_USE_LOCKS
#include <pthread.h>
#define TLSF_MLOCK_T			pthread_mutex_t
#define TLSF_CREATE_LOCK(l)		pthread_mutex_init(l, NULL)
#define TLSF_DESTROY_LOCK(l)		pthread_mutex_destroy(l)
#define TLSF_ACQUIRE_LOCK(l)		pthread_mutex_lock(l)
#define TLSF_RELEASE_LOCK(l)		pthread_mutex_unlock(l)
#else
#define TLSF_CREATE_LOCK(_unused_)	do { } while (0)
#define TLSF_DESTROY_LOCK(_unused_)	do { } while (0)
#define TLSF_ACQUIRE_LOCK(_unused_)	do { } while (0)
#define TLSF_RELEASE_LOCK(_unused_)	do { } while (0)
#endif

#if TLSF_STATISTIC
/* Account a block (payload + header) into the pool's usage counters. */
#define TLSF_ADD_SIZE(tlsf, b) do {					\
		tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
		if (tlsf->used_size > tlsf->max_size)			\
			tlsf->max_size = tlsf->used_size;		\
	} while (0)

#define TLSF_REMOVE_SIZE(tlsf, b) do {					\
		tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
	} while (0)
#else
#define TLSF_ADD_SIZE(tlsf, b)		do { } while (0)
#define TLSF_REMOVE_SIZE(tlsf, b)	do { } while (0)
#endif

#if USE_MMAP || USE_SBRK
#include <unistd.h>
#endif

#if USE_MMAP
#include <sys/mman.h>
#endif

#if !defined(__GNUC__)
#ifndef __inline__
#define __inline__
#endif
#endif

/* The debug functions only can be used when _DEBUG_TLSF_ is set. */
#ifndef _DEBUG_TLSF_
#define _DEBUG_TLSF_ (0)
#endif

/*************************************************************************/
/* Definition of the structures used by TLSF */

/* Some IMPORTANT TLSF parameters */
/* Unlike the preview TLSF versions, now they are statics */
#define BLOCK_ALIGN	(sizeof(void *) * 2)

#define MAX_FLI		(30)
#define MAX_LOG2_SLI	(5)
#define MAX_SLI		(1 << MAX_LOG2_SLI)	/* MAX_SLI = 2^MAX_LOG2_SLI */

#define FLI_OFFSET	(6)	/* tlsf structure just will manage blocks bigger */
/* than 128 bytes */
#define SMALL_BLOCK	(128)
#define REAL_FLI	(MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE	(sizeof (free_ptr_t))
#define BHDR_OVERHEAD	(sizeof (bhdr_t) - MIN_BLOCK_SIZE)
#define TLSF_SIGNATURE	(0x2A59FA59)

#define PTR_MASK	(sizeof(void *) - 1)
#define BLOCK_SIZE	(0xFFFFFFFF - PTR_MASK)

#define GET_NEXT_BLOCK(_addr, _r)	((bhdr_t *) ((char *) (_addr) + (_r)))
#define MEM_ALIGN		((BLOCK_ALIGN) - 1)
#define ROUNDUP_SIZE(_r)	(((_r) + MEM_ALIGN) & ~MEM_ALIGN)
#define ROUNDDOWN_SIZE(_r)	((_r) & ~MEM_ALIGN)
#define ROUNDUP(_x, _v)		((((~(_x)) + 1) & ((_v) - 1)) + (_x))

#define BLOCK_STATE	(0x1)
#define PREV_STATE	(0x2)

/* bit 0 of the block size */
#define FREE_BLOCK	(0x1)
#define USED_BLOCK	(0x0)

/* bit 1 of the block size */
#define PREV_FREE	(0x2)
#define PREV_USED	(0x0)

#define DEFAULT_AREA_SIZE (1024*10)

#if USE_MMAP
#define PAGE_SIZE (getpagesize())
#endif

#if USE_PRINTF
#define PRINT_MSG(fmt, args...) printf(fmt, ## args)
#define ERROR_MSG(fmt, args...) fprintf(stderr, fmt, ## args)
#else
#if !defined(PRINT_MSG)
#define PRINT_MSG(fmt, args...)
#endif
#if !defined(ERROR_MSG)
#define ERROR_MSG(fmt, args...)
#endif
#endif

#ifndef ATTRIBUTE_UNUSED
#if defined(__GNUC__)
#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
#else
#define ATTRIBUTE_UNUSED
#endif
#endif
193 typedef unsigned int u32_t
; /* NOTE: Make sure that this type is 4 bytes long on your computer */
194 typedef unsigned char u8_t
; /* NOTE: Make sure that this type is 1 byte on your computer */
196 typedef struct free_ptr_struct
{
197 struct bhdr_struct
*prev
;
198 struct bhdr_struct
*next
;
201 typedef struct bhdr_struct
{
202 /* This pointer is just valid if the first bit of size is set */
203 struct bhdr_struct
*prev_hdr
;
204 /* The size is stored in bytes */
205 size_t size
; /* bit 0 indicates whether the block is used and */
206 /* bit 1 allows to know whether the previous block is free */
208 struct free_ptr_struct free_ptr
;
209 u8_t buffer
[1]; /*sizeof(struct free_ptr_struct)]; */
213 /* This structure is embedded at the beginning of each area, giving us
214 * enough information to cope with a set of areas */
216 typedef struct area_info_struct
{
218 struct area_info_struct
*next
;
221 typedef struct TLSF_struct
{
222 /* the TLSF's structure signature */
223 u32_t tlsf_signature
;
230 /* These can not be calculated outside tlsf because we
231 * do not know the sizes when freeing/reallocing memory. */
236 /* A linked list holding all the existing areas */
237 area_info_t
*area_head
;
239 /* the first-level bitmap */
240 /* This array should have a size of REAL_FLI bits */
243 /* the second-level bitmap */
244 u32_t sl_bitmap
[REAL_FLI
];
246 bhdr_t
*matrix
[REAL_FLI
][MAX_SLI
];
249 /******************************************************************/
250 /************** Helping functions **************************/
251 /******************************************************************/
252 static __inline__
void set_bit(int nr
, u32_t
* addr
);
253 static __inline__
void clear_bit(int nr
, u32_t
* addr
);
254 static __inline__
int ls_bit(int x
);
255 static __inline__
int ms_bit(int x
);
256 static __inline__
void MAPPING_SEARCH(size_t * _r
, int *_fl
, int *_sl
);
257 static __inline__
void MAPPING_INSERT(size_t _r
, int *_fl
, int *_sl
);
258 static __inline__ bhdr_t
*FIND_SUITABLE_BLOCK(tlsf_t
* _tlsf
, int *_fl
,
260 static __inline__ bhdr_t
*process_area(void *area
, size_t size
);
261 #if USE_SBRK || USE_MMAP
262 static __inline__
void *get_new_area(size_t * size
);
/* table[i] == floor(log2(i)) for 1 <= i <= 255, and table[0] == -1.
 * ls_bit()/ms_bit() use it to resolve the final byte of their scans. */
static const int table[] = {
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};

/* Index of the least-significant set bit of i, or -1 when i == 0
 * (unlike ffs(), which numbers bits from 1). */
static __inline__ int ls_bit(int i)
{
	unsigned int a;
	unsigned int x = i & -i;	/* isolate the lowest set bit */

	a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
	return table[x >> a] + a;
}

/* Index of the most-significant set bit of i, or -1 when i == 0. */
static __inline__ int ms_bit(int i)
{
	unsigned int a;
	unsigned int x = (unsigned int)i;

	a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
	return table[x >> a] + a;
}
310 static __inline__
void set_bit(int nr
, u32_t
* addr
)
312 addr
[nr
>> 5] |= 1 << (nr
& 0x1f);
315 static __inline__
void clear_bit(int nr
, u32_t
* addr
)
317 addr
[nr
>> 5] &= ~(1 << (nr
& 0x1f));
320 static __inline__
void MAPPING_SEARCH(size_t * _r
, int *_fl
, int *_sl
)
324 if (*_r
< SMALL_BLOCK
) {
326 *_sl
= *_r
/ (SMALL_BLOCK
/ MAX_SLI
);
328 _t
= (1 << (ms_bit(*_r
) - MAX_LOG2_SLI
)) - 1;
331 *_sl
= (*_r
>> (*_fl
- MAX_LOG2_SLI
)) - MAX_SLI
;
333 /*if ((*_fl -= FLI_OFFSET) < 0) // FL wil be always >0!
340 static __inline__
void MAPPING_INSERT(size_t _r
, int *_fl
, int *_sl
)
342 if (_r
< SMALL_BLOCK
) {
344 *_sl
= _r
/ (SMALL_BLOCK
/ MAX_SLI
);
347 *_sl
= (_r
>> (*_fl
- MAX_LOG2_SLI
)) - MAX_SLI
;
352 static __inline__ bhdr_t
*FIND_SUITABLE_BLOCK(tlsf_t
* _tlsf
, int *_fl
,
355 u32_t _tmp
= _tlsf
->sl_bitmap
[*_fl
] & (~0 << *_sl
);
360 _b
= _tlsf
->matrix
[*_fl
][*_sl
];
362 *_fl
= ls_bit(_tlsf
->fl_bitmap
& (~0 << (*_fl
+ 1)));
363 if (*_fl
> 0) { /* likely */
364 *_sl
= ls_bit(_tlsf
->sl_bitmap
[*_fl
]);
365 _b
= _tlsf
->matrix
[*_fl
][*_sl
];
/* Unlink the head block _b of free list matrix[_fl][_sl]; clear the
 * bitmap bits when the list (and possibly the whole fl class) empties. */
#define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) do {			\
		_tlsf->matrix[_fl][_sl] = _b->ptr.free_ptr.next;	\
		if (_tlsf->matrix[_fl][_sl])				\
			_tlsf->matrix[_fl][_sl]->ptr.free_ptr.prev = NULL; \
		else {							\
			clear_bit(_sl, &_tlsf->sl_bitmap[_fl]);		\
			if (!_tlsf->sl_bitmap[_fl])			\
				clear_bit(_fl, &_tlsf->fl_bitmap);	\
		}							\
		_b->ptr.free_ptr.prev = NULL;				\
		_b->ptr.free_ptr.next = NULL;				\
	} while (0)

/* Unlink block _b from anywhere inside free list matrix[_fl][_sl]. */
#define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) do {				\
		if (_b->ptr.free_ptr.next)				\
			_b->ptr.free_ptr.next->ptr.free_ptr.prev = _b->ptr.free_ptr.prev; \
		if (_b->ptr.free_ptr.prev)				\
			_b->ptr.free_ptr.prev->ptr.free_ptr.next = _b->ptr.free_ptr.next; \
		if (_tlsf->matrix[_fl][_sl] == _b) {			\
			_tlsf->matrix[_fl][_sl] = _b->ptr.free_ptr.next; \
			if (!_tlsf->matrix[_fl][_sl]) {			\
				clear_bit(_sl, &_tlsf->sl_bitmap[_fl]);	\
				if (!_tlsf->sl_bitmap[_fl])		\
					clear_bit(_fl, &_tlsf->fl_bitmap); \
			}						\
		}							\
		_b->ptr.free_ptr.prev = NULL;				\
		_b->ptr.free_ptr.next = NULL;				\
	} while (0)

/* Push block _b onto the head of free list matrix[_fl][_sl] and mark
 * the class as non-empty in both bitmaps. */
#define INSERT_BLOCK(_b, _tlsf, _fl, _sl) do {				\
		_b->ptr.free_ptr.prev = NULL;				\
		_b->ptr.free_ptr.next = _tlsf->matrix[_fl][_sl];	\
		if (_tlsf->matrix[_fl][_sl])				\
			_tlsf->matrix[_fl][_sl]->ptr.free_ptr.prev = _b; \
		_tlsf->matrix[_fl][_sl] = _b;				\
		set_bit(_sl, &_tlsf->sl_bitmap[_fl]);			\
		set_bit(_fl, &_tlsf->fl_bitmap);			\
	} while (0)
#if USE_SBRK || USE_MMAP
/* Obtain a fresh memory area of at least *size bytes from the OS
 * (sbrk first if enabled, then mmap).  Updates *size to the size
 * actually reserved; returns (void *)~0 on failure. */
static __inline__ void *get_new_area(size_t * size)
{
	void *area;

#if USE_SBRK
	area = (void *)sbrk(0);
	if (((void *)sbrk(*size)) != ((void *)-1))
		return area;
#endif

#ifndef MAP_ANONYMOUS
/* https://dev.openwrt.org/ticket/322 */
#define MAP_ANONYMOUS MAP_ANON
#endif

#if USE_MMAP
	*size = ROUNDUP(*size, PAGE_SIZE);
	if ((area = mmap(0, *size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS,
			 -1, 0)) != MAP_FAILED)
		return area;
#endif
	return ((void *)~0);
}
#endif
438 static __inline__ bhdr_t
*process_area(void *area
, size_t size
)
443 ib
= (bhdr_t
*) area
;
445 (sizeof(area_info_t
) <
446 MIN_BLOCK_SIZE
) ? MIN_BLOCK_SIZE
:
447 ROUNDUP_SIZE(sizeof(area_info_t
)) | USED_BLOCK
| PREV_USED
;
448 b
= (bhdr_t
*) GET_NEXT_BLOCK(ib
->ptr
.buffer
, ib
->size
& BLOCK_SIZE
);
450 ROUNDDOWN_SIZE(size
- 3 * BHDR_OVERHEAD
-
451 (ib
->size
& BLOCK_SIZE
)) | USED_BLOCK
| PREV_USED
;
452 b
->ptr
.free_ptr
.prev
= b
->ptr
.free_ptr
.next
= 0;
453 lb
= GET_NEXT_BLOCK(b
->ptr
.buffer
, b
->size
& BLOCK_SIZE
);
455 lb
->size
= 0 | USED_BLOCK
| PREV_FREE
;
456 ai
= (area_info_t
*) ib
->ptr
.buffer
;
462 /******************************************************************/
463 /******************** Begin of the allocator code *****************/
464 /******************************************************************/
466 static char *mp
= NULL
; /* Default memory pool. */
468 /******************************************************************/
469 size_t init_memory_pool(size_t mem_pool_size
, void *mem_pool
)
471 /******************************************************************/
475 if (!mem_pool
|| !mem_pool_size
476 || mem_pool_size
< sizeof(tlsf_t
) + BHDR_OVERHEAD
* 8) {
477 ERROR_MSG("init_memory_pool (): memory_pool invalid\n");
481 if (((unsigned long)mem_pool
& PTR_MASK
)) {
483 ("init_memory_pool (): mem_pool must be aligned to a word\n");
486 tlsf
= (tlsf_t
*) mem_pool
;
487 /* Check if already initialised */
488 if (tlsf
->tlsf_signature
== TLSF_SIGNATURE
) {
489 mp
= (char *)mem_pool
;
490 b
= GET_NEXT_BLOCK(mp
, ROUNDUP_SIZE(sizeof(tlsf_t
)));
491 return b
->size
& BLOCK_SIZE
;
494 mp
= (char *)mem_pool
;
496 /* Zeroing the memory pool */
497 memset(mem_pool
, 0, sizeof(tlsf_t
));
499 tlsf
->tlsf_signature
= TLSF_SIGNATURE
;
501 TLSF_CREATE_LOCK(&tlsf
->lock
);
503 ib
= process_area(GET_NEXT_BLOCK
504 (mem_pool
, ROUNDUP_SIZE(sizeof(tlsf_t
))),
505 ROUNDDOWN_SIZE(mem_pool_size
- sizeof(tlsf_t
)));
506 b
= GET_NEXT_BLOCK(ib
->ptr
.buffer
, ib
->size
& BLOCK_SIZE
);
507 free_ex(b
->ptr
.buffer
, tlsf
);
508 tlsf
->area_head
= (area_info_t
*) ib
->ptr
.buffer
;
511 tlsf
->used_size
= mem_pool_size
- (b
->size
& BLOCK_SIZE
);
512 tlsf
->max_size
= tlsf
->used_size
;
515 return (b
->size
& BLOCK_SIZE
);
518 /******************************************************************/
519 size_t add_new_area(void *area
, size_t area_size
, void *mem_pool
)
521 /******************************************************************/
522 tlsf_t
*tlsf
= (tlsf_t
*) mem_pool
;
523 area_info_t
*ptr
, *ptr_prev
, *ai
;
524 bhdr_t
*ib0
, *b0
, *lb0
, *ib1
, *b1
, *lb1
, *next_b
;
526 memset(area
, 0, area_size
);
527 ptr
= tlsf
->area_head
;
530 ib0
= process_area(area
, area_size
);
531 b0
= GET_NEXT_BLOCK(ib0
->ptr
.buffer
, ib0
->size
& BLOCK_SIZE
);
532 lb0
= GET_NEXT_BLOCK(b0
->ptr
.buffer
, b0
->size
& BLOCK_SIZE
);
534 /* Before inserting the new area, we have to merge this area with the
535 already existing ones */
538 ib1
= (bhdr_t
*) ((char *)ptr
- BHDR_OVERHEAD
);
539 b1
= GET_NEXT_BLOCK(ib1
->ptr
.buffer
, ib1
->size
& BLOCK_SIZE
);
542 /* Merging the new area with the next physically contigous one */
543 if ((unsigned long)ib1
== (unsigned long)lb0
+ BHDR_OVERHEAD
) {
544 if (tlsf
->area_head
== ptr
) {
545 tlsf
->area_head
= ptr
->next
;
548 ptr_prev
->next
= ptr
->next
;
553 ROUNDDOWN_SIZE((b0
->size
& BLOCK_SIZE
) +
554 (ib1
->size
& BLOCK_SIZE
) +
556 BHDR_OVERHEAD
) | USED_BLOCK
|
565 /* Merging the new area with the previous physically contigous
567 if ((unsigned long)lb1
->ptr
.buffer
== (unsigned long)ib0
) {
568 if (tlsf
->area_head
== ptr
) {
569 tlsf
->area_head
= ptr
->next
;
572 ptr_prev
->next
= ptr
->next
;
577 ROUNDDOWN_SIZE((b0
->size
& BLOCK_SIZE
) +
578 (ib0
->size
& BLOCK_SIZE
) +
580 BHDR_OVERHEAD
) | USED_BLOCK
| (lb1
->
584 GET_NEXT_BLOCK(lb1
->ptr
.buffer
,
585 lb1
->size
& BLOCK_SIZE
);
586 next_b
->prev_hdr
= lb1
;
596 /* Inserting the area in the list of linked areas */
597 ai
= (area_info_t
*) ib0
->ptr
.buffer
;
598 ai
->next
= tlsf
->area_head
;
600 tlsf
->area_head
= ai
;
601 free_ex(b0
->ptr
.buffer
, mem_pool
);
602 return (b0
->size
& BLOCK_SIZE
);
605 /******************************************************************/
606 size_t get_used_size(void *mem_pool ATTRIBUTE_UNUSED
)
608 /******************************************************************/
610 return ((tlsf_t
*) mem_pool
)->used_size
;
616 /******************************************************************/
617 size_t get_max_size(void *mem_pool ATTRIBUTE_UNUSED
)
619 /******************************************************************/
621 return ((tlsf_t
*) mem_pool
)->max_size
;
627 /******************************************************************/
628 void destroy_memory_pool(void *mem_pool
)
630 /******************************************************************/
631 tlsf_t
*tlsf
= (tlsf_t
*) mem_pool
;
633 tlsf
->tlsf_signature
= 0;
635 TLSF_DESTROY_LOCK(&tlsf
->lock
);
639 /******************************************************************/
640 void *tlsf_malloc(size_t size
)
642 /******************************************************************/
645 #if USE_MMAP || USE_SBRK
650 area_size
= sizeof(tlsf_t
) + BHDR_OVERHEAD
* 8; /* Just a safety constant */
653 DEFAULT_AREA_SIZE
) ? area_size
: DEFAULT_AREA_SIZE
;
654 area
= get_new_area(&area_size
);
655 if (area
== ((void *)~0))
656 return NULL
; /* Not enough system memory */
657 init_memory_pool(area_size
, area
);
661 TLSF_ACQUIRE_LOCK(&((tlsf_t
*) mp
)->lock
);
663 ret
= malloc_ex(size
, mp
);
665 TLSF_RELEASE_LOCK(&((tlsf_t
*) mp
)->lock
);
670 /******************************************************************/
671 void tlsf_free(void *ptr
)
673 /******************************************************************/
675 TLSF_ACQUIRE_LOCK(&((tlsf_t
*) mp
)->lock
);
679 TLSF_RELEASE_LOCK(&((tlsf_t
*) mp
)->lock
);
683 /******************************************************************/
684 void *tlsf_realloc(void *ptr
, size_t size
)
686 /******************************************************************/
689 #if USE_MMAP || USE_SBRK
691 return tlsf_malloc(size
);
695 TLSF_ACQUIRE_LOCK(&((tlsf_t
*) mp
)->lock
);
697 ret
= realloc_ex(ptr
, size
, mp
);
699 TLSF_RELEASE_LOCK(&((tlsf_t
*) mp
)->lock
);
704 /******************************************************************/
705 void *tlsf_calloc(size_t nelem
, size_t elem_size
)
707 /******************************************************************/
710 TLSF_ACQUIRE_LOCK(&((tlsf_t
*) mp
)->lock
);
712 ret
= calloc_ex(nelem
, elem_size
, mp
);
714 TLSF_RELEASE_LOCK(&((tlsf_t
*) mp
)->lock
);
719 /******************************************************************/
720 void *malloc_ex(size_t size
, void *mem_pool
)
722 /******************************************************************/
723 tlsf_t
*tlsf
= (tlsf_t
*) mem_pool
;
724 bhdr_t
*b
, *b2
, *next_b
;
728 size
= (size
< MIN_BLOCK_SIZE
) ? MIN_BLOCK_SIZE
: ROUNDUP_SIZE(size
);
730 /* Rounding up the requested size and calculating fl and sl */
731 MAPPING_SEARCH(&size
, &fl
, &sl
);
733 /* Searching a free block, recall that this function changes the values of fl and sl,
734 so they are not longer valid when the function fails */
735 b
= FIND_SUITABLE_BLOCK(tlsf
, &fl
, &sl
);
736 #if USE_MMAP || USE_SBRK
740 /* Growing the pool size when needed */
741 area_size
= size
+ BHDR_OVERHEAD
* 8; /* size plus enough room for the requered headers. */
744 DEFAULT_AREA_SIZE
) ? area_size
: DEFAULT_AREA_SIZE
;
745 area
= get_new_area(&area_size
); /* Call sbrk or mmap */
746 if (area
== ((void *)~0))
747 return NULL
; /* Not enough system memory */
748 add_new_area(area
, area_size
, mem_pool
);
749 /* Rounding up the requested size and calculating fl and sl */
750 MAPPING_SEARCH(&size
, &fl
, &sl
);
751 /* Searching a free block */
752 b
= FIND_SUITABLE_BLOCK(tlsf
, &fl
, &sl
);
756 return NULL
; /* Not found */
758 EXTRACT_BLOCK_HDR(b
, tlsf
, fl
, sl
);
761 next_b
= GET_NEXT_BLOCK(b
->ptr
.buffer
, b
->size
& BLOCK_SIZE
);
762 /* Should the block be split? */
763 tmp_size
= (b
->size
& BLOCK_SIZE
) - size
;
764 if (tmp_size
>= sizeof(bhdr_t
)) {
765 tmp_size
-= BHDR_OVERHEAD
;
766 b2
= GET_NEXT_BLOCK(b
->ptr
.buffer
, size
);
767 b2
->size
= tmp_size
| FREE_BLOCK
| PREV_USED
;
768 next_b
->prev_hdr
= b2
;
769 MAPPING_INSERT(tmp_size
, &fl
, &sl
);
770 INSERT_BLOCK(b2
, tlsf
, fl
, sl
);
772 b
->size
= size
| (b
->size
& PREV_STATE
);
774 next_b
->size
&= (~PREV_FREE
);
775 b
->size
&= (~FREE_BLOCK
); /* Now it's used */
778 TLSF_ADD_SIZE(tlsf
, b
);
780 return (void *)b
->ptr
.buffer
;
783 /******************************************************************/
784 void free_ex(void *ptr
, void *mem_pool
)
786 /******************************************************************/
787 tlsf_t
*tlsf
= (tlsf_t
*) mem_pool
;
794 b
= (bhdr_t
*) ((char *)ptr
- BHDR_OVERHEAD
);
796 #ifdef CHECK_DOUBLE_FREE
797 if (b
->size
& FREE_BLOCK
) {
798 ERROR_MSG("free_ex(): double free %p\n", ptr
);
803 b
->size
|= FREE_BLOCK
;
805 TLSF_REMOVE_SIZE(tlsf
, b
);
807 b
->ptr
.free_ptr
.prev
= NULL
;
808 b
->ptr
.free_ptr
.next
= NULL
;
809 tmp_b
= GET_NEXT_BLOCK(b
->ptr
.buffer
, b
->size
& BLOCK_SIZE
);
810 if (tmp_b
->size
& FREE_BLOCK
) {
811 MAPPING_INSERT(tmp_b
->size
& BLOCK_SIZE
, &fl
, &sl
);
812 EXTRACT_BLOCK(tmp_b
, tlsf
, fl
, sl
);
813 b
->size
+= (tmp_b
->size
& BLOCK_SIZE
) + BHDR_OVERHEAD
;
815 if (b
->size
& PREV_FREE
) {
817 MAPPING_INSERT(tmp_b
->size
& BLOCK_SIZE
, &fl
, &sl
);
818 EXTRACT_BLOCK(tmp_b
, tlsf
, fl
, sl
);
819 tmp_b
->size
+= (b
->size
& BLOCK_SIZE
) + BHDR_OVERHEAD
;
822 MAPPING_INSERT(b
->size
& BLOCK_SIZE
, &fl
, &sl
);
823 INSERT_BLOCK(b
, tlsf
, fl
, sl
);
825 tmp_b
= GET_NEXT_BLOCK(b
->ptr
.buffer
, b
->size
& BLOCK_SIZE
);
826 tmp_b
->size
|= PREV_FREE
;
830 /******************************************************************/
831 void *realloc_ex(void *ptr
, size_t new_size
, void *mem_pool
)
833 /******************************************************************/
834 tlsf_t
*tlsf
= (tlsf_t
*) mem_pool
;
837 bhdr_t
*b
, *tmp_b
, *next_b
;
843 return (void *)malloc_ex(new_size
, mem_pool
);
846 } else if (!new_size
) {
847 free_ex(ptr
, mem_pool
);
851 b
= (bhdr_t
*) ((char *)ptr
- BHDR_OVERHEAD
);
853 #ifdef CHECK_DOUBLE_FREE
854 if (b
->size
& FREE_BLOCK
) {
855 ERROR_MSG("realloc_ex(): invalid pointer %p\n", ptr
);
856 return (void *)malloc_ex(new_size
, mem_pool
);
860 next_b
= GET_NEXT_BLOCK(b
->ptr
.buffer
, b
->size
& BLOCK_SIZE
);
863 MIN_BLOCK_SIZE
) ? MIN_BLOCK_SIZE
: ROUNDUP_SIZE(new_size
);
864 tmp_size
= (b
->size
& BLOCK_SIZE
);
865 if (new_size
<= tmp_size
) {
866 TLSF_REMOVE_SIZE(tlsf
, b
);
867 if (next_b
->size
& FREE_BLOCK
) {
868 MAPPING_INSERT(next_b
->size
& BLOCK_SIZE
, &fl
, &sl
);
869 EXTRACT_BLOCK(next_b
, tlsf
, fl
, sl
);
870 tmp_size
+= (next_b
->size
& BLOCK_SIZE
) + BHDR_OVERHEAD
;
872 GET_NEXT_BLOCK(next_b
->ptr
.buffer
,
873 next_b
->size
& BLOCK_SIZE
);
874 /* We allways reenter this free block because tmp_size will
875 be greater then sizeof (bhdr_t) */
877 tmp_size
-= new_size
;
878 if (tmp_size
>= sizeof(bhdr_t
)) {
879 tmp_size
-= BHDR_OVERHEAD
;
880 tmp_b
= GET_NEXT_BLOCK(b
->ptr
.buffer
, new_size
);
881 tmp_b
->size
= tmp_size
| FREE_BLOCK
| PREV_USED
;
882 next_b
->prev_hdr
= tmp_b
;
883 next_b
->size
|= PREV_FREE
;
884 MAPPING_INSERT(tmp_size
, &fl
, &sl
);
885 INSERT_BLOCK(tmp_b
, tlsf
, fl
, sl
);
886 b
->size
= new_size
| (b
->size
& PREV_STATE
);
888 TLSF_ADD_SIZE(tlsf
, b
);
889 return (void *)b
->ptr
.buffer
;
891 if ((next_b
->size
& FREE_BLOCK
)) {
892 if (new_size
<= (tmp_size
+ (next_b
->size
& BLOCK_SIZE
))) {
893 TLSF_REMOVE_SIZE(tlsf
, b
);
894 MAPPING_INSERT(next_b
->size
& BLOCK_SIZE
, &fl
, &sl
);
895 EXTRACT_BLOCK(next_b
, tlsf
, fl
, sl
);
896 b
->size
+= (next_b
->size
& BLOCK_SIZE
) + BHDR_OVERHEAD
;
898 GET_NEXT_BLOCK(b
->ptr
.buffer
, b
->size
& BLOCK_SIZE
);
899 next_b
->prev_hdr
= b
;
900 next_b
->size
&= ~PREV_FREE
;
901 tmp_size
= (b
->size
& BLOCK_SIZE
) - new_size
;
902 if (tmp_size
>= sizeof(bhdr_t
)) {
903 tmp_size
-= BHDR_OVERHEAD
;
904 tmp_b
= GET_NEXT_BLOCK(b
->ptr
.buffer
, new_size
);
905 tmp_b
->size
= tmp_size
| FREE_BLOCK
| PREV_USED
;
906 next_b
->prev_hdr
= tmp_b
;
907 next_b
->size
|= PREV_FREE
;
908 MAPPING_INSERT(tmp_size
, &fl
, &sl
);
909 INSERT_BLOCK(tmp_b
, tlsf
, fl
, sl
);
910 b
->size
= new_size
| (b
->size
& PREV_STATE
);
912 TLSF_ADD_SIZE(tlsf
, b
);
913 return (void *)b
->ptr
.buffer
;
917 if (!(ptr_aux
= malloc_ex(new_size
, mem_pool
))) {
922 ((b
->size
& BLOCK_SIZE
) >
923 new_size
) ? new_size
: (b
->size
& BLOCK_SIZE
);
925 memcpy(ptr_aux
, ptr
, cpsize
);
927 free_ex(ptr
, mem_pool
);
/******************************************************************/
/* Allocate and zero an array of nelem elements of elem_size bytes
 * from mem_pool.  Returns NULL for zero counts/sizes or on failure.
 * Fix: the original computed nelem * elem_size without an overflow
 * check; a wrapped product would silently allocate a short buffer. */
void *calloc_ex(size_t nelem, size_t elem_size, void *mem_pool)
{
/******************************************************************/
	void *ptr;

	if (nelem <= 0 || elem_size <= 0)
		return NULL;

	/* Reject nelem * elem_size overflow before multiplying. */
	if (nelem > ((size_t)-1) / elem_size)
		return NULL;

	if (!(ptr = malloc_ex(nelem * elem_size, mem_pool)))
		return NULL;
	memset(ptr, 0, nelem * elem_size);

	return ptr;
}
949 /*************** DEBUG FUNCTIONS **************/
951 /* The following functions have been designed to ease the debugging of */
952 /* the TLSF structure. For non-developing purposes, it may be they */
953 /* haven't too much worth. To enable them, _DEBUG_TLSF_ must be set. */
955 extern void dump_memory_region(unsigned char *mem_ptr
, unsigned int size
);
956 extern void print_block(bhdr_t
* b
);
957 extern void print_tlsf(tlsf_t
* tlsf
);
958 void print_all_blocks(tlsf_t
* tlsf
);
/* Hex-dump 'size' bytes starting at mem_ptr (word-aligned window),
 * 16 bytes per output row, via PRINT_MSG.
 * NOTE(review): the body of the print loop was partly missing from the
 * damaged listing; restored from canonical TLSF 2.4.6 -- verify. */
void dump_memory_region(unsigned char *mem_ptr, unsigned int size)
{

	unsigned long begin = (unsigned long)mem_ptr;
	unsigned long end = (unsigned long)mem_ptr + size;
	int column = 0;

	begin >>= 2;
	begin <<= 2;

	end >>= 2;
	end++;
	end <<= 2;

	PRINT_MSG("\nMemory region dumped: 0x%lx - 0x%lx\n\n", begin, end);

	column = 0;
	PRINT_MSG("0x%lx ", begin);

	while (begin < end) {
		if (((unsigned char *)begin)[0] == 0)
			PRINT_MSG("00");
		else
			PRINT_MSG("%02x", ((unsigned char *)begin)[0]);
		if (((unsigned char *)begin)[1] == 0)
			PRINT_MSG("00 ");
		else
			PRINT_MSG("%02x ", ((unsigned char *)begin)[1]);
		begin += 2;
		column++;
		if (column == 8) {
			PRINT_MSG("\n0x%lx ", begin);
			column = 0;
		}
	}
	PRINT_MSG("\n\n");
}
999 void print_block(bhdr_t
* b
)
1003 PRINT_MSG(">> [%p] (", b
);
1004 if ((b
->size
& BLOCK_SIZE
))
1005 PRINT_MSG("%lu bytes, ", (unsigned long)(b
->size
& BLOCK_SIZE
));
1007 PRINT_MSG("sentinel, ");
1008 if ((b
->size
& BLOCK_STATE
) == FREE_BLOCK
)
1009 PRINT_MSG("free [%p, %p], ", b
->ptr
.free_ptr
.prev
,
1010 b
->ptr
.free_ptr
.next
);
1012 PRINT_MSG("used, ");
1013 if ((b
->size
& PREV_STATE
) == PREV_FREE
)
1014 PRINT_MSG("prev. free [%p])\n", b
->prev_hdr
);
1016 PRINT_MSG("prev used)\n");
1019 void print_tlsf(tlsf_t
* tlsf
)
1024 PRINT_MSG("\nTLSF at %p\n", tlsf
);
1026 PRINT_MSG("FL bitmap: 0x%x\n\n", (unsigned)tlsf
->fl_bitmap
);
1028 for (i
= 0; i
< REAL_FLI
; i
++) {
1029 if (tlsf
->sl_bitmap
[i
])
1030 PRINT_MSG("SL bitmap 0x%x\n",
1031 (unsigned)tlsf
->sl_bitmap
[i
]);
1032 for (j
= 0; j
< MAX_SLI
; j
++) {
1033 next
= tlsf
->matrix
[i
][j
];
1035 PRINT_MSG("-> [%d][%d]\n", i
, j
);
1038 next
= next
->ptr
.free_ptr
.next
;
1044 void print_all_blocks(tlsf_t
* tlsf
)
1048 PRINT_MSG("\nTLSF at %p\nALL BLOCKS\n\n", tlsf
);
1049 ai
= tlsf
->area_head
;
1051 next
= (bhdr_t
*) ((char *)ai
- BHDR_OVERHEAD
);
1054 if ((next
->size
& BLOCK_SIZE
))
1056 GET_NEXT_BLOCK(next
->ptr
.buffer
,
1057 next
->size
& BLOCK_SIZE
);