/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * This is a memory allocator designed to provide reasonable management of
 * free space and fast access to allocated data. More than one allocator can
 * be used at a time by initializing multiple contexts.
 *
 * Copyright (C) 2009 Andrew Mahone
 * Copyright (C) 2011 Thomas Martitz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h> /* for abs() */
#include <stdio.h> /* for snprintf() */
#include <string.h> /* for memmove(), strcpy(), strlen() */
#include "buflib.h"
#include "string-extra.h" /* strlcpy() */
#include "debug.h" /* for DEBUGF() */
#include "system.h" /* for ALIGN_*() */
/* The main goal of this design is fast fetching of the pointer for a handle.
 * For that reason, the handles are stored in a table at the end of the buffer
 * with a fixed address, so that returning the pointer for a handle is a simple
 * table lookup. To reduce the frequency with which allocated blocks will need
 * to be moved to free space, allocations grow up in address from the start of
 * the buffer. The buffer is treated as an array of union buflib_data. Blocks
 * start with a length marker, which is included in their length. Free blocks
 * are marked by a negative length. Allocated blocks have a positive length
 * marker, and additional metadata following it: first a pointer
 * (union buflib_data*) to the corresponding handle table entry, so that it can
 * be quickly found and updated during compaction. After that follows
 * the pointer to the struct buflib_callbacks associated with this allocation
 * (may be NULL). That pointer is followed by a variable-length character array
 * containing the nul-terminated string identifier of the allocation. After this
 * array there's a length marker for the length of the character array including
 * this length marker (counted in n*sizeof(union buflib_data)), which allows
 * one to find the start of the character array (and therefore the start of the
 * entire block) when only the handle or payload start is known.
 *
 * Example:
 * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->|
 * |L|H|C|cccc|L2|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|XXXXXXXXXXXXX|AAA|
 *
 * L  - length marker (negative if block unallocated)
 * H  - handle table entry pointer
 * C  - pointer to struct buflib_callbacks
 * c  - variable sized string identifier
 * L2 - second length marker for string identifier
 * X  - actual payload
 * Y  - unallocated space
 *
 * A  - pointer to start of payload (first X) in the handle table (may be null)
 *
 * The blocks can be walked by jumping the abs() of the L length marker, i.e.
 * union buflib_data* L;
 * for(L = start; L < end; L += abs(L->val)) { .... }
 *
 * The allocator functions are passed a context struct so that two allocators
 * can be run, for example, one per core may be used, with convenience wrappers
 * for the single-allocator case that use a predefined context.
 */
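
/* Illustrative sketch (not part of the original file): how the L2 marker lets
 * code navigate from a payload pointer back to the block start, mirroring
 * what handle_to_block()/buflib_get_name() below do. The function name is
 * hypothetical.
 */
#if 0
static union buflib_data* example_block_start_from_payload(void *payload)
{
    union buflib_data *data = payload;
    /* data[-1] is L2: the size of the name array plus this marker,
     * counted in units of union buflib_data */
    union buflib_data *name_start = data - data[-1].val;
    /* the name is preceded by C (ops pointer), H (handle table entry
     * pointer) and L (length marker) */
    return name_start - 3;
}
#endif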
#define B_ALIGN_DOWN(x) \
    ALIGN_DOWN(x, sizeof(union buflib_data))

#define B_ALIGN_UP(x) \
    ALIGN_UP(x, sizeof(union buflib_data))

#ifdef DEBUG_BUFLIB
#define BDEBUGF DEBUGF
#else
#define BDEBUGF(...) do { } while(0)
#endif

#define IS_MOVABLE(a) (!a[2].ops || a[2].ops->move_callback)
static union buflib_data* find_first_free(struct buflib_context *ctx);
static union buflib_data* find_block_before(struct buflib_context *ctx,
                                            union buflib_data* block,
                                            bool is_free);
/* Initialize buffer manager */
void
buflib_init(struct buflib_context *ctx, void *buf, size_t size)
{
    union buflib_data *bd_buf = buf;

    /* Align on sizeof(buflib_data), to prevent unaligned access */
    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
    size /= sizeof(union buflib_data);
    /* The handle table is initialized with no entries */
    ctx->handle_table = bd_buf + size;
    ctx->last_handle = bd_buf + size;
    ctx->first_free_handle = bd_buf + size - 1;
    ctx->buf_start = bd_buf;
    /* A marker is needed for the end of allocated data, to make sure that it
     * does not collide with the handle table, and to detect end-of-buffer.
     */
    ctx->alloc_end = bd_buf;
    ctx->compact = true;

    BDEBUGF("buflib initialized with %lu.%02lu kiB\n",
            (unsigned long)(size * sizeof(union buflib_data)) / 1024,
            ((unsigned long)(size * sizeof(union buflib_data)) % 1024) * 100 / 1024);
}
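
/* Usage sketch (illustrative): a context is bound once to a caller-provided
 * buffer and passed to every subsequent buflib call. Names are hypothetical.
 */
#if 0
static struct buflib_context example_ctx;
static char example_buffer[32 * 1024];

static void example_setup(void)
{
    buflib_init(&example_ctx, example_buffer, sizeof(example_buffer));
}
#endif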
/* Allocate a new handle, returning 0 on failure */
static inline
union buflib_data* handle_alloc(struct buflib_context *ctx)
{
    union buflib_data *handle;
    /* first_free_handle is a lower bound on free handles, work through the
     * table from there until a handle containing NULL is found, or the end
     * of the table is reached.
     */
    for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
        if (!handle->alloc)
            break;
    /* If the search went past the end of the table, it means we need to extend
     * the table to get a new handle.
     */
    if (handle < ctx->last_handle)
    {
        if (handle >= ctx->alloc_end)
            ctx->last_handle--;
        else
            return NULL;
    }
    handle->val = -1;
    return handle;
}
/* Free one handle, shrinking the handle table if it's the last one */
static inline
void handle_free(struct buflib_context *ctx, union buflib_data *handle)
{
    handle->alloc = 0;
    /* Update free handle lower bound if this handle has a lower index than the
     * old one.
     */
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle;
    if (handle == ctx->last_handle)
        ctx->last_handle++;
    else
        ctx->compact = false;
}
/* Get the start block of an allocation */
static union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
{
    union buflib_data* name_field =
                (union buflib_data*)buflib_get_name(ctx, handle);

    return name_field - 3;
}
/* Shrink the handle table, returning true if its size was reduced, false if
 * it was not.
 */
static inline
bool
handle_table_shrink(struct buflib_context *ctx)
{
    bool rv;
    union buflib_data *handle;
    for (handle = ctx->last_handle; !(handle->alloc); handle++);
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle - 1;
    rv = handle != ctx->last_handle;
    ctx->last_handle = handle;
    return rv;
}
/* If shift is non-zero, it represents the number of places to move
 * blocks in memory. Calculate the new address for this block,
 * update its entry in the handle table, and then move its contents.
 *
 * Returns false if moving was unsuccessful
 * (NULL callback or BUFLIB_CB_CANNOT_MOVE was returned)
 */
static bool
move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
{
    char* new_start;
    union buflib_data *new_block, *tmp = block[1].handle;
    struct buflib_callbacks *ops = block[2].ops;
    if (!IS_MOVABLE(block))
        return false;

    int handle = ctx->handle_table - tmp;
    BDEBUGF("%s(): moving \"%s\"(id=%d) by %d(%d)\n", __func__, block[3].name,
            handle, shift, shift*(int)sizeof(union buflib_data));
    new_block = block + shift;
    new_start = tmp->alloc + shift*sizeof(union buflib_data);

    /* disable IRQs to make accessing the buffer from interrupt context safe. */
    /* protect the move callback, as a cached global pointer might be updated
     * in it. and protect "tmp->alloc = new_start" for buflib_get_data() */
    /* call the callback before moving */
    if (ops && ops->sync_callback)
        ops->sync_callback(handle, true);

    bool retval = false;
    if (!ops || ops->move_callback(handle, tmp->alloc, new_start)
                    != BUFLIB_CB_CANNOT_MOVE)
    {
        tmp->alloc = new_start; /* update handle table */
        memmove(new_block, block, block->val * sizeof(union buflib_data));
        retval = true;
    }

    if (ops && ops->sync_callback)
        ops->sync_callback(handle, false);

    return retval;
}
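
/* Illustrative sketch: a client that caches a raw pointer into its allocation
 * must update that pointer in move_callback(), or return
 * BUFLIB_CB_CANNOT_MOVE to pin the block. Names are hypothetical; the
 * callback signature follows the calls above.
 */
#if 0
static char *example_cached_ptr;

static int example_move_callback(int handle, void* current, void* new)
{
    (void)handle;
    /* relocate the cached pointer by the same distance the data moved */
    example_cached_ptr += (char*)new - (char*)current;
    return BUFLIB_CB_OK;
}

static struct buflib_callbacks example_ops = {
    .move_callback = example_move_callback,
    .shrink_callback = NULL, /* this client cannot shrink */
};
#endif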
/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *block,
                      *hole = NULL;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    /* compaction has basically two modes of operation:
     * 1) the buffer is nicely movable: In this mode, blocks can be simply
     * moved towards the beginning. Free blocks add to a shift value,
     * which is the amount to move.
     * 2) the buffer contains unmovable blocks: unmovable blocks create
     * holes and reset shift. Once a hole is found, we try to fill
     * holes first; moving by shift is the fallback. As the shift is reset,
     * this effectively splits the buffer into portions of movable blocks.
     * This mode cannot be used if no holes have been found yet, as it only
     * works when it moves blocks across the portions. On the other hand,
     * moving by shift only works within the same portion.
     * For simplicity only 1 hole at a time is considered. */
    for(block = find_first_free(ctx); block < ctx->alloc_end; block += len)
    {
        bool movable = true; /* cache result to avoid 2nd call to move_block */
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;
            len = -len;
            continue;
        }
        /* attempt to fill any hole */
        if (hole && -hole->val >= len)
        {
            intptr_t hlen = -hole->val;
            if ((movable = move_block(ctx, block, hole - block)))
            {
                ret = true;
                /* Move was successful. The memory at block is now free */
                block->val = -len;
                /* add its length to shift */
                shift += -len;
                /* Reduce the size of the hole accordingly
                 * but be careful to not overwrite an existing block */
                if (hlen != len)
                {
                    hole += len;
                    hole->val = len - hlen; /* negative */
                }
                else /* hole closed */
                    hole = NULL;
                continue;
            }
        }
        /* attempt to move the allocation by shift */
        if (shift)
        {
            union buflib_data* target_block = block + shift;
            if (!movable || !move_block(ctx, block, shift))
            {
                /* free space before an unmovable block becomes a hole,
                 * therefore mark this block free and track the hole */
                target_block->val = shift;
                hole = target_block;
                shift = 0;
            }
            else
                ret = true;
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed.
     */
    ctx->alloc_end += shift;
    ctx->compact = true;
    return ret || shift;
}
;
327 /* Compact the buffer by trying both shrinking and moving.
329 * Try to move first. If unsuccesfull, try to shrink. If that was successful
330 * try to move once more as there might be more room now.
333 buflib_compact_and_shrink(struct buflib_context
*ctx
, unsigned shrink_hints
)
336 /* if something compacted before already there will be no further gain */
338 result
= buflib_compact(ctx
);
341 union buflib_data
*this, *before
;
342 for(this = ctx
->buf_start
, before
= this;
343 this < ctx
->alloc_end
;
344 before
= this, this += abs(this->val
))
346 if (this->val
> 0 && this[2].ops
347 && this[2].ops
->shrink_callback
)
350 int handle
= ctx
->handle_table
- this[1].handle
;
351 char* data
= this[1].handle
->alloc
;
352 bool last
= (this+this->val
) == ctx
->alloc_end
;
353 unsigned pos_hints
= shrink_hints
& BUFLIB_SHRINK_POS_MASK
;
354 /* adjust what we ask for if there's free space in the front
355 * this isn't too unlikely assuming this block is
356 * shrinkable but not movable */
357 if (pos_hints
== BUFLIB_SHRINK_POS_FRONT
358 && before
!= this && before
->val
< 0)
360 size_t free_space
= (-before
->val
) * sizeof(union buflib_data
);
361 size_t wanted
= shrink_hints
& BUFLIB_SHRINK_SIZE_MASK
;
362 if (wanted
< free_space
) /* no shrink needed? */
364 wanted
-= free_space
;
365 shrink_hints
= pos_hints
| wanted
;
367 ret
= this[2].ops
->shrink_callback(handle
, shrink_hints
,
368 data
, (char*)(this+this->val
)-data
);
369 result
|= (ret
== BUFLIB_CB_OK
);
370 /* this might have changed in the callback (if
371 * it shrinked from the top), get it again */
372 this = handle_to_block(ctx
, handle
);
373 /* could also change with shrinking from back */
375 ctx
->alloc_end
= this + this->val
;
378 /* shrinking was successful at least once, try compaction again */
380 result
|= buflib_compact(ctx
);
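
/* Illustrative sketch of a shrink_callback: give up `wanted` bytes at the
 * front or back, honouring the hints, by calling buflib_shrink(). It assumes
 * a BUFLIB_CB_CANNOT_SHRINK return code declared in buflib.h next to
 * BUFLIB_CB_OK, and reuses the hypothetical example_ctx from the sketch
 * after buflib_init().
 */
#if 0
static int example_shrink_callback(int handle, unsigned hints,
                                   void* start, size_t old_size)
{
    size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;
    if (wanted == 0 || wanted > old_size)
        return BUFLIB_CB_CANNOT_SHRINK;
    if ((hints & BUFLIB_SHRINK_POS_MASK) == BUFLIB_SHRINK_POS_FRONT)
        /* give up bytes at the front: the data start moves up */
        return buflib_shrink(&example_ctx, handle,
                             (char*)start + wanted, old_size - wanted)
                ? BUFLIB_CB_OK : BUFLIB_CB_CANNOT_SHRINK;
    /* otherwise give up bytes at the back, keeping the same start */
    return buflib_shrink(&example_ctx, handle, start, old_size - wanted)
            ? BUFLIB_CB_OK : BUFLIB_CB_CANNOT_SHRINK;
}
#endif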
/* Shift buffered items by size units, and update handle pointers. The shift
 * value must be determined to be safe *before* calling.
 */
static void
buflib_buffer_shift(struct buflib_context *ctx, int shift)
{
    memmove(ctx->buf_start + shift, ctx->buf_start,
        (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
    ctx->buf_start += shift;
    ctx->alloc_end += shift;
    shift *= sizeof(union buflib_data);
    union buflib_data *handle;
    for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
        if (handle->alloc)
            handle->alloc += shift;
}
/* Shift buffered items up by size bytes, or as many as possible if size == 0.
 * Set size to the number of bytes freed.
 */
void*
buflib_buffer_out(struct buflib_context *ctx, size_t *size)
{
    if (!ctx->compact)
        buflib_compact(ctx);
    size_t avail = ctx->last_handle - ctx->alloc_end;
    size_t avail_b = avail * sizeof(union buflib_data);
    if (*size && *size < avail_b)
    {
        avail = (*size + sizeof(union buflib_data) - 1)
            / sizeof(union buflib_data);
        avail_b = avail * sizeof(union buflib_data);
    }
    *size = avail_b;
    void *ret = ctx->buf_start;
    buflib_buffer_shift(ctx, avail);
    return ret;
}
/* Shift buffered items down by size bytes */
void
buflib_buffer_in(struct buflib_context *ctx, int size)
{
    size /= sizeof(union buflib_data);
    buflib_buffer_shift(ctx, -size);
}
/* Allocate a buffer of size bytes, returning a handle for it */
int
buflib_alloc(struct buflib_context *ctx, size_t size)
{
    return buflib_alloc_ex(ctx, size, "<anonymous>", NULL);
}
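
/* Usage sketch (illustrative): store the handle, not the pointer, and
 * re-fetch the pointer on every use since the data may move.
 */
#if 0
static void example_alloc_use(struct buflib_context *ctx)
{
    int handle = buflib_alloc(ctx, 1024);
    if (handle > 0)
    {
        char *data = buflib_get_data(ctx, handle);
        data[0] = 42;
        buflib_free(ctx, handle);
    }
}
#endif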
/* Allocate a buffer of size bytes, returning a handle for it.
 *
 * The additional name parameter gives the allocation a human-readable name,
 * the ops parameter points to caller-implemented callbacks for moving and
 * shrinking. NULL for default callbacks (which do nothing but don't
 * prevent moving or shrinking)
 */
int
buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                struct buflib_callbacks *ops)
{
    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to handle table entry and
            * name length, and the ops pointer */
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again.
         */
        union buflib_data* last_block = find_block_before(ctx,
                                            ctx->alloc_end, false);
        struct buflib_callbacks* ops = last_block[2].ops;
        unsigned hints = 0;
        if (!ops || !ops->shrink_callback)
        {   /* the last one isn't shrinkable;
             * make room in front of a shrinkable one and move this alloc */
            hints = BUFLIB_SHRINK_POS_FRONT;
            hints |= last_block->val * sizeof(union buflib_data);
        }
        else if (ops && ops->shrink_callback)
        {   /* the last one is shrinkable, make room for handles directly */
            hints = BUFLIB_SHRINK_POS_BACK;
            hints |= 16*sizeof(union buflib_data);
        }
        /* buflib_compact_and_shrink() will compact and move last_block
         * if possible */
        if (buflib_compact_and_shrink(ctx, hints))
            goto handle_alloc;
        return -1;
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = find_first_free(ctx);;block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        unsigned hint = BUFLIB_SHRINK_POS_FRONT |
                    ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK);
        if (buflib_compact_and_shrink(ctx, hint))
            goto buffer_alloc;
        handle_free(ctx, handle);
        return -2;
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle.
     */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops;
    if (name)
        strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);

    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
static union buflib_data*
find_first_free(struct buflib_context *ctx)
{
    union buflib_data* ret = ctx->buf_start;
    while(ret < ctx->alloc_end)
    {
        if (ret->val < 0)
            break;
        ret += ret->val;
    }
    /* ret is now either a free block or the same as alloc_end, both is fine */
    return ret;
}
/* Finds the free block before block, and returns NULL if it's not free */
static union buflib_data*
find_block_before(struct buflib_context *ctx, union buflib_data* block,
                  bool is_free)
{
    union buflib_data *ret = ctx->buf_start,
                      *next_block = ret;

    /* find the block that's before the current one */
    while (next_block < block)
    {
        ret = next_block;
        next_block += abs(ret->val);
    }

    /* If next_block == block, the above loop didn't go anywhere. If it did,
     * and the block before this one is free, that is the wanted one.
     */
    if (next_block == block && ret < block)
    {
        if (is_free && ret->val >= 0) /* NULL if the found block isn't free */
            return NULL;
        return ret;
    }
    return NULL;
}
/* Free the buffer associated with handle_num. */
int
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block, *next_block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one.
     */
    block = find_block_before(ctx, freed_block, true);
    if (block)
    {
        block->val -= freed_block->val;
    }
    else
    {
        /* Otherwise, set block to the newly-freed block, and mark it free,
         * before continuing on, since the code below expects block to point
         * to a free block which may have free space after it.
         */
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact.
     */
    else
    {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;

    return 0; /* unconditionally */
}
static size_t
free_space_at_end(struct buflib_context* ctx)
{
    /* subtract 5 elements for
     * val, handle, name_len, ops and the handle table entry */
    ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - 5);
    diff -= 16; /* space for future handles */
    diff *= sizeof(union buflib_data); /* make it bytes */
    diff -= 16; /* reserve 16 for the name */

    if (diff > 0)
        return diff;
    else
        return 0;
}
/* Return the maximum allocatable memory in bytes */
size_t
buflib_available(struct buflib_context* ctx)
{
    union buflib_data *this;
    size_t free_space = 0, max_free_space = 0;

    /* make sure buffer is as contiguous as possible */
    if (!ctx->compact)
        buflib_compact(ctx);

    /* now look for free space in holes */
    for(this = find_first_free(ctx); this < ctx->alloc_end; this += abs(this->val))
    {
        if (this->val < 0)
        {
            free_space += -this->val;
            continue;
        }
        /* an unmovable section resets the count as free space
         * can't be contiguous */
        if (!IS_MOVABLE(this))
        {
            if (max_free_space < free_space)
                max_free_space = free_space;
            free_space = 0;
        }
    }

    /* select the best */
    max_free_space = MAX(max_free_space, free_space);
    max_free_space *= sizeof(union buflib_data);
    max_free_space = MAX(max_free_space, free_space_at_end(ctx));

    if (max_free_space > 0)
        return max_free_space;
    else
        return 0;
}
/*
 * Allocate all available (as returned by buflib_available()) memory and
 * return a handle to it.
 *
 * This grabs a lock which can only be unlocked by buflib_free() or
 * buflib_shrink(), to protect from further allocations (which couldn't be
 * served anyway).
 */
int
buflib_alloc_maximum(struct buflib_context* ctx, const char* name,
                     size_t *size, struct buflib_callbacks *ops)
{
    /* limit the name to 16 bytes since that's all buflib_available()
     * accounts for */
    char buf[16];

    *size = buflib_available(ctx);
    if (*size <= 0) /* OOM */
        return -1;

    strlcpy(buf, name, sizeof(buf));

    return buflib_alloc_ex(ctx, *size, buf, ops);
}
/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Growing is not possible, therefore new_start and
 * new_start + new_size must be within the original allocation.
 */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;

    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
                      *old_next_block = block + block->val,
    /* newstart isn't necessarily properly aligned but it
     * needn't be since it's only dereferenced by the user code */
                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
                      *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;
    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;

    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move metadata over, i.e. pointer to handle table entry and name.
         * This is actually the point of no return. Data in the allocation is
         * being modified, and therefore we must successfully finish the shrink
         * operation. */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated */
        block->val = block - new_block;
        /* find the block before in order to merge with the new free space */
        union buflib_data *free_before = find_block_before(ctx, block, true);
        if (free_before)
            free_before->val += block->val;

        /* We didn't handle size changes yet, assign block to the new one;
         * the code below wants block whether it changed or not. */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge the next free block by moving its header up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else if (old_next_block != new_next_block)
        {   /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
        /* the buffer is no longer compact */
        ctx->compact = false;
    }

    return true;
}
const char* buflib_get_name(struct buflib_context *ctx, int handle)
{
    union buflib_data *data = ALIGN_DOWN(buflib_get_data(ctx, handle), sizeof(*data));
    size_t len = data[-1].val;
    if (len <= 1)
        return NULL;
    return data[-len].name;
}
#ifdef BUFLIB_DEBUG_BLOCKS
void buflib_print_allocs(struct buflib_context *ctx,
                         void (*print)(int, const char*))
{
    union buflib_data *this, *end = ctx->handle_table;
    char buf[128];
    for(this = end - 1; this >= ctx->last_handle; this--)
    {
        if (!this->alloc) continue;

        int handle_num;
        const char* name;
        union buflib_data *block_start, *alloc_start;
        intptr_t alloc_len;

        handle_num = end - this;
        alloc_start = buflib_get_data(ctx, handle_num);
        name = buflib_get_name(ctx, handle_num);
        block_start = (union buflib_data*)name - 3;
        alloc_len = block_start->val * sizeof(union buflib_data);

        snprintf(buf, sizeof(buf),
                "%s(%d):\t%p\n"
                "   \t%p\n"
                "   \t%ld\n",
                name ?: "(null)", handle_num, block_start, alloc_start,
                (long)alloc_len);
        /* handle_num is 1-based */
        print(handle_num - 1, buf);
    }
}
void buflib_print_blocks(struct buflib_context *ctx,
                         void (*print)(int, const char*))
{
    char buf[128];
    int i = 0;
    for(union buflib_data* this = ctx->buf_start;
                           this < ctx->alloc_end;
                           this += abs(this->val))
    {
        snprintf(buf, sizeof(buf), "%8p: val: %4ld (%s)",
                this, (long)this->val,
                this->val > 0 ? this[3].name : "<unallocated>");
        print(i++, buf);
    }
}
#endif
#ifdef BUFLIB_DEBUG_BLOCK_SINGLE
int buflib_get_num_blocks(struct buflib_context *ctx)
{
    int i = 0;
    for(union buflib_data* this = ctx->buf_start;
                           this < ctx->alloc_end;
                           this += abs(this->val))
    {
        i++;
    }
    return i;
}
void buflib_print_block_at(struct buflib_context *ctx, int block_num,
                           char* buf, size_t bufsize)
{
    union buflib_data* this = ctx->buf_start;
    while(block_num > 0 && this < ctx->alloc_end)
    {
        this += abs(this->val);
        block_num -= 1;
    }

    snprintf(buf, bufsize, "%8p: val: %4ld (%s)",
            this, (long)this->val,
            this->val > 0 ? this[3].name : "<unallocated>");
}
#endif