/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * This is a memory allocator designed to provide reasonable management of
 * free space and fast access to allocated data. More than one allocator can
 * be used at a time by initializing multiple contexts.
 *
 * Copyright (C) 2009 Andrew Mahone
 * Copyright (C) 2011 Thomas Martitz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h> /* for abs() */
#include <stdio.h>  /* for snprintf() */
#include "buflib.h"
#include "string-extra.h" /* strlcpy() */
#include "debug.h"
#include "system.h" /* for ALIGN_*() */
/* The main goal of this design is fast fetching of the pointer for a handle.
 * For that reason, the handles are stored in a table at the end of the buffer
 * with a fixed address, so that returning the pointer for a handle is a simple
 * table lookup. To reduce the frequency with which allocated blocks will need
 * to be moved to free space, allocations grow up in address from the start of
 * the buffer. The buffer is treated as an array of union buflib_data. Blocks
 * start with a length marker, which is included in their length. Free blocks
 * are marked by a negative length. Allocated blocks have a positive length
 * marker, and additional metadata following it: first a pointer
 * (union buflib_data*) to the corresponding handle table entry, so that it can
 * be quickly found and updated during compaction. After that follows
 * the pointer to the struct buflib_callbacks associated with this allocation
 * (may be NULL). That pointer is followed by a variable length character array
 * containing the nul-terminated string identifier of the allocation. After this
 * array there's a length marker for the length of the character array including
 * this length marker (counted in n*sizeof(union buflib_data)), which allows
 * the start of the character array (and therefore the start of the entire
 * block) to be found when only the handle or payload start is known.
 *
 * Example:
 * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->|
 * |L|H|C|cccc|L2|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|XXXXXXXXXXXXX|AAA|
 *
 * L  - length marker (negative if block unallocated)
 * H  - handle table entry pointer
 * C  - pointer to struct buflib_callbacks
 * c  - variable sized string identifier
 * L2 - second length marker for string identifier
 * X  - actual payload
 * Y  - unallocated space
 *
 * A - pointer to start of payload (first X) in the handle table (may be null)
 *
 * The blocks can be walked by jumping the abs() of the L length marker, i.e.
 *     union buflib_data* L;
 *     for(L = start; L < end; L += abs(L->val)) { .... }
 *
 * The allocator functions are passed a context struct so that two allocators
 * can be run; for example, one per core may be used, with convenience wrappers
 * for the single-allocator case that use a predefined context.
 */
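/* A minimal usage sketch (illustrative only; buffer size and names are
 * arbitrary). Callers store handles, not pointers, and re-fetch the payload
 * pointer after any call that may compact the buffer:
 *
 *     static union buflib_data buffer[1024];
 *     struct buflib_context ctx;
 *
 *     buflib_init(&ctx, buffer, sizeof(buffer));
 *     int handle = buflib_alloc(&ctx, 512);
 *     if (handle > 0)
 *     {
 *         char *data = buflib_get_data(&ctx, handle);
 *         ... use data ...
 *         buflib_free(&ctx, handle);
 *     }
 *
 * A second, independent allocator is simply another context initialized on
 * its own buffer with buflib_init().
 */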
#define B_ALIGN_DOWN(x) \
    ALIGN_DOWN(x, sizeof(union buflib_data))

#define B_ALIGN_UP(x) \
    ALIGN_UP(x, sizeof(union buflib_data))
#ifdef DEBUG
#define BDEBUGF DEBUGF
#else
#define BDEBUGF(...) do { } while(0)
#endif
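/* A block is movable if it has no callbacks at all (default behaviour), or if
 * its callbacks provide a move_callback; non-NULL ops without a move_callback
 * pin the block in place. */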
#define IS_MOVABLE(a) (!a[2].ops || a[2].ops->move_callback)
static union buflib_data* find_first_free(struct buflib_context *ctx);
static union buflib_data* find_block_before(struct buflib_context *ctx,
                                            union buflib_data* block,
                                            bool is_free);
/* Initialize buffer manager */
void
buflib_init(struct buflib_context *ctx, void *buf, size_t size)
{
    union buflib_data *bd_buf = buf;

    /* Align on sizeof(buflib_data), to prevent unaligned access */
    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
    size /= sizeof(union buflib_data);
    /* The handle table is initialized with no entries */
    ctx->handle_table = bd_buf + size;
    ctx->last_handle = bd_buf + size;
    ctx->first_free_handle = bd_buf + size - 1;
    ctx->buf_start = bd_buf;
    /* A marker is needed for the end of allocated data, to make sure that it
     * does not collide with the handle table, and to detect end-of-buffer.
     */
    ctx->alloc_end = bd_buf;
    ctx->compact = true;

    BDEBUGF("buflib initialized with %lu.%02lu kiB\n",
            (unsigned long)(size * sizeof(union buflib_data)) / 1024,
            ((unsigned long)(size * sizeof(union buflib_data)) % 1024) * 100 / 1024);
}
/* Allocate a new handle, returning 0 on failure */
static inline
union buflib_data* handle_alloc(struct buflib_context *ctx)
{
    union buflib_data *handle;
    /* first_free_handle is a lower bound on free handles, work through the
     * table from there until a handle containing NULL is found, or the end
     * of the table is reached.
     */
    for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
        if (!handle->alloc)
            break;
    /* If the search went past the end of the table, it means we need to extend
     * the table to get a new handle.
     */
    if (handle < ctx->last_handle)
    {
        /* Extending the table is only possible if it doesn't collide with
         * the end of allocated data. */
        if (handle >= ctx->alloc_end)
            ctx->last_handle--;
        else
            return NULL;
    }
    /* Mark the handle as in use, and return it. */
    handle->val = -1;
    return handle;
}
/* Free one handle, shrinking the handle table if it's the last one */
static inline
void handle_free(struct buflib_context *ctx, union buflib_data *handle)
{
    handle->alloc = 0;
    /* Update free handle lower bound if this handle has a lower index than the
     * old one.
     */
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle;
    if (handle == ctx->last_handle)
        ctx->last_handle++;
    else
        ctx->compact = false;
}
/* Get the start block of an allocation */
static union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
{
    union buflib_data* name_field =
        (union buflib_data*)buflib_get_name(ctx, handle);

    return name_field - 3;
}
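/* Note on the "- 3" above: per the block layout described at the top of this
 * file, the name field is preceded by exactly three union buflib_data slots
 * (length marker L, handle table entry pointer H, callbacks pointer C), so
 * stepping back three slots from the name yields the block start. */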
/* Shrink the handle table, returning true if its size was reduced, false if
 * not.
 */
static inline
bool
handle_table_shrink(struct buflib_context *ctx)
{
    bool rv;
    union buflib_data *handle;
    for (handle = ctx->last_handle; !(handle->alloc); handle++);
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle - 1;
    rv = handle != ctx->last_handle;
    ctx->last_handle = handle;
    return rv;
}
/* If shift is non-zero, it represents the number of places to move
 * blocks in memory. Calculate the new address for this block,
 * update its entry in the handle table, and then move its contents.
 *
 * Returns false if moving was unsuccessful
 * (NULL callback or BUFLIB_CB_CANNOT_MOVE was returned)
 */
static bool
move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
{
    char* new_start;
    union buflib_data *new_block, *tmp = block[1].handle;
    struct buflib_callbacks *ops = block[2].ops;
    if (!IS_MOVABLE(block))
        return false;

    int handle = ctx->handle_table - tmp;
    BDEBUGF("%s(): moving \"%s\"(id=%d) by %d(%d)\n", __func__, block[3].name,
            handle, shift, shift*(int)sizeof(union buflib_data));
    new_block = block + shift;
    new_start = tmp->alloc + shift*sizeof(union buflib_data);

    /* disable IRQs to make accessing the buffer from interrupt context safe. */
    /* protect the move callback, as a cached global pointer might be updated
     * in it. and protect "tmp->alloc = new_start" for buflib_get_data() */
    disable_irq();
    /* call the callback before moving */
    if (ops)
    {
        if (ops->move_callback(handle, tmp->alloc, new_start)
                == BUFLIB_CB_CANNOT_MOVE)
        {
            enable_irq();
            return false;
        }
    }
    tmp->alloc = new_start; /* update handle table */
    memmove(new_block, block, block->val * sizeof(union buflib_data));
    enable_irq();

    return true;
}
/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *block,
                      *hole = NULL;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    /* compaction has basically two modes of operation:
     * 1) the buffer is nicely movable: In this mode, blocks can simply be
     *    moved towards the beginning. Free blocks add to a shift value,
     *    which is the amount to move.
     * 2) the buffer contains unmovable blocks: unmovable blocks create
     *    holes and reset shift. Once a hole is found, we try to fill
     *    holes first; moving by shift is the fallback. As the shift is reset,
     *    this effectively splits the buffer into portions of movable blocks.
     *    This mode cannot be used if no holes were found yet, as it only works
     *    when it moves blocks across the portions. On the other hand,
     *    moving by shift only works within the same portion.
     *    For simplicity only 1 hole at a time is considered. */
    for(block = find_first_free(ctx); block < ctx->alloc_end; block += len)
    {
        bool movable = true; /* cache result to avoid 2nd call to move_block */
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;
            len = -len;
            continue;
        }
        /* attempt to fill any hole */
        if (hole && -hole->val >= len)
        {
            intptr_t hlen = -hole->val;
            if ((movable = move_block(ctx, block, hole - block)))
            {
                ret = true;
                /* Move was successful. The memory at block is now free */
                block->val = -len;
                /* add its length to shift */
                shift += -len;
                /* Reduce the size of the hole accordingly
                 * but be careful to not overwrite an existing block */
                if (hlen != len)
                {
                    hole += len;
                    hole->val = len - hlen; /* negative */
                }
                else /* hole closed */
                    hole = NULL;
                continue;
            }
        }
        /* attempt to move the allocation by shift */
        if (shift)
        {
            union buflib_data* target_block = block + shift;
            if (!movable || !move_block(ctx, block, shift))
            {
                /* free space before an unmovable block becomes a hole,
                 * therefore mark this block free and track the hole */
                target_block->val = shift;
                hole = target_block;
                shift = 0;
            }
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed.
     */
    ctx->alloc_end += shift;
    return ret || shift;
}
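/* Worked illustration of buflib_compact()'s two modes above (block sizes are
 * made up; U is unmovable, F free, H a tracked hole):
 *
 *    start:      |#1|FFF|#2|U|FF|#3|
 *    shift mode: |#1|#2|HHH|U|FF|#3|    (#2 moved left by 3; the free space
 *                                        trapped before unmovable U becomes
 *                                        hole H, shift resets to 0)
 *    hole fill:  |#1|#2|#3|H|U|FFFF|    (#3, 2 units, jumps back into the
 *                                        3-unit hole; 1 unit of hole remains
 *                                        and the tail consolidates at
 *                                        alloc_end)
 */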
/* Compact the buffer by trying both shrinking and moving.
 *
 * Try to move first. If unsuccessful, try to shrink. If that was successful
 * try to move once more as there might be more room now.
 */
static bool
buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
{
    bool result = false;
    /* if something was compacted before already there will be no further gain */
    if (!ctx->compact)
        result = buflib_compact(ctx);
    if (!result)
    {
        union buflib_data *this, *before;
        for(this = ctx->buf_start, before = this;
            this < ctx->alloc_end;
            before = this, this += abs(this->val))
        {
            if (this->val > 0 && this[2].ops
                              && this[2].ops->shrink_callback)
            {
                int ret;
                int handle = ctx->handle_table - this[1].handle;
                char* data = this[1].handle->alloc;
                bool last = (this+this->val) == ctx->alloc_end;
                unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK;
                /* adjust what we ask for if there's free space in the front;
                 * this isn't too unlikely assuming this block is
                 * shrinkable but not movable */
                if (pos_hints == BUFLIB_SHRINK_POS_FRONT
                    && before != this && before->val < 0)
                {
                    size_t free_space = (-before->val) * sizeof(union buflib_data);
                    size_t wanted = shrink_hints & BUFLIB_SHRINK_SIZE_MASK;
                    if (wanted < free_space) /* no shrink needed? */
                        continue;
                    wanted -= free_space;
                    shrink_hints = pos_hints | wanted;
                }
                ret = this[2].ops->shrink_callback(handle, shrink_hints,
                                        data, (char*)(this+this->val)-data);
                result |= (ret == BUFLIB_CB_OK);
                /* this might have changed in the callback (if
                 * it shrunk from the top), get it again */
                this = handle_to_block(ctx, handle);
                /* could also change with shrinking from the back */
                if (last)
                    ctx->alloc_end = this + this->val;
            }
        }
        /* shrinking was successful at least once, try compaction again */
        if (result)
            result |= buflib_compact(ctx);
    }

    return result;
}
/* Shift buffered items by size units, and update handle pointers. The shift
 * value must be determined to be safe *before* calling.
 */
static void
buflib_buffer_shift(struct buflib_context *ctx, int shift)
{
    memmove(ctx->buf_start + shift, ctx->buf_start,
        (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
    ctx->buf_start += shift;
    ctx->alloc_end += shift;
    shift *= sizeof(union buflib_data);
    union buflib_data *handle;
    for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
        if (handle->alloc)
            handle->alloc += shift;
}
/* Shift buffered items up by size bytes, or as many as possible if size == 0.
 * Set size to the number of bytes freed.
 */
void* buflib_buffer_out(struct buflib_context *ctx, size_t *size)
{
    if (!ctx->compact)
        buflib_compact(ctx);
    size_t avail = ctx->last_handle - ctx->alloc_end;
    size_t avail_b = avail * sizeof(union buflib_data);
    if (*size && *size < avail_b)
    {
        avail = (*size + sizeof(union buflib_data) - 1)
                    / sizeof(union buflib_data);
        avail_b = avail * sizeof(union buflib_data);
    }
    *size = avail_b;
    void *ret = ctx->buf_start;
    buflib_buffer_shift(ctx, avail);
    return ret;
}
/* Shift buffered items down by size bytes */
void buflib_buffer_in(struct buflib_context *ctx, int size)
{
    size /= sizeof(union buflib_data);
    buflib_buffer_shift(ctx, -size);
}
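/* Sketch of the intended pairing (illustrative; the byte count is arbitrary):
 * a caller may temporarily steal space from the front of the buffer and later
 * hand back the same amount:
 *
 *     size_t size = 4096;
 *     void *borrowed = buflib_buffer_out(&ctx, &size);
 *     ... use size bytes at borrowed ...
 *     buflib_buffer_in(&ctx, size);
 */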
/* Allocate a buffer of size bytes, returning a handle for it */
int
buflib_alloc(struct buflib_context *ctx, size_t size)
{
    return buflib_alloc_ex(ctx, size, "<anonymous>", NULL);
}
/* Allocate a buffer of size bytes, returning a handle for it.
 *
 * The additional name parameter gives the allocation a human-readable name,
 * the ops parameter points to caller-implemented callbacks for moving and
 * shrinking. NULL for default callbacks (which do nothing but don't
 * prevent moving or shrinking)
 */
int
buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                struct buflib_callbacks *ops)
{
    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to handle table entry and
            * name length, and the ops pointer */
           + 4
           + name_len/sizeof(union buflib_data);

handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again.
         */
        union buflib_data* last_block = find_block_before(ctx,
                                            ctx->alloc_end, false);
        struct buflib_callbacks* ops = last_block[2].ops;
        unsigned hints = 0;
        if (!ops || !ops->shrink_callback)
        {   /* the last one isn't shrinkable,
             * make room in front of a shrinkable and move this alloc */
            hints = BUFLIB_SHRINK_POS_FRONT;
            hints |= last_block->val * sizeof(union buflib_data);
        }
        else if (ops && ops->shrink_callback)
        {   /* the last is shrinkable, make room for handles directly */
            hints = BUFLIB_SHRINK_POS_BACK;
            hints |= 16*sizeof(union buflib_data);
        }
        /* buflib_compact_and_shrink() will compact and move last_block
         * if possible */
        if (buflib_compact_and_shrink(ctx, hints))
            goto handle_alloc;
        return -1;
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = find_first_free(ctx);; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        unsigned hint = BUFLIB_SHRINK_POS_FRONT |
                    ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK);
        if (buflib_compact_and_shrink(ctx, hint))
            goto buffer_alloc;
        handle_free(ctx, handle);
        return -2;
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle.
     */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops;
    strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);

    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;

    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
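/* Sketch of caller-side callbacks (illustrative; my_move_cb, my_ops and the
 * cached pointer are made-up names). A client that caches a pointer into its
 * allocation updates it in move_callback; returning BUFLIB_CB_CANNOT_MOVE
 * would refuse that particular move instead:
 *
 *     static char *cached_ptr;
 *
 *     static int my_move_cb(int handle, void *current, void *new)
 *     {
 *         cached_ptr = cached_ptr - (char*)current + (char*)new;
 *         return BUFLIB_CB_OK;
 *     }
 *
 *     static struct buflib_callbacks my_ops = {
 *         .move_callback = my_move_cb,
 *         .shrink_callback = NULL,      movable, but cannot be shrunk
 *     };
 *
 *     int handle = buflib_alloc_ex(&ctx, 1024, "my buffer", &my_ops);
 */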
static union buflib_data*
find_first_free(struct buflib_context *ctx)
{
    union buflib_data* ret = ctx->buf_start;
    while(ret < ctx->alloc_end)
    {
        if (ret->val < 0)
            break;
        ret += ret->val;
    }
    /* ret is now either a free block or the same as alloc_end; either is fine */
    return ret;
}
/* Finds the block before block, and returns NULL if it's not free */
static union buflib_data*
find_block_before(struct buflib_context *ctx, union buflib_data* block,
                  bool is_free)
{
    union buflib_data *ret = ctx->buf_start,
                      *next_block = ret;

    /* find the block that's before the current one */
    while (next_block < block)
    {
        ret = next_block;
        next_block += abs(ret->val);
    }

    /* If next_block == block, the above loop didn't go anywhere. If it did,
     * and the block before this one is free, that is the wanted one
     */
    if (next_block == block && ret < block)
    {
        if (is_free && ret->val >= 0) /* NULL if found block isn't free */
            return NULL;
        return ret;
    }
    return NULL;
}
/* Free the buffer associated with handle_num. */
int
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block, *next_block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one.
     */
    block = find_block_before(ctx, freed_block, true);
    if (block)
    {
        /* Merge with the free block before it */
        block->val -= freed_block->val;
    }
    else
    {
        /* Otherwise, set block to the newly-freed block, and mark it free,
         * before continuing on, since the code below expects block to point
         * to a free block which may have free space after it.
         */
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    else
    {
        /* Otherwise, the next block might still be a "normal" free block, and
         * the mid-allocation free means that the buffer is no longer compact.
         */
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;

    return 0; /* unconditionally */
}
static size_t
free_space_at_end(struct buflib_context* ctx)
{
    /* subtract 5 elements for
     * val, handle, name_len, ops and the handle table entry */
    ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - 5);
    diff -= 16; /* space for future handles */
    diff *= sizeof(union buflib_data); /* make it bytes */
    diff -= 16; /* reserve 16 for the name */

    if (diff > 0)
        return diff;
    else
        return 0;
}
/* Return the maximum allocatable memory in bytes */
size_t
buflib_available(struct buflib_context* ctx)
{
    union buflib_data *this;
    size_t free_space = 0, max_free_space = 0;

    /* make sure buffer is as contiguous as possible */
    if (!ctx->compact)
        buflib_compact(ctx);

    /* now look if there's free space in holes */
    for(this = find_first_free(ctx); this < ctx->alloc_end; this += abs(this->val))
    {
        if (this->val < 0)
        {
            free_space += -this->val;
            continue;
        }
        /* an unmovable section resets the count as free space
         * can't be contiguous */
        if (!IS_MOVABLE(this))
        {
            if (max_free_space < free_space)
                max_free_space = free_space;
            free_space = 0;
        }
    }

    /* select the best */
    max_free_space = MAX(max_free_space, free_space);
    max_free_space *= sizeof(union buflib_data);
    max_free_space = MAX(max_free_space, free_space_at_end(ctx));

    if (max_free_space > 0)
        return max_free_space;
    else
        return 0;
}
/* Allocate all available (as returned by buflib_available()) memory and return
 * a handle to it.
 *
 * This grabs a lock which can only be unlocked by buflib_free() or
 * buflib_shrink(), to protect from further allocations (which couldn't be
 * served anyway).
 */
int
buflib_alloc_maximum(struct buflib_context* ctx, const char* name, size_t *size, struct buflib_callbacks *ops)
{
    /* limit name to 16 bytes since that's what buflib_available() accounts
     * for the name */
    char buf[16];

    *size = buflib_available(ctx);
    if (*size <= 0) /* OOM */
        return -1;

    strlcpy(buf, name, sizeof(buf));

    return buflib_alloc_ex(ctx, *size, buf, ops);
}
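/* Typical pattern (sketch; load_into() is a hypothetical caller function and
 * error handling is omitted): grab everything while loading, then shrink to
 * what was actually used so other allocations can succeed again:
 *
 *     size_t size;
 *     int handle = buflib_alloc_maximum(&ctx, "loader", &size, &my_ops);
 *     char *buf = buflib_get_data(&ctx, handle);
 *     size_t used = load_into(buf, size);
 *     buflib_shrink(&ctx, handle, buf, used);
 */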
/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Growing is not possible, therefore new_start and
 * new_start + new_size must be within the original allocation.
 */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;

    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;

    union buflib_data *block = handle_to_block(ctx, handle),
        *old_next_block = block + block->val,
        /* newstart isn't necessarily properly aligned but it
         * needn't be since it's only dereferenced by the user code */
        *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
        *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
        *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
        *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;

    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;

    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move metadata over, i.e. pointer to handle table entry and name.
         * This is actually the point of no return. Data in the allocation is
         * being modified, and therefore we must successfully finish the
         * shrink operation.
         */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated */
        block->val = block - new_block;
        /* find the block before in order to merge with the new free space */
        union buflib_data *free_before = find_block_before(ctx, block, true);
        if (free_before)
            free_before->val += block->val;

        /* We didn't handle size changes yet, assign block to the new one;
         * the code below then wants block whether it changed or not */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else if (old_next_block != new_next_block)
        {   /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
    }

    return true;
}
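/* Sketch of a caller-side shrink_callback (illustrative; assumes a file-scope
 * context named ctx owned by the caller). buflib_compact_and_shrink() above
 * passes hints encoding where and how much space is wanted; this callback
 * gives up the front of its allocation accordingly:
 *
 *     static int my_shrink_cb(int handle, unsigned hints,
 *                             void *start, size_t old_size)
 *     {
 *         size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;
 *         if ((hints & BUFLIB_SHRINK_POS_MASK) == BUFLIB_SHRINK_POS_FRONT
 *             && wanted <= old_size)
 *         {
 *             buflib_shrink(&ctx, handle, (char*)start + wanted,
 *                           old_size - wanted);
 *             return BUFLIB_CB_OK;
 *         }
 *         return BUFLIB_CB_CANNOT_SHRINK;
 *     }
 */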
const char* buflib_get_name(struct buflib_context *ctx, int handle)
{
    union buflib_data *data = ALIGN_DOWN(buflib_get_data(ctx, handle), sizeof (*data));
    size_t len = data[-1].val;
    if (len <= 1) /* only the name length marker, no name stored */
        return NULL;
    return data[-len].name;
}
#ifdef BUFLIB_DEBUG_BLOCKS
void buflib_print_allocs(struct buflib_context *ctx,
                         void (*print)(int, const char*))
{
    union buflib_data *this, *end = ctx->handle_table;
    char buf[128];
    for(this = end - 1; this >= ctx->last_handle; this--)
    {
        if (!this->alloc) continue;

        int handle_num;
        const char* name;
        union buflib_data *block_start, *alloc_start;
        long alloc_len;

        handle_num = end - this;
        alloc_start = buflib_get_data(ctx, handle_num);
        name = buflib_get_name(ctx, handle_num);
        block_start = (union buflib_data*)name - 3;
        alloc_len = block_start->val * sizeof(union buflib_data);

        snprintf(buf, sizeof(buf),
                 "%s(%d): block %p, data %p, len %ld",
                 name ?: "(null)", handle_num,
                 (void*)block_start, (void*)alloc_start, alloc_len);
        /* handle_num is 1-based */
        print(handle_num - 1, buf);
    }
}
void buflib_print_blocks(struct buflib_context *ctx,
                         void (*print)(int, const char*))
{
    char buf[128];
    int i = 0;
    for(union buflib_data* this = ctx->buf_start;
        this < ctx->alloc_end;
        this += abs(this->val))
    {
        snprintf(buf, sizeof(buf), "%8p: val: %4ld (%s)",
                 this, (long)this->val,
                 this->val > 0 ? this[3].name : "<unallocated>");
        print(i++, buf);
    }
}
#endif
#ifdef BUFLIB_DEBUG_BLOCK_SINGLE
int buflib_get_num_blocks(struct buflib_context *ctx)
{
    int i = 0;
    for(union buflib_data* this = ctx->buf_start;
        this < ctx->alloc_end;
        this += abs(this->val))
    {
        i++;
    }
    return i;
}
void buflib_print_block_at(struct buflib_context *ctx, int block_num,
                           char* buf, size_t bufsize)
{
    union buflib_data* this = ctx->buf_start;
    while(block_num > 0 && this < ctx->alloc_end)
    {
        this += abs(this->val);
        block_num--;
    }

    snprintf(buf, bufsize, "%8p: val: %4ld (%s)",
             this, (long)this->val,
             this->val > 0 ? this[3].name : "<unallocated>");
}
#endif