/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * This is a memory allocator designed to provide reasonable management of free
 * space and fast access to allocated data. More than one allocator can be used
 * at a time by initializing multiple contexts.
 *
 * Copyright (C) 2009 Andrew Mahone
 * Copyright (C) 2011 Thomas Martitz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h> /* for abs() */
#include <stdio.h> /* for snprintf() */
#include <string.h> /* for memmove(), strcpy() */
#include "buflib.h"
#include "string-extra.h" /* strlcpy() */
#include "debug.h"
#include "buffer.h"
#include "system.h" /* for ALIGN_*() */
/* The main goal of this design is fast fetching of the pointer for a handle.
 * For that reason, the handles are stored in a table at the end of the buffer
 * with a fixed address, so that returning the pointer for a handle is a simple
 * table lookup. To reduce the frequency with which allocated blocks will need
 * to be moved to free space, allocations grow up in address from the start of
 * the buffer. The buffer is treated as an array of union buflib_data. Blocks
 * start with a length marker, which is included in their length. Free blocks
 * are marked by a negative length. Allocated blocks have a positive length
 * marker, with additional metadata following it: first a pointer
 * (union buflib_data*) to the corresponding handle table entry, so that it can
 * be quickly found and updated during compaction. After that follows
 * the pointer to the struct buflib_callbacks associated with this allocation
 * (may be NULL). That pointer is followed by a variable-length character array
 * containing the nul-terminated string identifier of the allocation. After this
 * array there's a length marker for the length of the character array including
 * this length marker (counted in multiples of sizeof(union buflib_data)), which
 * allows finding the start of the character array (and therefore the start of
 * the entire block) when only the handle or payload start is known.
 *
 * Example:
 * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->|
 * |L|H|C|cccc|L2|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|XXXXXXXXXXXXX|AAA|
 *
 * L  - length marker (negative if the block is unallocated)
 * H  - handle table entry pointer
 * C  - pointer to struct buflib_callbacks
 * c  - variable-sized string identifier
 * L2 - second length marker for the string identifier
 * X  - actual payload
 * Y  - unallocated space
 * A  - pointer to the start of the payload (first X) in the handle table
 *      (may be NULL)
 *
 * The blocks can be walked by jumping abs() of the L length marker, i.e.
 * union buflib_data* L;
 * for(L = start; L < end; L += abs(L->val)) { .... }
 *
 * The allocator functions are passed a context struct so that multiple
 * allocators can be run; for example, one per core. Convenience wrappers
 * for the single-allocator case use a predefined context.
 */
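
/* Minimal usage sketch (buffer size and names here are illustrative only;
 * the pointer returned by buflib_get_data() may be invalidated by any call
 * that can move allocations, so it should be re-fetched afterwards):
 *
 *     static char pool[32*1024];
 *     struct buflib_context ctx;
 *     buflib_init(&ctx, pool, sizeof(pool));
 *     int handle = buflib_alloc(&ctx, 1024);   (positive on success)
 *     if (handle > 0)
 *     {
 *         char *data = buflib_get_data(&ctx, handle);
 *         ... use data ...
 *         buflib_free(&ctx, handle);
 *     }
 */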
#define B_ALIGN_DOWN(x) \
    ALIGN_DOWN(x, sizeof(union buflib_data))

#define B_ALIGN_UP(x) \
    ALIGN_UP(x, sizeof(union buflib_data))

#ifdef DEBUG
    #include <stdio.h>
    #define BDEBUGF DEBUGF
#else
    #define BDEBUGF(...) do { } while(0)
#endif
/* Initialize buffer manager */
void
buflib_init(struct buflib_context *ctx, void *buf, size_t size)
{
    union buflib_data *bd_buf = buf;

    /* Align on sizeof(buflib_data), to prevent unaligned access */
    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
    size /= sizeof(union buflib_data);
    /* The handle table is initialized with no entries */
    ctx->handle_table = bd_buf + size;
    ctx->last_handle = bd_buf + size;
    ctx->first_free_handle = bd_buf + size - 1;
    ctx->first_free_block = bd_buf;
    ctx->buf_start = bd_buf;
    /* A marker is needed for the end of allocated data, to make sure that it
     * does not collide with the handle table, and to detect end-of-buffer.
     */
    ctx->alloc_end = bd_buf;
    ctx->compact = true;

    BDEBUGF("buflib initialized with %d.%2d kiB", size / 1024, (size%1000)/10);
}
/* Allocate a new handle, returning 0 on failure */
static inline
union buflib_data* handle_alloc(struct buflib_context *ctx)
{
    union buflib_data *handle;
    /* first_free_handle is a lower bound on free handles, work through the
     * table from there until a handle containing NULL is found, or the end
     * of the table is reached.
     */
    for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
        if (!handle->alloc)
            break;
    /* If the search went past the end of the table, it means we need to extend
     * the table to get a new handle.
     */
    if (handle < ctx->last_handle)
    {
        if (handle >= ctx->alloc_end)
            ctx->last_handle--;
        else
            return NULL;
    }
    handle->val = -1;
    return handle;
}
/* Free one handle, shrinking the handle table if it's the last one */
static inline
void handle_free(struct buflib_context *ctx, union buflib_data *handle)
{
    handle->alloc = 0;
    /* Update the free-handle lower bound if this handle has a lower index than
     * the old one.
     */
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle;
    if (handle == ctx->last_handle)
        ctx->last_handle++;
    else
        ctx->compact = false;
}
/* Get the start block of an allocation */
static union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
{
    union buflib_data* name_field =
        (union buflib_data*)buflib_get_name(ctx, handle);

    /* The name field is block[3] (after the length marker, the handle table
     * entry pointer and the ops pointer), so stepping back 3 elements yields
     * the start of the block. */
    return name_field - 3;
}
/* Shrink the handle table, returning true if its size was reduced, false if
 * not
 */
static inline
bool
handle_table_shrink(struct buflib_context *ctx)
{
    bool rv;
    union buflib_data *handle;
    for (handle = ctx->last_handle; !(handle->alloc); handle++);
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle - 1;
    /* the table shrank iff the scan moved past freed entries */
    rv = handle != ctx->last_handle;
    ctx->last_handle = handle;
    return rv;
}
/* If shift is non-zero, it represents the number of places to move
 * blocks in memory. Calculate the new address for this block,
 * update its entry in the handle table, and then move its contents.
 *
 * Returns false if moving was unsuccessful
 * (NULL move_callback or BUFLIB_CB_CANNOT_MOVE was returned)
 */
static bool
move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
{
    char* new_start;
    union buflib_data *new_block, *tmp = block[1].handle;
    struct buflib_callbacks *ops = block[2].ops;
    if (ops && !ops->move_callback)
        return false;

    int handle = ctx->handle_table - tmp;
    BDEBUGF("%s(): moving \"%s\"(id=%d) by %d(%d)\n", __func__, block[3].name,
            handle, shift, shift*sizeof(union buflib_data));
    new_block = block + shift;
    new_start = tmp->alloc + shift*sizeof(union buflib_data);
    /* call the callback before moving */
    if (ops)
    {
        if (ops->move_callback(handle, tmp->alloc, new_start)
                == BUFLIB_CB_CANNOT_MOVE)
            return false;
    }
    tmp->alloc = new_start; /* update the handle table */
    memmove(new_block, block, block->val * sizeof(union buflib_data));

    return true;
}
/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *block;
    int shift = 0, len;
    /* Store the result of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    for(block = ctx->first_free_block; block != ctx->alloc_end; block += len)
    {
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;
            len = -len;
            continue;
        }
        /* attempt to fill any hole */
        if (-ctx->first_free_block->val > block->val)
        {
            intptr_t size = ctx->first_free_block->val;
            if (move_block(ctx, block, ctx->first_free_block - block))
            {
                /* moving was successful. Mark the next block as the new
                 * first_free_block and merge it with the free space
                 * that the move created */
                ctx->first_free_block += block->val;
                ctx->first_free_block->val = size + block->val;
                continue;
            }
        }
        /* attempt to move the allocation down by shift */
        if (shift)
        {
            /* failing to move creates a hole, therefore mark this
             * block as not allocated anymore and move first_free_block up */
            if (!move_block(ctx, block, shift))
            {
                union buflib_data* hole = block + shift;
                hole->val = shift;
                if (ctx->first_free_block > hole)
                    ctx->first_free_block = hole;
                shift = 0;
            }
            /* if the move was successful, the just-moved block is now
             * possibly in place of the first free one, so move this up */
            else if (ctx->first_free_block == block + shift)
            {
                ctx->first_free_block += ctx->first_free_block->val;
                ctx->first_free_block->val = shift;
            }
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed.
     */
    ctx->alloc_end += shift;
    /* only move first_free_block up if it wasn't already moved up by a hole */
    if (ctx->first_free_block > ctx->alloc_end)
        ctx->first_free_block = ctx->alloc_end;
    ctx->compact = true;
    return ret || shift;
}
/* Compact the buffer by trying both shrinking and moving.
 *
 * Try to move first. If unsuccessful, try to shrink. If that was successful,
 * try to move once more as there might be more room now.
 */
static bool
buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
{
    bool result = false;
    /* if something compacted before already there will be no further gain */
    if (!ctx->compact)
        result = buflib_compact(ctx);
    if (!result)
    {
        union buflib_data* this;
        for(this = ctx->buf_start; this < ctx->alloc_end; this += abs(this->val))
        {
            if (this->val > 0 && this[2].ops
                              && this[2].ops->shrink_callback)
            {
                int ret;
                int handle = ctx->handle_table - this[1].handle;
                char* data = this[1].handle->alloc;
                ret = this[2].ops->shrink_callback(handle, shrink_hints,
                                        data, (char*)(this+this->val)-data);
                result |= (ret == BUFLIB_CB_OK);
                /* 'this' might have changed in the callback (if it shrank
                 * from the top), get it again */
                this = handle_to_block(ctx, handle);
            }
        }
        /* shrinking was successful at least once, try compaction again */
        if (result)
            result |= buflib_compact(ctx);
    }
    return result;
}
/* Shift buffered items by shift units, and update handle pointers. The shift
 * value must be determined to be safe *before* calling.
 */
static void
buflib_buffer_shift(struct buflib_context *ctx, int shift)
{
    memmove(ctx->buf_start + shift, ctx->buf_start,
        (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
    union buflib_data *handle;
    for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
        if (handle->alloc)
            /* shift is in buflib_data units, alloc pointers are char* */
            handle->alloc += shift * sizeof(union buflib_data);
    ctx->first_free_block += shift;
    ctx->buf_start += shift;
    ctx->alloc_end += shift;
}
/* Shift buffered items up by size bytes, or as many as possible if size == 0.
 * Set size to the number of bytes freed.
 */
void*
buflib_buffer_out(struct buflib_context *ctx, size_t *size)
{
    if (!ctx->compact)
        buflib_compact(ctx);
    size_t avail = ctx->last_handle - ctx->alloc_end;
    size_t avail_b = avail * sizeof(union buflib_data);
    if (*size && *size < avail_b)
    {
        avail = (*size + sizeof(union buflib_data) - 1)
            / sizeof(union buflib_data);
        avail_b = avail * sizeof(union buflib_data);
    }
    *size = avail_b;
    void *ret = ctx->buf_start;
    buflib_buffer_shift(ctx, avail);
    return ret;
}
/* Shift buffered items down by size bytes */
void
buflib_buffer_in(struct buflib_context *ctx, int size)
{
    size /= sizeof(union buflib_data);
    buflib_buffer_shift(ctx, -size);
}
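
/* Usage sketch (hypothetical caller): lend memory from the front of the
 * buflib pool and later give it back. buflib_buffer_out() may make less
 * than the requested amount available, so the caller must check the
 * updated size:
 *
 *     size_t size = 512;
 *     void *borrowed = buflib_buffer_out(&ctx, &size);
 *     (size now holds the byte count actually made available)
 *     ... use borrowed[0 .. size-1] ...
 *     buflib_buffer_in(&ctx, size);
 */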
/* Allocate a buffer of size bytes, returning a handle for it */
int
buflib_alloc(struct buflib_context *ctx, size_t size)
{
    return buflib_alloc_ex(ctx, size, "<anonymous>", NULL);
}
/* Allocate a buffer of size bytes, returning a handle for it.
 *
 * The additional name parameter gives the allocation a human-readable name,
 * and the ops parameter points to caller-implemented callbacks for moving and
 * shrinking. Pass NULL for default callbacks (which do nothing, but don't
 * prevent moving or shrinking).
 */
int
buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                struct buflib_callbacks *ops)
{
    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to the handle table entry,
            * name length, and the ops pointer */
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again.
         */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
        {   /* first try to shrink the alloc before the handle table
             * to make room for new handles */
            int handle = ctx->handle_table - ctx->last_handle;
            union buflib_data* last_block = handle_to_block(ctx, handle);
            struct buflib_callbacks* ops = last_block[2].ops;
            if (ops && ops->shrink_callback)
            {
                char *data = buflib_get_data(ctx, handle);
                unsigned hint = BUFLIB_SHRINK_POS_BACK | 10*sizeof(union buflib_data);
                if (ops->shrink_callback(handle, hint, data,
                        (char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK)
                {   /* retry one more time */
                    goto handle_alloc;
                }
            }
            return -1;
        }
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = ctx->first_free_block;; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        unsigned hint = BUFLIB_SHRINK_POS_FRONT |
                    ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK);
        if (buflib_compact_and_shrink(ctx, hint))
        {
            goto buffer_alloc;
        } else {
            handle->val = 1;
            handle_free(ctx, handle);
            return -2;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle.
     */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops;
    if (name)
        strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block.
     */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
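
/* Illustrative sketch of allocating with callbacks (names are hypothetical;
 * callback signatures and return codes as declared in buflib.h). A movable
 * allocation supplies a move_callback that fixes up any cached pointers:
 *
 *     static int my_move_cb(int handle, void* current, void* new)
 *     {
 *         my_cached_ptr = new;    (update own pointers into the allocation)
 *         return BUFLIB_CB_OK;
 *     }
 *     static struct buflib_callbacks my_ops = {
 *         .move_callback = my_move_cb,
 *         .shrink_callback = NULL,    (movable, but never asked to shrink)
 *     };
 *     int handle = buflib_alloc_ex(&ctx, 4096, "my_alloc", &my_ops);
 */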
/* Find the free block before 'block', returning NULL if there is none */
static union buflib_data*
find_free_block_before(struct buflib_context *ctx, union buflib_data* block)
{
    union buflib_data *ret = ctx->first_free_block,
                      *next_block = ret;

    /* find the block that's before the current one */
    while (next_block < block)
    {
        ret = next_block;
        next_block += abs(ret->val);
    }

    /* If next_block == block, the above loop didn't go anywhere. If it did,
     * and the block before this one is free, that is the wanted one.
     */
    if (next_block == block && ret < block && ret->val < 0)
        return ret;
    /* otherwise, e.g. if ret > block, or if the buffer is compact,
     * there's no free block before */
    return NULL;
}
/* Free the buffer associated with handle_num. */
int
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block, *next_block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one.
     */
    block = find_free_block_before(ctx, freed_block);
    if (block)
    {
        /* merge with the preceding free block */
        block->val -= freed_block->val;
    }
    else
    {
        /* Otherwise, set block to the newly-freed block, and mark it free,
         * before continuing on, since the code below expects block to point
         * to a free block which may have free space after it.
         */
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact.
     */
    else
    {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;
    /* If this block is before first_free_block, it becomes the new starting
     * point for the free-block search.
     */
    if (block < ctx->first_free_block)
        ctx->first_free_block = block;

    return 0; /* unconditionally */
}
/* Return the maximum allocatable memory in bytes */
size_t
buflib_available(struct buflib_context* ctx)
{
    /* subtract 5 elements for val, handle, name_len, ops
     * and the handle table entry */
    ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - 5);
    diff -= 16; /* space for future handles */
    diff *= sizeof(union buflib_data); /* make it bytes */
    diff -= 16; /* reserve 16 bytes for the name */

    if (diff > 0)
        return diff;
    else
        return 0;
}
/*
 * Allocate all available (as returned by buflib_available()) memory and return
 * a handle to it.
 *
 * This grabs a lock which can only be unlocked by buflib_free() or
 * buflib_shrink(), to protect from further allocations (which couldn't be
 * serviced anyway).
 */
int
buflib_alloc_maximum(struct buflib_context* ctx, const char* name, size_t *size, struct buflib_callbacks *ops)
{
    /* limit the name to 16 bytes since that's what buflib_available()
     * accounts for */
    char buf[16];
    *size = buflib_available(ctx);
    strlcpy(buf, name, sizeof(buf));

    return buflib_alloc_ex(ctx, *size, buf, ops);
}
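
/* Usage sketch: grab all available memory, use part of it, then return the
 * rest with buflib_shrink(), which also lifts the allocation lock mentioned
 * above ('fill_data' and 'my_ops' are hypothetical):
 *
 *     size_t size;
 *     int handle = buflib_alloc_maximum(&ctx, "staging", &size, &my_ops);
 *     if (handle > 0)
 *     {
 *         char *data = buflib_get_data(&ctx, handle);
 *         size_t used = fill_data(data, size);
 *         buflib_shrink(&ctx, handle, data, used);
 *     }
 */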
/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Growing is not possible, therefore new_start and
 * new_start + new_size must be within the original allocation.
 */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;

    /* newstart must not be before oldstart, and new_size must not be
     * "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
                      *old_next_block = block + block->val,
                /* newstart isn't necessarily properly aligned but it
                 * needn't be since it's only dereferenced by the user code */
                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
                      *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;
    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;

    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move the metadata over, i.e. the pointer to the handle table entry
         * and the name. This is the point of no return: data in the allocation
         * is being modified, so we must successfully finish the shrink
         * operation. */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated */
        block->val = block - new_block;
        /* find the block before in order to merge with the new free space */
        union buflib_data *free_before = find_free_block_before(ctx, block);
        if (free_before)
            free_before->val += block->val;
        else if (ctx->first_free_block > block)
            ctx->first_free_block = block;

        /* Size changes are not handled yet; assign block to the new one.
         * The code below wants block whether it changed or not. */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge the next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else
        {   /* creating a hole;
             * the length must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
        /* update first_free_block for the newly created free space */
        if (ctx->first_free_block > new_next_block)
            ctx->first_free_block = new_next_block;
    }

    return true;
}
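
/* Illustrative shrink_callback sketch (hypothetical; 'unused_at_front' is an
 * assumed bookkeeping value of the owner, and the hint flags and return codes
 * are those declared in buflib.h). It gives up space at the front of the
 * allocation when asked:
 *
 *     static int my_shrink_cb(int handle, unsigned hints, void* start, size_t old_size)
 *     {
 *         size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;
 *         if (!(hints & BUFLIB_SHRINK_POS_FRONT) || wanted > unused_at_front)
 *             return BUFLIB_CB_CANNOT_SHRINK;
 *         if (!buflib_shrink(&ctx, handle, (char*)start + wanted, old_size - wanted))
 *             return BUFLIB_CB_CANNOT_SHRINK;
 *         return BUFLIB_CB_OK;
 *     }
 */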
const char* buflib_get_name(struct buflib_context *ctx, int handle)
{
    union buflib_data *data = (union buflib_data*)ALIGN_DOWN((intptr_t)buflib_get_data(ctx, handle), sizeof (*data));
    /* the name-length marker right before the payload counts the name array
     * plus itself, in buflib_data units; 1 means the name is empty */
    size_t len = data[-1].val;
    if (len <= 1)
        return NULL;
    return data[-len].name;
}
#ifdef BUFLIB_DEBUG_BLOCKS
void buflib_print_allocs(struct buflib_context *ctx,
                         void (*print)(int, const char*))
{
    union buflib_data *this, *end = ctx->handle_table;
    char buf[128];
    for(this = end - 1; this >= ctx->last_handle; this--)
    {
        if (!this->alloc) continue;

        int handle_num;
        const char *name;
        union buflib_data *block_start, *alloc_start;
        intptr_t alloc_len;

        handle_num = end - this;
        alloc_start = buflib_get_data(ctx, handle_num);
        name = buflib_get_name(ctx, handle_num);
        block_start = (union buflib_data*)name - 3;
        alloc_len = block_start->val * sizeof(union buflib_data);

        snprintf(buf, sizeof(buf),
                "%s(%d):\t%p\n"
                "   \t%p\n"
                "   \t%ld\n",
                name?:"(null)", handle_num, block_start, alloc_start,
                (long)alloc_len);
        /* handle_num is 1-based */
        print(handle_num - 1, buf);
    }
}
void buflib_print_blocks(struct buflib_context *ctx,
                         void (*print)(int, const char*))
{
    char buf[128];
    int i = 0;
    for(union buflib_data* this = ctx->buf_start;
        this < ctx->alloc_end;
        this += abs(this->val))
    {
        snprintf(buf, sizeof(buf), "%8p: val: %4ld (%s)",
                 this, (long)this->val,
                 this->val > 0 ? this[3].name : "<unallocated>");
        print(i++, buf);
    }
}
#endif
#ifdef BUFLIB_DEBUG_BLOCK_SINGLE
int buflib_get_num_blocks(struct buflib_context *ctx)
{
    int i = 0;
    for(union buflib_data* this = ctx->buf_start;
        this < ctx->alloc_end;
        this += abs(this->val))
    {
        i++;
    }
    return i;
}

void buflib_print_block_at(struct buflib_context *ctx, int block_num,
                           char* buf, size_t bufsize)
{
    union buflib_data* this = ctx->buf_start;
    while(block_num > 0 && this < ctx->alloc_end)
    {
        this += abs(this->val);
        block_num -= 1;
    }
    snprintf(buf, bufsize, "%8p: val: %4ld (%s)",
             this, (long)this->val,
             this->val > 0 ? this[3].name : "<unallocated>");
}
#endif