Disable moving memory in buflib for now.
[kugel-rb.git] / firmware / buflib.c
blob 8e6808bd54ce003e0b86a4e1b7728028b2f633a8
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    <| \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * This is a memory allocator designed to provide reasonable management of free
 * space and fast access to allocated data. More than one allocator can be used
 * at a time by initializing multiple contexts.
 *
 * Copyright (C) 2009 Andrew Mahone
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h> /* for abs() */
#include <stdio.h>  /* for snprintf() */
#include "buflib.h"
#include "string-extra.h"
#include "debug.h"
#include "buffer.h"
#include "system.h" /* for ALIGN_*() */

/* The main goal of this design is fast fetching of the pointer for a handle.
 * For that reason, the handles are stored in a table at the end of the buffer
 * with a fixed address, so that returning the pointer for a handle is a simple
 * table lookup. To reduce the frequency with which allocated blocks will need
 * to be moved to free space, allocations grow up in address from the start of
 * the buffer. The buffer is treated as an array of union buflib_data. Blocks
 * start with a length marker, which is included in their length. Free blocks
 * are marked by negative length; allocated ones use a buflib_data in the
 * block to store a pointer to their handle table entry, so that it can be
 * quickly found and updated during compaction. Following that, there is a
 * pointer to the corresponding struct buflib_callbacks. That pointer is
 * followed by a character array containing the string identifier of the
 * allocation. After the array there is another buflib_data containing the
 * length of that string plus the size of this buflib_data.
 *
 * The allocator functions are passed a context struct so that two allocators
 * can be run; for example, one per core may be used, with convenience wrappers
 * for the single-allocator case that use a predefined context.
 */
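
/* Illustrative block layout, derived from the code below (a sketch assuming
 * a 4-byte union buflib_data; not authoritative documentation). For an
 * allocation named "ab":
 *
 *   block[0].val    length of the whole block, in units of union buflib_data
 *   block[1].handle pointer to this block's entry in the handle table
 *   block[2].ops    pointer to the caller's struct buflib_callbacks
 *   block[3].name   "ab\0\0" (name, padded up to a buflib_data boundary)
 *   block[4].val    name length in buflib_data units, plus 1 for this field
 *   block[5]...     user data; the handle table entry's alloc points here
 */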

/* Use this for the default callbacks.
 *
 * The default callbacks do nothing, so the address of this struct acts
 * as a magic value indicating that they need not even be called.
 */
static struct buflib_callbacks default_callbacks;

#if defined(ROCKBOX)
#define YIELD() yield()
#elif defined(__unix) && (__unix == 1)
#include <sched.h>
#define YIELD() sched_yield()
#else
#warning YIELD not defined. Will busy-wait
#define YIELD()
#endif

#define B_ALIGN_DOWN(x) \
    ALIGN_DOWN(x, sizeof(union buflib_data))

#define B_ALIGN_UP(x) \
    ALIGN_UP(x, sizeof(union buflib_data))

#ifdef DEBUG
#include <stdio.h>
#define BDEBUGF DEBUGF
#else
#define BDEBUGF(...) do { } while(0)
#endif

/* Initialize buffer manager */
void
buflib_init(struct buflib_context *ctx, void *buf, size_t size)
{
    union buflib_data *bd_buf = buf;

    /* Align on sizeof(buflib_data), to prevent unaligned access */
    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
    size /= sizeof(union buflib_data);
    /* The handle table is initialized with no entries */
    ctx->handle_table = bd_buf + size;
    ctx->last_handle = bd_buf + size;
    ctx->first_free_handle = bd_buf + size - 1;
    ctx->first_free_block = bd_buf;
    ctx->buf_start = bd_buf;
    /* A marker is needed for the end of allocated data, to make sure that it
     * does not collide with the handle table, and to detect end-of-buffer.
     */
    ctx->alloc_end = bd_buf;
    ctx->compact = true;
    /* size is in units of union buflib_data here; report in kiB */
    BDEBUGF("buflib initialized with %lu.%02lu kiB\n",
            (unsigned long)(size*sizeof(union buflib_data)/1024),
            (unsigned long)(size*sizeof(union buflib_data)%1024)*100/1024);
}
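
/* Example use of buflib_init() (illustrative sketch; buflib_get_data() is
 * declared in buflib.h, and the sizes are hypothetical):
 *
 *   static union buflib_data backing[4096];
 *   static struct buflib_context ctx;
 *
 *   buflib_init(&ctx, backing, sizeof(backing));
 *   int handle = buflib_alloc(&ctx, 1000);
 *   if (handle > 0)
 *   {
 *       char *data = buflib_get_data(&ctx, handle);
 *       // ... use data ...
 *       buflib_free(&ctx, handle);
 *   }
 */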

/* Allocate a new handle, returning NULL on failure */
static inline
union buflib_data* handle_alloc(struct buflib_context *ctx)
{
    union buflib_data *handle;
    /* first_free_handle is a lower bound on free handles, work through the
     * table from there until a handle containing NULL is found, or the end
     * of the table is reached.
     */
    for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
        if (!handle->alloc)
            break;
    /* If the search went past the end of the table, it means we need to extend
     * the table to get a new handle.
     */
    if (handle < ctx->last_handle)
    {
        /* There is still room between the end of the allocations and the
         * handle table, so grow the table downward by one entry. */
        if (handle >= ctx->alloc_end)
            ctx->last_handle--;
        else
            return NULL;
    }
    handle->val = -1;
    return handle;
}

/* Free one handle, shrinking the handle table if it's the last one */
static inline
void handle_free(struct buflib_context *ctx, union buflib_data *handle)
{
    handle->alloc = 0;
    /* Update free handle lower bound if this handle has a lower index than the
     * old one.
     */
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle;
    if (handle == ctx->last_handle)
        ctx->last_handle++;
    else
        ctx->compact = false;
}

/* Get the start block of an allocation */
static union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
{
    union buflib_data* name_field =
                (union buflib_data*)buflib_get_name(ctx, handle);

    /* the name field is block[3]; step back over the ops pointer, the
     * handle table pointer and the length marker */
    return name_field - 3;
}

/* Shrink the handle table, returning true if its size was reduced, false if
 * not
 */
static inline
bool
handle_table_shrink(struct buflib_context *ctx)
{
    bool rv;
    union buflib_data *handle;
    /* skip over any freed handles at the end of the table */
    for (handle = ctx->last_handle; !(handle->alloc); handle++);
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle - 1;
    rv = handle != ctx->last_handle;
    ctx->last_handle = handle;
    return rv;
}

/* If shift is non-zero, it represents the number of places to move
 * blocks in memory. Calculate the new address for this block,
 * update its entry in the handle table, and then move its contents.
 */
static bool
move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
{
#if 1 /* moving temporarily disabled */
    (void)ctx;(void)block;(void)shift;
    return false;
#else
    char* new_start;
    union buflib_data *new_block, *tmp = block[1].handle;
    struct buflib_callbacks *ops = block[2].ops;
    if (ops && !ops->move_callback)
        return false;

    int handle = ctx->handle_table - tmp;
    BDEBUGF("%s(): moving \"%s\"(id=%d) by %d(%ld)\n", __func__, block[3].name,
            handle, shift, (long)(shift*sizeof(union buflib_data)));
    new_block = block + shift;
    new_start = tmp->alloc + shift*sizeof(union buflib_data);
    /* call the callback before moving; the default one needn't be called */
    if (ops)
        ops->move_callback(handle, tmp->alloc, new_start);

    tmp->alloc = new_start; /* update handle table */
    memmove(new_block, block, block->val * sizeof(union buflib_data));

    return true;
#endif
}

/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *first_free = ctx->first_free_block, *block;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    for(block = first_free; block != ctx->alloc_end; block += len)
    {
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;
            len = -len;
            continue;
        }
        /* attempt to fill any hole */
        if (abs(ctx->first_free_block->val) > block->val)
        {
            intptr_t size = first_free->val;
            if (move_block(ctx, block, first_free - block))
            {
                block->val *= -1;
                block = ctx->first_free_block;
                ctx->first_free_block += block->val;
                ctx->first_free_block->val = size + block->val;
                continue;
            }
        }
        /* attempt to move the allocation by shift */
        if (shift)
        {
            /* failing to move creates a hole, therefore mark this
             * block as not allocated anymore and move first_free_block up */
            if (!move_block(ctx, block, shift))
            {
                union buflib_data* hole = block + shift;
                hole->val = shift;
                if (ctx->first_free_block > hole)
                    ctx->first_free_block = hole;
                shift = 0;
            }
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed.
     */
    ctx->alloc_end += shift;
    /* only move first_free_block up if it wasn't already moved up by a hole */
    if (ctx->first_free_block > ctx->alloc_end)
        ctx->first_free_block = ctx->alloc_end;
    ctx->compact = true;
    return ret || shift;
}
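
/* Compaction sketch (illustrative):
 *
 *   before:  [A][free][B][C][free...............][handle table]
 *   after:   [A][B][C][free.....................][handle table]
 *
 * Each allocated block is moved down over the accumulated free space (via
 * move_block(), currently disabled), and alloc_end shrinks by the total
 * shift; blocks whose move callback refuses leave a hole behind instead.
 */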

/* Compact the buffer by trying both shrinking and moving.
 *
 * Try to move first. If unsuccessful, try to shrink. If that was successful,
 * try to move once more as there might be more room now.
 */
static bool
buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
{
    bool result = false;
    /* if something compacted before already there will be no further gain */
    if (!ctx->compact)
        result = buflib_compact(ctx);
    if (!result)
    {
        union buflib_data* this;
        for(this = ctx->buf_start; this < ctx->alloc_end; this += abs(this->val))
        {
            if (this->val > 0 && this[2].ops
                              && this[2].ops->shrink_callback)
            {
                int ret;
                int handle = ctx->handle_table - this[1].handle;
                char* data = this[1].handle->alloc;
                ret = this[2].ops->shrink_callback(handle, shrink_hints,
                                        data, (char*)(this+this->val)-data);
                result |= (ret == BUFLIB_CB_OK);
                /* 'this' might have changed in the callback (if it shrank
                 * from the top), so fetch it again */
                this = handle_to_block(ctx, handle);
            }
        }
        /* shrinking was successful at least once, try compaction again */
        if (result)
            result |= buflib_compact(ctx);
    }
    return result;
}

/* Shift buffered items by shift units of union buflib_data, and update handle
 * pointers. The shift value must be determined to be safe *before* calling.
 */
static void
buflib_buffer_shift(struct buflib_context *ctx, int shift)
{
    memmove(ctx->buf_start + shift, ctx->buf_start,
        (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
    union buflib_data *handle;
    for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
        if (handle->alloc)
            /* handle->alloc is a byte pointer, so scale the shift to bytes */
            handle->alloc += shift * sizeof(union buflib_data);
    ctx->first_free_block += shift;
    ctx->buf_start += shift;
    ctx->alloc_end += shift;
}

/* Shift buffered items up by size bytes, or as many as possible if size == 0.
 * Set size to the number of bytes freed.
 */
void*
buflib_buffer_out(struct buflib_context *ctx, size_t *size)
{
    if (!ctx->compact)
        buflib_compact(ctx);
    size_t avail = ctx->last_handle - ctx->alloc_end;
    size_t avail_b = avail * sizeof(union buflib_data);
    if (*size && *size < avail_b)
    {
        avail = (*size + sizeof(union buflib_data) - 1)
            / sizeof(union buflib_data);
        avail_b = avail * sizeof(union buflib_data);
    }
    *size = avail_b;
    void *ret = ctx->buf_start;
    buflib_buffer_shift(ctx, avail);
    return ret;
}

/* Shift buffered items down by size bytes */
void
buflib_buffer_in(struct buflib_context *ctx, int size)
{
    size /= sizeof(union buflib_data);
    buflib_buffer_shift(ctx, -size);
}
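
/* Example for buflib_buffer_out()/buflib_buffer_in() (illustrative sketch):
 * lend out free space from the front of the buffer and give it back later.
 *
 *   size_t size = 0;                       // 0 = as much as possible
 *   void *borrowed = buflib_buffer_out(&ctx, &size);
 *   // ... use size bytes at borrowed ...
 *   buflib_buffer_in(&ctx, size);          // return the space
 */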

/* Allocate a buffer of size bytes, returning a handle for it */
int
buflib_alloc(struct buflib_context *ctx, size_t size)
{
    return buflib_alloc_ex(ctx, size, "<anonymous>", &default_callbacks);
}

/* Allocate a buffer of size bytes, returning a handle for it.
 *
 * The additional name parameter gives the allocation a human-readable name,
 * and the ops parameter points to caller-implemented callbacks for moving and
 * shrinking; pass NULL for the default callbacks.
 */
int
buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                struct buflib_callbacks *ops)
{
    /* busy wait if there's a thread owning the lock */
    while (ctx->handle_lock != 0) YIELD();

    union buflib_data *handle, *block;
    /* include the terminating NUL when reserving room for the name */
    size_t name_len = name ? B_ALIGN_UP(strlen(name) + 1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to handle table entry,
            * name length, and the ops pointer */
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again.
         */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
        {   /* first try to shrink the alloc before the handle table
             * to make room for new handles */
            int handle = ctx->handle_table - ctx->last_handle;
            union buflib_data* last_block = handle_to_block(ctx, handle);
            struct buflib_callbacks* ops = last_block[2].ops;
            if (ops && ops->shrink_callback)
            {
                char *data = buflib_get_data(ctx, handle);
                unsigned hint = BUFLIB_SHRINK_POS_BACK | 10*sizeof(union buflib_data);
                if (ops->shrink_callback(handle, hint, data,
                        (char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK)
                {   /* retry one more time */
                    goto handle_alloc;
                }
            }
            return 0;
        }
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = ctx->first_free_block;; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        if (buflib_compact_and_shrink(ctx,
                (size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK))
        {
            goto buffer_alloc;
        } else {
            handle->val = 1;
            handle_free(ctx, handle);
            return 0;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle.
     */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops ?: &default_callbacks;
    if (name)
        strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block.
     */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
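
/* Sketch of a caller-provided callback set for buflib_alloc_ex()
 * (illustrative; the exact callback signatures are declared in buflib.h and
 * are assumed here to match the call sites above):
 *
 *   static int my_shrink(int handle, unsigned hints,
 *                        void* start, size_t old_size)
 *   {
 *       // give up the requested amount, e.g. from the back, if possible
 *       return BUFLIB_CB_OK;
 *   }
 *   static struct buflib_callbacks my_ops = {
 *       .move_callback   = NULL,      // this allocation may not be moved
 *       .shrink_callback = my_shrink,
 *   };
 *   int h = buflib_alloc_ex(&ctx, 64 << 10, "audiobuf", &my_ops);
 */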

/* Free the buffer associated with handle_num. */
void
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block = ctx->first_free_block,
                      *next_block = block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one.
     */
    while (next_block < freed_block)
    {
        block = next_block;
        next_block += abs(block->val);
    }
    /* If next_block == block, the above loop didn't go anywhere. If it did,
     * and the block before this one is empty, we can combine them.
     */
    if (next_block == freed_block && next_block != block && block->val < 0)
        block->val -= freed_block->val;
    /* Otherwise, set block to the newly-freed block, and mark it free, before
     * continuing on, since the code below expects block to point to a free
     * block which may have free space after it.
     */
    else
    {
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact.
     */
    else {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;
    /* If this block is before first_free_block, it becomes the new starting
     * point for free-block search.
     */
    if (block < ctx->first_free_block)
        ctx->first_free_block = block;

    /* if the handle is the one acquired with buflib_alloc_maximum(),
     * unlock buflib_alloc() as part of the free */
    if (ctx->handle_lock == handle_num)
        ctx->handle_lock = 0;
}

/* Return the maximum allocatable memory in bytes */
size_t
buflib_available(struct buflib_context* ctx)
{
    /* subtract 5 elements for
     * val, handle, name_len, ops and the handle table entry */
    ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - 5);
    diff *= sizeof(union buflib_data); /* make it bytes */
    diff -= 16; /* reserve 16 for the name */

    /* diff is signed so the check below catches the case where fewer
     * than 5 elements are free */
    if (diff > 0)
        return diff;
    else
        return 0;
}
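
/* Worked example for buflib_available() (assuming a 4-byte union
 * buflib_data): with 105 free elements between alloc_end and last_handle,
 * the result is (105 - 5) * 4 - 16 = 384 bytes. */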

/*
 * Allocate all available (as returned by buflib_available()) memory and return
 * a handle to it.
 *
 * This grabs a lock which can only be unlocked by buflib_free() or
 * buflib_shrink(), to protect from further allocations (which couldn't be
 * serviced anyway).
 */
int
buflib_alloc_maximum(struct buflib_context* ctx, const char* name, size_t *size,
                     struct buflib_callbacks *ops)
{
    int handle;

    /* limit the name to 16 bytes since that's what buflib_available()
     * accounts for */
    char buf[16];
    *size = buflib_available(ctx);
    strlcpy(buf, name, sizeof(buf));
    handle = buflib_alloc_ex(ctx, *size, buf, ops);

    if (handle > 0) /* failure should not happen here, but check anyway */
        ctx->handle_lock = handle;

    return handle;
}
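
/* Example for buflib_alloc_maximum() (illustrative sketch; `needed` is a
 * hypothetical byte count): grab all remaining space, use part of it, then
 * shrink to release the rest and the allocation lock.
 *
 *   size_t size;
 *   int h = buflib_alloc_maximum(&ctx, "staging", &size, &ops);
 *   if (h > 0)
 *   {
 *       char *data = buflib_get_data(&ctx, h);
 *       // ... use the first `needed` bytes ...
 *       buflib_shrink(&ctx, h, data, needed);  // also releases the lock
 *   }
 */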

/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Growing is not possible, therefore new_start and
 * new_start + new_size must be within the original allocation.
 */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;

    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
                      *old_next_block = block + block->val,
                      /* newstart isn't necessarily properly aligned but it
                       * needn't be since it's only dereferenced by the user code */
                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
                      *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;

    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;

    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move the metadata over, i.e. the pointer to the handle table entry
         * and the name.
         * This is actually the point of no return. Data in the allocation is
         * being modified, and therefore we must successfully finish the shrink
         * operation. */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated */
        block->val = block - new_block;
        union buflib_data *freed_block = block,
                          *free_before = ctx->first_free_block,
                          *next_block = free_before;
        /* We need to find the block before the current one, to see if it is
         * free and can be merged with this one.
         */
        while (next_block < freed_block)
        {
            free_before = next_block;
            next_block += abs(free_before->val);
        }
        /* If next_block == free_before, the above loop didn't go anywhere.
         * If it did, and the block before this one is empty, we can combine
         * them.
         */
        if (next_block == freed_block && next_block != free_before && free_before->val < 0)
            free_before->val += freed_block->val;
        else if (next_block == free_before)
            ctx->first_free_block = freed_block;

        /* We didn't handle size changes yet; assign block to the new one.
         * The code below wants block, whether it changed or not. */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge the next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else
        {   /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
        /* update first_free_block for the newly created free space */
        if (ctx->first_free_block > new_next_block)
            ctx->first_free_block = new_next_block;
    }

    /* if the handle is the one acquired with buflib_alloc_maximum(),
     * unlock buflib_alloc() as part of the shrink */
    if (ctx->handle_lock == handle)
        ctx->handle_lock = 0;

    return true;
}

const char* buflib_get_name(struct buflib_context *ctx, int handle)
{
    union buflib_data *data = (union buflib_data*)ALIGN_DOWN(
                (intptr_t)buflib_get_data(ctx, handle), sizeof(*data));
    size_t len = data[-1].val;
    if (len <= 1)
        return NULL;
    return data[-len].name;
}

void buflib_print_allocs(struct buflib_context *ctx, void (*print)(const char*))
{
    union buflib_data *this, *end = ctx->handle_table;
    char buf[128];
    for(this = end - 1; this >= ctx->last_handle; this--)
    {
        if (!this->alloc) continue;

        int handle_num;
        const char *name;
        union buflib_data *block_start, *alloc_start;
        intptr_t alloc_len;

        handle_num = end - this;
        alloc_start = buflib_get_data(ctx, handle_num);
        name = buflib_get_name(ctx, handle_num);
        block_start = (union buflib_data*)name - 3;
        alloc_len = block_start->val * sizeof(union buflib_data);

        snprintf(buf, sizeof(buf),
                "%s(%d):\t%p\n"
                "   \t%p\n"
                "   \t%ld\n",
                name ?: "(null)", handle_num, block_start, alloc_start,
                (long)alloc_len);
        print(buf);
    }
}

void buflib_print_blocks(struct buflib_context *ctx, void (*print)(const char*))
{
    for(union buflib_data* this = ctx->buf_start;
                           this < ctx->alloc_end;
                           this += abs(this->val))
    {
        char buf[128] = { 0 };
        snprintf(buf, sizeof(buf), "%8p: val: %4ld (%s)",
                 this, (long)this->val,
                 this->val > 0 ? this[3].name : "<unallocated>");
        print(buf);
    }
}