/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * This is a memory allocator designed to provide reasonable management of free
 * space and fast access to allocated data. More than one allocator can be used
 * at a time by initializing multiple contexts.
 *
 * Copyright (C) 2009 Andrew Mahone
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h> /* for abs() */
#include <stdio.h> /* for snprintf() */
#include <string.h> /* for memmove(), strcpy(), strlen() */
#include "buflib.h"
#include "string-extra.h" /* for strlcpy() */
#include "debug.h" /* for DEBUGF() */
#include "system.h" /* for ALIGN_*() */

/* The main goal of this design is fast fetching of the pointer for a handle.
 * For that reason, the handles are stored in a table at the end of the buffer
 * with a fixed address, so that returning the pointer for a handle is a simple
 * table lookup. To reduce the frequency with which allocated blocks will need
 * to be moved to free space, allocations grow up in address from the start of
 * the buffer. The buffer is treated as an array of union buflib_data. Blocks
 * start with a length marker, which is included in their length. Free blocks
 * are marked by negative length, allocated ones use a buflib_data in
 * the block to store a pointer to their handle table entry, so that it can be
 * quickly found and updated during compaction. Followed by that, there's
 * the pointer to the corresponding struct buflib_callbacks. That pointer is
 * followed by a character array containing the string identifier of the
 * allocation. After the array there is another buflib_data containing the
 * length of that string + the size of this buflib_data.
 *
 * The allocator functions are passed a context struct so that two allocators
 * can be run, for example, one per core may be used, with convenience wrappers
 * for the single-allocator case that use a predefined context.
 */

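/* For illustration only (not used by the code): assuming a 4-byte
 * union buflib_data and the name "led", an allocated block is laid out as
 *
 *   block[0].val     length of the whole block, in buflib_data units
 *   block[1].handle  pointer to the handle table entry
 *   block[2].ops     pointer to the struct buflib_callbacks
 *   block[3..N]      name characters ("led\0")
 *   block[N+1].val   name field length + 1, here 2
 *   block[N+2...]    user data; handle->alloc points here
 *
 * buflib_get_data() returns the user data pointer, and buflib_get_name()
 * walks backwards from it using the stored name length.
 */
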
/* Use this for the default callbacks.
 *
 * The default callbacks do nothing, therefore the address of this
 * acts as a magic value so that the default callbacks need not even
 * be called.
 */
static struct buflib_callbacks default_callbacks;

#ifdef ROCKBOX
#define YIELD() yield()
#elif defined(__unix) && (__unix == 1)
#include <sched.h>
#define YIELD() sched_yield()
#else
#warning YIELD not defined. Will busy-wait
#define YIELD()
#endif

#define B_ALIGN_DOWN(x) \
    ALIGN_DOWN(x, sizeof(union buflib_data))

#define B_ALIGN_UP(x) \
    ALIGN_UP(x, sizeof(union buflib_data))

/* Debug printf, compiled out unless buflib debugging is enabled */
#ifdef BUFLIB_DEBUG
#define BDEBUGF DEBUGF
#else
#define BDEBUGF(...) do { } while(0)
#endif

/* Initialize buffer manager */
void
buflib_init(struct buflib_context *ctx, void *buf, size_t size)
{
    union buflib_data *bd_buf = buf;

    /* Align on sizeof(buflib_data), to prevent unaligned access */
    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
    size /= sizeof(union buflib_data);
    /* The handle table is initialized with no entries */
    ctx->handle_table = bd_buf + size;
    ctx->last_handle = bd_buf + size;
    ctx->first_free_handle = bd_buf + size - 1;
    ctx->first_free_block = bd_buf;
    ctx->buf_start = bd_buf;
    /* A marker is needed for the end of allocated data, to make sure that it
     * does not collide with the handle table, and to detect end-of-buffer.
     */
    ctx->alloc_end = bd_buf;
    ctx->compact = true;

    BDEBUGF("buflib initialized with %zu.%02zu kiB",
            size / 1024, (size % 1024) * 100 / 1024);
}

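/* A minimal usage sketch (illustrative only, therefore compiled out;
 * the buffer size and function name are arbitrary):
 */
#if 0
static void example_init(void)
{
    static char buffer[32 * 1024];
    static struct buflib_context ctx;
    /* One context manages one buffer; multiple contexts may coexist. */
    buflib_init(&ctx, buffer, sizeof(buffer));
}
#endif
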
/* Allocate a new handle, returning 0 on failure */
static inline
union buflib_data* handle_alloc(struct buflib_context *ctx)
{
    union buflib_data *handle;
    /* first_free_handle is a lower bound on free handles, work through the
     * table from there until a handle containing NULL is found, or the end
     * of the table is reached.
     */
    for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
        if (!handle->alloc)
            break;
    /* If the search went past the end of the table, it means we need to extend
     * the table to get a new handle.
     */
    if (handle < ctx->last_handle)
    {
        if (handle >= ctx->alloc_end)
            ctx->last_handle--;
        else
            return NULL;
    }
    handle->val = -1;
    return handle;
}

/* Free one handle, shrinking the handle table if it's the last one */
static inline
void handle_free(struct buflib_context *ctx, union buflib_data *handle)
{
    handle->alloc = 0;
    /* Update free handle lower bound if this handle has a lower index than the
     * old one.
     */
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle;
    if (handle == ctx->last_handle)
        ctx->last_handle++;
    else
        ctx->compact = false;
}

/* Get the start block of an allocation */
static union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
{
    union buflib_data* name_field =
                (union buflib_data*)buflib_get_name(ctx, handle);

    return name_field - 3;
}

/* Shrink the handle table, returning true if its size was reduced, false if
 * not.
 */
static inline
bool
handle_table_shrink(struct buflib_context *ctx)
{
    bool rv;
    union buflib_data *handle;
    for (handle = ctx->last_handle; !(handle->alloc); handle++);
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle - 1;
    /* shrunk if the first still-used handle lies above the old last_handle */
    rv = handle != ctx->last_handle;
    ctx->last_handle = handle;
    return rv;
}

/* If shift is non-zero, it represents the number of places to move
 * blocks in memory. Calculate the new address for this block,
 * update its entry in the handle table, and then move its contents.
 *
 * Returns false if the block is not movable.
 */
static bool
move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
{
    char* new_start;
    union buflib_data *new_block, *tmp = block[1].handle;
    struct buflib_callbacks *ops = block[2].ops;
    if (ops && !ops->move_callback)
        return false;

    int handle = ctx->handle_table - tmp;
    BDEBUGF("%s(): moving \"%s\"(id=%d) by %d(%d)\n", __func__, block[3].name,
            handle, shift, shift*(int)sizeof(union buflib_data));
    new_block = block + shift;
    new_start = tmp->alloc + shift*(int)sizeof(union buflib_data);
    /* call the callback before moving, the default one needn't be called */
    if (ops)
        ops->move_callback(handle, tmp->alloc, new_start);

    tmp->alloc = new_start; /* update handle table */
    memmove(new_block, block, block->val * sizeof(union buflib_data));

    return true;
}

/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *first_free = ctx->first_free_block, *block;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    for(block = first_free; block != ctx->alloc_end; block += len)
    {
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;
            len = -len;
            continue;
        }
        /* attempt to fill any hole */
        if (abs(ctx->first_free_block->val) > block->val)
        {
            intptr_t size = first_free->val;
            if (move_block(ctx, block, first_free - block))
            {
                /* moving was successful; mark the space behind the moved
                 * block as the new first free block and merge it with the
                 * free space the move left behind */
                block = ctx->first_free_block;
                ctx->first_free_block += block->val;
                ctx->first_free_block->val = size + block->val;
                continue;
            }
        }
        /* attempt to move the allocation by shift */
        if (shift)
        {
            /* failing to move creates a hole, therefore mark this
             * block as not allocated anymore and move first_free_block up */
            if (!move_block(ctx, block, shift))
            {
                union buflib_data* hole = block + shift;
                hole->val = shift; /* negative length marks it free */
                if (ctx->first_free_block > hole)
                    ctx->first_free_block = hole;
                shift = 0;
            }
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed.
     */
    ctx->alloc_end += shift;
    /* only move first_free_block up if it wasn't already moved up by a hole */
    if (ctx->first_free_block > ctx->alloc_end)
        ctx->first_free_block = ctx->alloc_end;
    ctx->compact = true;
    return ret || shift;
}

/* Compact the buffer by trying both shrinking and moving.
 *
 * Try to move first. If unsuccessful, try to shrink. If that was successful
 * try to move once more as there might be more room now.
 */
static bool
buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
{
    bool result = false;
    /* if something compacted before already there will be no further gain */
    if (!ctx->compact)
        result = buflib_compact(ctx);
    if (!result)
    {
        union buflib_data* this;
        for(this = ctx->buf_start; this < ctx->alloc_end; this += abs(this->val))
        {
            if (this->val > 0 && this[2].ops
                              && this[2].ops->shrink_callback)
            {
                int ret;
                int handle = ctx->handle_table - this[1].handle;
                char* data = this[1].handle->alloc;
                ret = this[2].ops->shrink_callback(handle, shrink_hints,
                                        data, (char*)(this+this->val)-data);
                result |= (ret == BUFLIB_CB_OK);
                /* this might have changed in the callback (if
                 * it shrank from the top), get it again */
                this = handle_to_block(ctx, handle);
            }
        }
        /* shrinking was successful at least once, try compaction again */
        if (result)
            result |= buflib_compact(ctx);
    }
    return result;
}

/* Shift buffered items by size units, and update handle pointers. The shift
 * value must be determined to be safe *before* calling.
 */
static void
buflib_buffer_shift(struct buflib_context *ctx, int shift)
{
    memmove(ctx->buf_start + shift, ctx->buf_start,
        (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
    union buflib_data *handle;
    for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
        if (handle->alloc)
            handle->alloc += shift * (int)sizeof(union buflib_data);
    ctx->first_free_block += shift;
    ctx->buf_start += shift;
    ctx->alloc_end += shift;
}

/* Shift buffered items up by size bytes, or as many as possible if size == 0.
 * Set size to the number of bytes freed.
 */
void*
buflib_buffer_out(struct buflib_context *ctx, size_t *size)
{
    if (!ctx->compact)
        buflib_compact(ctx);
    size_t avail = ctx->last_handle - ctx->alloc_end;
    size_t avail_b = avail * sizeof(union buflib_data);
    if (*size && *size < avail_b)
    {
        avail = (*size + sizeof(union buflib_data) - 1)
            / sizeof(union buflib_data);
        avail_b = avail * sizeof(union buflib_data);
    }
    *size = avail_b;
    void *ret = ctx->buf_start;
    buflib_buffer_shift(ctx, avail);
    return ret;
}

/* Shift buffered items down by size bytes */
void
buflib_buffer_in(struct buflib_context *ctx, int size)
{
    size /= sizeof(union buflib_data);
    buflib_buffer_shift(ctx, -size);
}

/* Allocate a buffer of size bytes, returning a handle for it */
int
buflib_alloc(struct buflib_context *ctx, size_t size)
{
    return buflib_alloc_ex(ctx, size, "<anonymous>", &default_callbacks);
}

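/* A minimal allocation round-trip (illustrative only, therefore compiled
 * out; assumes a context set up as in the init sketch above):
 */
#if 0
static void example_alloc(struct buflib_context *ctx)
{
    int handle = buflib_alloc(ctx, 1024);
    if (handle > 0)
    {
        /* Handles stay valid across compaction; raw pointers must be
         * re-fetched after any call that may move blocks. */
        char *data = buflib_get_data(ctx, handle);
        memset(data, 0, 1024);
        buflib_free(ctx, handle);
    }
}
#endif
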
/* Allocate a buffer of size bytes, returning a handle for it.
 *
 * The additional name parameter gives the allocation a human-readable name,
 * the ops parameter points to caller-implemented callbacks for moving and
 * shrinking. NULL for default callbacks.
 */
int
buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                struct buflib_callbacks *ops)
{
    /* busy wait if there's a thread owning the lock */
    while (ctx->handle_lock != 0) YIELD();

    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to handle table entry and
            * name length, and the ops pointer */
           + 4 + name_len/sizeof(union buflib_data);
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again.
         */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
        {   /* first try to shrink the alloc before the handle table
             * to make room for new handles */
            int handle = ctx->handle_table - ctx->last_handle;
            union buflib_data* last_block = handle_to_block(ctx, handle);
            struct buflib_callbacks* ops = last_block[2].ops;
            if (ops && ops->shrink_callback)
            {
                char *data = buflib_get_data(ctx, handle);
                unsigned hint = BUFLIB_SHRINK_POS_BACK | 10*sizeof(union buflib_data);
                if (ops->shrink_callback(handle, hint, data,
                        (char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK)
                {   /* retry one more time */
                    goto handle_alloc;
                }
            }
            return 0;
        }
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = ctx->first_free_block;;block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        if (buflib_compact_and_shrink(ctx,
                    (size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK))
            goto buffer_alloc;
        handle_free(ctx, handle);
        return 0;
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle.
     */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops ?: &default_callbacks;
    strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block.
     */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}

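/* Sketch of an allocation with caller-supplied callbacks (illustrative
 * only, therefore compiled out). The callback signatures follow the calls
 * made by move_block() and buflib_compact_and_shrink() above.
 */
#if 0
static int example_move_cb(int handle, void* current, void* new)
{
    /* Re-point any cached pointers from current to new here. */
    (void)handle; (void)current; (void)new;
    return BUFLIB_CB_OK;
}

static int example_shrink_cb(int handle, unsigned hints, void* start, size_t old_size)
{
    /* Give up memory according to hints, e.g. via buflib_shrink(),
     * then report success. */
    (void)handle; (void)hints; (void)start; (void)old_size;
    return BUFLIB_CB_OK;
}

static struct buflib_callbacks example_ops = {
    .move_callback = example_move_cb,
    .shrink_callback = example_shrink_cb,
};

/* usage: int h = buflib_alloc_ex(&ctx, 4096, "example", &example_ops); */
#endif
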
/* Free the buffer associated with handle_num. */
int
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block = ctx->first_free_block,
                      *next_block = block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one.
     */
    while (next_block < freed_block)
    {
        block = next_block;
        next_block += abs(block->val);
    }
    /* If next_block == block, the above loop didn't go anywhere. If it did,
     * and the block before this one is empty, we can combine them.
     */
    if (next_block == freed_block && next_block != block && block->val < 0)
        block->val -= freed_block->val;
    /* Otherwise, set block to the newly-freed block, and mark it free, before
     * continuing on, since the code below expects block to point to a free
     * block which may have free space after it.
     */
    else
    {
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact.
     */
    else
    {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;
    /* If this block is before first_free_block, it becomes the new starting
     * point for free-block search.
     */
    if (block < ctx->first_free_block)
        ctx->first_free_block = block;

    /* if the handle is the one acquired with buflib_alloc_maximum()
     * unlock buflib_alloc() as part of the free */
    if (ctx->handle_lock == handle_num)
        ctx->handle_lock = 0;

    return 0;
}

/* Return the maximum allocatable memory in bytes */
size_t
buflib_available(struct buflib_context* ctx)
{
    /* subtract 5 elements for
     * val, handle, name_len, ops and the handle table entry */
    ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - 5);
    diff *= sizeof(union buflib_data); /* make it bytes */
    diff -= 16; /* reserve 16 bytes for the name */

    if (diff > 0)
        return diff;
    else
        return 0;
}

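/* Worked example (illustrative only, assuming a 4-byte union buflib_data):
 * if 25 elements (100 bytes) separate alloc_end from the handle table,
 * buflib_available() reports (25 - 5) * 4 - 16 = 64 bytes, i.e. the gap
 * minus 20 bytes of metadata and 16 bytes reserved for the name.
 */
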
/*
 * Allocate all available (as returned by buflib_available()) memory and return
 * a handle to it.
 *
 * This grabs a lock which can only be unlocked by buflib_free() or
 * buflib_shrink(), to protect from further allocations (which couldn't be
 * serviced anyway).
 */
int
buflib_alloc_maximum(struct buflib_context* ctx, const char* name, size_t *size, struct buflib_callbacks *ops)
{
    int handle;
    char buf[16];

    /* limit the name to 16 bytes, since that's what buflib_available()
     * accounts for */
    *size = buflib_available(ctx);
    strlcpy(buf, name, sizeof(buf));
    handle = buflib_alloc_ex(ctx, *size, buf, ops);

    if (handle > 0) /* shouldn't fail, but check anyway */
        ctx->handle_lock = handle;

    return handle;
}

/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Growing is not possible, therefore new_start and
 * new_start + new_size must be within the original allocation.
 */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;

    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
                      *old_next_block = block + block->val,
                /* newstart isn't necessarily properly aligned but it
                 * needn't be since it's only dereferenced by the user code */
                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
                      *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;

    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;

    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move metadata over, i.e. pointer to handle table entry and name.
         * This is actually the point of no return. Data in the allocation is
         * being modified, and therefore we must successfully finish the shrink
         * operation. */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated */
        block->val = block - new_block;
        union buflib_data *freed_block = block,
                          *free_before = ctx->first_free_block,
                          *next_block = free_before;
        /* We need to find the block before the current one, to see if it is
         * free and can be merged with this one.
         */
        while (next_block < freed_block)
        {
            free_before = next_block;
            next_block += abs(free_before->val);
        }
        /* If next_block == free_before, the above loop didn't go anywhere.
         * If it did, and the block before this one is empty, we can combine
         * them.
         */
        if (next_block == freed_block && next_block != free_before && free_before->val < 0)
            free_before->val += freed_block->val;
        else if (next_block == free_before)
            ctx->first_free_block = freed_block;

        /* We didn't handle size changes yet, assign block to the new one;
         * the code below wants block whether it changed or not. */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else
        {   /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
        /* update first_free_block for the newly created free space */
        if (ctx->first_free_block > new_next_block)
            ctx->first_free_block = new_next_block;
    }

    /* if the handle is the one acquired with buflib_alloc_maximum()
     * unlock buflib_alloc() as part of the shrink */
    if (ctx->handle_lock == handle)
        ctx->handle_lock = 0;

    return true;
}

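/* Shrink sketch (illustrative only, therefore compiled out): give back the
 * rear half of an allocation while keeping the handle valid.
 */
#if 0
static void example_shrink(struct buflib_context *ctx, int handle, size_t old_size)
{
    char *data = buflib_get_data(ctx, handle);
    /* keep the same start, halve the size; the freed rear part becomes a
     * free block (or extends the free space at alloc_end) */
    buflib_shrink(ctx, handle, data, old_size / 2);
}
#endif
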
const char* buflib_get_name(struct buflib_context *ctx, int handle)
{
    union buflib_data *data = (union buflib_data*)ALIGN_DOWN(
            (intptr_t)buflib_get_data(ctx, handle), sizeof (*data));
    size_t len = data[-1].val;
    if (len <= 1)
        return NULL;
    return data[-len].name;
}

void buflib_print_allocs(struct buflib_context *ctx, void (*print)(const char*))
{
    union buflib_data *this, *end = ctx->handle_table;
    char buf[128];
    for(this = end - 1; this >= ctx->last_handle; this--)
    {
        if (!this->alloc) continue;

        int handle_num;
        const char* name;
        union buflib_data *block_start, *alloc_start;
        intptr_t alloc_len;

        handle_num = end - this;
        alloc_start = buflib_get_data(ctx, handle_num);
        name = buflib_get_name(ctx, handle_num);
        block_start = (union buflib_data*)name - 3;
        alloc_len = block_start->val * sizeof(union buflib_data);

        snprintf(buf, sizeof(buf), "%s(%d):\t%p\t%p\t%ld\n",
                name?:"(null)", handle_num, block_start, alloc_start,
                (long)alloc_len);
        print(buf);
    }
}

void buflib_print_blocks(struct buflib_context *ctx, void (*print)(const char*))
{
    for(union buflib_data* this = ctx->buf_start;
                           this < ctx->alloc_end;
                           this += abs(this->val))
    {
        char buf[128] = { 0 };
        snprintf(buf, sizeof(buf), "%8p: val: %4ld (%s)",
                this, (long)this->val,
                this->val > 0 ? this[3].name : "<unallocated>");
        print(buf);
    }
}