// Written in the D programming language.
/**
Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/kernighan_ritchie.d)
*/
module std.experimental.allocator.building_blocks.kernighan_ritchie;
import std.experimental.allocator.building_blocks.null_allocator :
    NullAllocator;

//debug = KRRegion;
debug(KRRegion) import std.stdio;

// KRRegion
/**
`KRRegion` draws inspiration from the $(MREF_ALTTEXT region allocation
strategy, std,experimental,allocator,building_blocks,region) and also the
$(HTTP stackoverflow.com/questions/13159564/explain-this-implementation-of-malloc-from-the-kr-book,
famed allocator) described by Brian Kernighan and Dennis Ritchie in section 8.7
of the book $(HTTP amazon.com/exec/obidos/ASIN/0131103628/classicempire, "The C
Programming Language"), Second Edition, Prentice Hall, 1988.

$(H4 `KRRegion` = `Region` + Kernighan-Ritchie Allocator)

Initially, `KRRegion` starts in "region" mode: allocations are served from
the memory chunk in a region fashion. Thus, as long as there is enough memory
left, `KRRegion.allocate` has the performance profile of a region allocator.
Deallocation inserts (in $(BIGOH 1) time) the deallocated blocks into an
unstructured freelist, which is not read in region mode.

Once the region cannot serve an `allocate` request, `KRRegion` switches
to "free list" mode. It sorts the list of previously deallocated blocks by
address and serves allocation requests off that free list. Allocation and
deallocation follow the pattern described by Kernighan and Ritchie.

The recommended use of `KRRegion` is as a $(I region with deallocation). If the
`KRRegion` is dimensioned appropriately, it may never enter free list mode
during its lifetime. Thus it is as fast as a simple region, whilst offering
deallocation at a small cost. When the region memory is exhausted, the
previously deallocated memory is still usable, at a performance cost. If the
region is not excessively large and fragmented, the linear allocation and
deallocation cost may still be compensated for by the good locality
characteristics.

If the chunk of memory managed is large, it may be desirable to switch
management to free list from the beginning. That way, memory may be used in a
more compact manner than region mode. To force free list mode, call $(D
switchToFreeList) shortly after construction or when deemed appropriate.

The smallest size that can be allocated is two words (16 bytes on 64-bit
systems, 8 bytes on 32-bit systems). This is because the free list management
needs two words (one for the length, the other for the next pointer in the
singly-linked list).

The `ParentAllocator` type parameter is the type of the allocator used to
allocate the memory chunk underlying the `KRRegion` object. Choosing the
default (`NullAllocator`) means the user is responsible for passing a buffer
at construction (and for deallocating it if necessary). Otherwise, `KRRegion`
automatically deallocates the buffer during destruction. For that reason, if
`ParentAllocator` is not `NullAllocator`, then `KRRegion` is not
copyable.

$(H4 Implementation Details)

In free list mode, `KRRegion` embeds a free blocks list onto the chunk of
memory. The free list is circular, coalesced, and sorted by address at all
times. Allocations and deallocations take time proportional to the number of
previously deallocated blocks. (In practice the cost may be lower, e.g. if
memory is deallocated in reverse order of allocation, all operations take
constant time.) Memory utilization is good (small control structure and no
per-allocation overhead). The disadvantages of freelist mode include proneness
to fragmentation, a minimum allocation size of two words, and linear worst-case
allocation and deallocation times.

Similarities of `KRRegion` (in free list mode) with the
Kernighan-Ritchie allocator:

$(UL
$(LI Free blocks have variable size and are linked in a singly-linked list.)
$(LI The freelist is maintained in increasing address order, which makes
coalescing easy.)
$(LI The strategy for finding the next available block is first fit.)
$(LI The free list is circular, with the last node pointing back to the first.)
$(LI Coalescing is carried out during deallocation.)
)

Differences from the Kernighan-Ritchie allocator:

$(UL
$(LI Once the chunk is exhausted, the Kernighan-Ritchie allocator allocates
another chunk using operating system primitives. For better composability, $(D
KRRegion) just gets full (returns `null` on new allocation requests). The
decision to allocate more blocks is deferred to a higher-level entity. For an
example, see the example below using `AllocatorList` in conjunction with $(D
KRRegion).)
$(LI Allocated blocks do not hold a size prefix. This is because in D the size
information is available in client code at deallocation time.)
)
*/
struct KRRegion(ParentAllocator = NullAllocator)
{
    import std.experimental.allocator.common : stateSize, alignedAt;
    import std.traits : hasMember;
    import std.typecons : Ternary;

    private static struct Node
    {
        import std.typecons : tuple, Tuple;

        Node* next;
        size_t size;

        this(this) @disable;

        void[] payload() inout
        {
            return (cast(ubyte*) &this)[0 .. size];
        }

        bool adjacent(in Node* right) const
        {
            assert(right);
            auto p = payload;
            return p.ptr < right && right < p.ptr + p.length + Node.sizeof;
        }

        bool coalesce(void* memoryEnd = null)
        {
            // Coalesce the last node before the memory end with any possible gap
            if (memoryEnd
                && memoryEnd < payload.ptr + payload.length + Node.sizeof)
            {
                size += memoryEnd - (payload.ptr + payload.length);
                return true;
            }

            if (!adjacent(next)) return false;
            size = (cast(ubyte*) next + next.size) - cast(ubyte*) &this;
            next = next.next;
            return true;
        }

        Tuple!(void[], Node*) allocateHere(size_t bytes)
        {
            assert(bytes >= Node.sizeof);
            assert(bytes % Node.alignof == 0);
            assert(next);
            assert(!adjacent(next));
            if (size < bytes) return typeof(return)();
            assert(size >= bytes);
            immutable leftover = size - bytes;

            if (leftover >= Node.sizeof)
            {
                // There's room for another node
                auto newNode = cast(Node*) ((cast(ubyte*) &this) + bytes);
                newNode.size = leftover;
                newNode.next = next == &this ? newNode : next;
                assert(next);
                return tuple(payload, newNode);
            }

            // No slack space, just return next node
            return tuple(payload, next == &this ? null : next);
        }
    }
    // state
    /**
    If `ParentAllocator` holds state, `parent` is a public member of type
    `ParentAllocator`. Otherwise, `parent` is an `alias` for
    `ParentAllocator.instance`.
    */
    static if (stateSize!ParentAllocator) ParentAllocator parent;
    else alias parent = ParentAllocator.instance;
    private void[] payload;
    private Node* root;
    private bool regionMode() const { return bytesUsedRegionMode != size_t.max; }
    private void cancelRegionMode() { bytesUsedRegionMode = size_t.max; }
    private size_t bytesUsedRegionMode = 0;
    auto byNodePtr()
    {
        static struct Range
        {
            Node* start, current;
            @property bool empty() { return !current; }
            @property Node* front() { return current; }
            void popFront()
            {
                assert(current && current.next);
                current = current.next;
                if (current == start) current = null;
            }
            @property Range save() { return this; }
        }
        import std.range : isForwardRange;
        static assert(isForwardRange!Range);
        return Range(root, root);
    }
    string toString()
    {
        import std.format : format;
        string s = "KRRegion@";
        s ~= format("%s-%s(0x%s[%s] %s", &this, &this + 1,
            payload.ptr, payload.length,
            regionMode ? "(region)" : "(freelist)");

        Node* lastNode = null;
        if (!regionMode)
        {
            foreach (node; byNodePtr)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }
        else
        {
            for (auto node = root; node; node = node.next)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }

        s ~= ')';
        return s;
    }
    private void assertValid(string s)
    {
        assert(!regionMode);
        if (!payload.ptr)
        {
            assert(!root, s);
            return;
        }
        if (!root)
        {
            return;
        }
        assert(root >= payload.ptr, s);
        assert(root < payload.ptr + payload.length, s);

        // Check that the list terminates
        size_t n;
        foreach (node; byNodePtr)
        {
            assert(node.next);
            assert(!node.adjacent(node.next));
            assert(n++ < payload.length / Node.sizeof, s);
        }
    }
    private Node* sortFreelist(Node* root)
    {
        // Find a monotonic run
        auto last = root;
        for (;;)
        {
            if (!last.next) return root;
            if (last > last.next) break;
            assert(last < last.next);
            last = last.next;
        }
        auto tail = last.next;
        last.next = null;
        tail = sortFreelist(tail);
        return merge(root, tail);
    }

    private Node* merge(Node* left, Node* right)
    {
        assert(left != right);
        if (!left) return right;
        if (!right) return left;
        if (left < right)
        {
            auto result = left;
            result.next = merge(left.next, right);
            return result;
        }
        auto result = right;
        result.next = merge(left, right.next);
        return result;
    }

    private void coalesceAndMakeCircular()
    {
        for (auto n = root;;)
        {
            assert(!n.next || n < n.next);
            if (!n.next)
            {
                // Convert to circular
                n.next = root;
                break;
            }
            if (n.coalesce) continue; // possibly another coalesce
            n = n.next;
        }
    }
    /**
    Create a `KRRegion`. If `ParentAllocator` is not `NullAllocator`,
    `KRRegion`'s destructor will call `parent.deallocate`.

    Params:
    b = Block of memory to serve as support for the allocator. Memory must be
    at least four words long and word-aligned.
    n = Capacity desired. This constructor is defined only if $(D
    ParentAllocator) is not `NullAllocator`.
    */
    this(ubyte[] b)
    {
        if (b.length < Node.sizeof)
        {
            // Init as empty
            assert(root is null);
            assert(payload is null);
            return;
        }
        assert(b.length >= Node.sizeof);
        assert(b.ptr.alignedAt(Node.alignof));
        assert(b.length >= 2 * Node.sizeof);
        payload = b;
        root = cast(Node*) b.ptr;
        // Initialize the free list with the entire chunk as a single node
        assert(regionMode);
        root.next = null;
        root.size = b.length;
        debug(KRRegion) writefln("KRRegion@%s: init with %s[%s]", &this,
            b.ptr, b.length);
    }
    /// Ditto
    static if (!is(ParentAllocator == NullAllocator) && !stateSize!ParentAllocator)
    this(size_t n)
    {
        assert(n > Node.sizeof);
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator) && stateSize!ParentAllocator)
    this(ParentAllocator parent, size_t n)
    {
        assert(n > Node.sizeof);
        this.parent = parent;
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator)
        && hasMember!(ParentAllocator, "deallocate"))
    ~this()
    {
        parent.deallocate(payload);
    }
    /**
    Forces free list mode. If already in free list mode, does nothing.
    Otherwise, sorts the free list accumulated so far and switches strategy for
    future allocations to KR style.
    */
    void switchToFreeList()
    {
        if (!regionMode) return;
        cancelRegionMode;
        if (!root) return;
        root = sortFreelist(root);
        coalesceAndMakeCircular;
    }

    /**
    Noncopyable
    */
    @disable this(this);

    /**
    Word-level alignment.
    */
    enum alignment = Node.alignof;
    /**
    Allocates `n` bytes. Allocation searches the list of available blocks
    until a free block with `n` or more bytes is found (first fit strategy).
    The block is split (if larger) and returned.

    Params: n = number of bytes to _allocate

    Returns: A word-aligned buffer of `n` bytes, or `null`.
    */
    void[] allocate(size_t n)
    {
        if (!n || !root) return null;
        const actualBytes = goodAllocSize(n);

        // Try the region first
        if (regionMode)
        {
            // Only look at the head of the freelist
            if (root.size >= actualBytes)
            {
                // Enough room for allocation
                bytesUsedRegionMode += actualBytes;
                void* result = root;
                immutable balance = root.size - actualBytes;
                if (balance >= Node.sizeof)
                {
                    auto newRoot = cast(Node*) (result + actualBytes);
                    newRoot.next = root.next;
                    newRoot.size = balance;
                    root = newRoot;
                }
                else
                {
                    root = null;
                    switchToFreeList;
                }
                return result[0 .. n];
            }

            // Not enough memory, switch to freelist mode and fall through
            switchToFreeList;
        }

        // Try to allocate from next after the iterating node
        for (auto pnode = root;;)
        {
            assert(!pnode.adjacent(pnode.next));
            auto k = pnode.next.allocateHere(actualBytes);
            if (k[0] !is null)
            {
                // Allocation succeeded
                assert(k[0].length >= n);
                if (root == pnode.next) root = k[1];
                pnode.next = k[1];
                return k[0][0 .. n];
            }

            pnode = pnode.next;
            if (pnode == root) break;
        }
        return null;
    }
    /**
    Deallocates `b`, which is assumed to have been previously allocated with
    this allocator. Deallocation performs a linear search in the free list to
    preserve its sorting order. It follows that, in allocators with many free
    blocks, blocks at higher addresses are slower to deallocate.

    Params: b = block to be deallocated
    */
    nothrow @nogc
    bool deallocate(void[] b)
    {
        debug(KRRegion) writefln("KRRegion@%s: deallocate(%s[%s])", &this,
            b.ptr, b.length);
        if (!b.ptr) return true;
        assert(owns(b) == Ternary.yes);
        assert(b.ptr.alignedAt(Node.alignof));

        // Insert back in the freelist, keeping it sorted by address. Do not
        // coalesce at this time. Instead, do it lazily during allocation.
        auto n = cast(Node*) b.ptr;
        n.size = goodAllocSize(b.length);
        auto memoryEnd = payload.ptr + payload.length;

        if (regionMode)
        {
            assert(root);
            // Insert right after root
            bytesUsedRegionMode -= n.size;
            n.next = root.next;
            root.next = n;
            return true;
        }

        if (!root)
        {
            // What a sight for sore eyes
            root = n;
            root.next = root;

            // If the first block freed is the last one allocated,
            // maybe there's a gap after it.
            root.coalesce(memoryEnd);
            return true;
        }

        version (assert) foreach (test; byNodePtr)
        {
            assert(test != n);
        }
        // Linear search
        auto pnode = root;
        do
        {
            assert(pnode && pnode.next);
            assert(pnode != n);
            assert(pnode.next != n);

            if (pnode < pnode.next)
            {
                if (pnode > n || n > pnode.next) continue;
                // Insert in between pnode and pnode.next
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (pnode < n)
            {
                // Insert at the end of the list
                // Add any possible gap at the end of n to the length of n
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce(memoryEnd);
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (n < pnode.next)
            {
                // Insert at the front of the list
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                root = n;
                return true;
            }
        }
        while ((pnode = pnode.next) != root);
        assert(0, "Wrong parameter passed to deallocate");
    }
    /**
    Allocates all memory available to this allocator. If the allocator is empty,
    returns the entire available block of memory. Otherwise, it still performs
    a best-effort allocation: if there is no fragmentation (e.g. `allocate`
    has been used but not `deallocate`), allocates and returns the only
    available block of memory.

    The operation takes time proportional to the number of adjacent free blocks
    at the front of the free list. These blocks get coalesced, whether
    `allocateAll` succeeds or fails due to fragmentation.
    */
    void[] allocateAll()
    {
        if (regionMode) switchToFreeList;
        if (root && root.next == root)
            return allocate(root.size);
        return null;
    }
    ///
    @system unittest
    {
        import std.experimental.allocator.gc_allocator : GCAllocator;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        const b1 = alloc.allocate(2048);
        assert(b1.length == 2048);
        const b2 = alloc.allocateAll;
        assert(b2.length == 1024 * 62);
    }
    /**
    Deallocates all memory currently allocated, making the allocator ready for
    other allocations. This is a $(BIGOH 1) operation.
    */
    pure nothrow @nogc
    bool deallocateAll()
    {
        debug(KRRegion) assertValid("deallocateAll");
        debug(KRRegion) scope(exit) assertValid("deallocateAll");
        root = cast(Node*) payload.ptr;

        // Reset to regionMode
        bytesUsedRegionMode = 0;
        if (root)
        {
            root.next = null;
            root.size = payload.length;
        }
        return true;
    }
    /**
    Checks whether the allocator is responsible for the allocation of `b`.
    It does a simple $(BIGOH 1) range check. `b` should be a buffer either
    allocated with `this` or obtained through other means.
    */
    pure nothrow @trusted @nogc
    Ternary owns(void[] b)
    {
        debug(KRRegion) assertValid("owns");
        debug(KRRegion) scope(exit) assertValid("owns");
        return Ternary(b && payload && (&b[0] >= &payload[0])
            && (&b[0] < &payload[0] + payload.length));
    }
    /**
    Adjusts `n` to a size suitable for allocation (two words or larger,
    word-aligned).
    */
    pure nothrow @safe @nogc
    static size_t goodAllocSize(size_t n)
    {
        import std.experimental.allocator.common : roundUpToMultipleOf;
        return n <= Node.sizeof
            ? Node.sizeof : n.roundUpToMultipleOf(alignment);
    }
    /**
    Returns: `Ternary.yes` if the allocator is empty, `Ternary.no` otherwise.
    Never returns `Ternary.unknown`.
    */
    pure nothrow @safe @nogc
    Ternary empty()
    {
        if (regionMode)
            return Ternary(bytesUsedRegionMode == 0);

        return Ternary(root && root.size == payload.length);
    }
}
/**
`KRRegion` is preferable to `Region` as a front for a general-purpose
allocator if `deallocate` is needed, yet the actual deallocation traffic is
relatively low. The example below shows a `KRRegion` using stack storage
fronting the GC allocator.
*/
@system unittest
{
    import std.experimental.allocator.building_blocks.fallback_allocator
        : fallbackAllocator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;
    // KRRegion fronting a general-purpose allocator
    align(KRRegion!().alignment) ubyte[1024 * 128] buf;
    auto alloc = fallbackAllocator(KRRegion!()(buf), GCAllocator.instance);
    auto b = alloc.allocate(100);
    assert(b.length == 100);
    assert((() pure nothrow @safe @nogc => alloc.primary.owns(b))() == Ternary.yes);
}
/**
The code below defines a scalable allocator consisting of 1 MB (or larger)
blocks obtained from the operating system via `MmapAllocator`. Each block is
organized as a KR-style heap. More blocks are allocated and freed on a need
basis.

This is the closest example to the allocator introduced in the K$(AMP)R book.
It should perform slightly better because instead of searching through one
large free list, it searches through several shorter lists in LRU order. Also,
it actually returns memory to the operating system when possible.
*/
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;
    AllocatorList!(n => KRRegion!MmapAllocator(max(n * 16, 1024 * 1024))) alloc;
}
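
// Illustrative sketch (not an upstream test): forcing free list mode up
// front with `switchToFreeList`, as the module documentation recommends for
// large chunks. The buffer size here is arbitrary; any sufficiently large
// word-aligned buffer works.
@system unittest
{
    import std.typecons : Ternary;

    align(KRRegion!().alignment) ubyte[1024 * 4] buf;
    auto alloc = KRRegion!()(buf);
    alloc.switchToFreeList(); // switch strategy before the first allocation

    auto b = alloc.allocate(100);
    assert(b.length == 100);
    assert(alloc.deallocate(b));
    assert((() pure nothrow @safe @nogc => alloc.empty)() == Ternary.yes);
}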
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.typecons : Ternary;
    /*
    Create a scalable allocator consisting of 1 MB (or larger) blocks
    fetched from the C heap via Mallocator. Each block is organized as a
    KR-style heap. More blocks are allocated and freed on a need basis.
    */
    AllocatorList!(n => KRRegion!Mallocator(max(n * 16, 1024 * 1024)),
        NullAllocator) alloc;
    void[][50] array;
    foreach (i; 0 .. array.length)
    {
        auto length = i * 10_000 + 1;
        array[i] = alloc.allocate(length);
        assert(array[i].ptr);
        assert(array[i].length == length);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert(array[i].ptr);
        assert((() pure nothrow @safe @nogc => alloc.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { alloc.deallocate(array[i]); }();
    }
}
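
// Illustrative sketch (not an upstream test): per the "Differences" list in
// the module documentation, a full KRRegion simply returns null instead of
// growing; growth is delegated to a composite such as the AllocatorList in
// the tests above.
@system unittest
{
    align(KRRegion!().alignment) ubyte[256] buf;
    auto a = KRRegion!()(buf);
    auto b = a.allocate(256); // take the whole chunk
    assert(b.length == 256);
    assert(a.allocate(16) is null); // full: no attempt to grow
}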
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;
    import std.typecons : Ternary;
    /*
    Create a scalable allocator consisting of 1 MB (or larger) blocks
    mapped from the operating system via MmapAllocator. Each block is
    organized as a KR-style heap. More blocks are allocated and freed on
    a need basis.
    */
    AllocatorList!((n) {
        auto result = KRRegion!MmapAllocator(max(n * 2, 1024 * 1024));
        return result;
    }) alloc;
    void[][99] array;
    foreach (i; 0 .. array.length)
    {
        auto length = i * 10_000 + 1;
        array[i] = alloc.allocate(length);
        assert(array[i].ptr);
        foreach (j; 0 .. i)
        {
            assert(array[i].ptr != array[j].ptr);
        }
        assert(array[i].length == length);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert((() pure nothrow @safe @nogc => alloc.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { alloc.deallocate(array[i]); }();
    }
}
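
// Illustrative sketch (not an upstream test): the two-word minimum block
// size discussed in the module documentation. Even a 1-byte request reserves
// two words of the chunk, as goodAllocSize shows.
@system unittest
{
    align(KRRegion!().alignment) ubyte[1024] buf;
    auto a = KRRegion!()(buf);
    auto b = a.allocate(1);
    assert(b.length == 1);
    // Two words: 16 bytes on 64-bit systems, 8 bytes on 32-bit systems
    assert(KRRegion!().goodAllocSize(1) == 2 * size_t.sizeof);
}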
version (StdUnittest)
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.common : testAllocator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    testAllocator!(() => AllocatorList!(
        n => KRRegion!GCAllocator(max(n * 16, 1024 * 1024)))());
}
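
// Illustrative sketch (not an upstream test): with a non-NullAllocator
// parent, the KRRegion owns its chunk and returns it to the parent in its
// destructor, so no manual cleanup is needed at scope exit.
@system unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    {
        auto a = KRRegion!Mallocator(1024 * 64);
        auto b = a.allocate(100);
        assert(b.length == 100);
    } // ~this() hands the chunk back to Mallocator
}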
@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;

    auto alloc = KRRegion!GCAllocator(1024 * 1024);

    void[][] array;
    foreach (i; 1 .. 4)
    {
        array ~= alloc.allocate(i);
        assert(array[$ - 1].length == i);
    }
    () nothrow @nogc { alloc.deallocate(array[1]); }();
    () nothrow @nogc { alloc.deallocate(array[0]); }();
    () nothrow @nogc { alloc.deallocate(array[2]); }();
    assert(alloc.allocateAll().length == 1024 * 1024);
}
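
// Illustrative sketch (not an upstream test): deallocating in reverse order
// of allocation, in free list mode, keeps every insertion at the front of
// the freelist, where it coalesces immediately; this is the constant-time
// case mentioned in the implementation notes.
@system unittest
{
    import std.typecons : Ternary;

    align(KRRegion!().alignment) ubyte[1024] buf;
    auto a = KRRegion!()(buf);
    auto b1 = a.allocate(128);
    auto b2 = a.allocate(128);
    auto b3 = a.allocate(128);
    a.switchToFreeList();
    () nothrow @nogc { a.deallocate(b3); }();
    () nothrow @nogc { a.deallocate(b2); }();
    () nothrow @nogc { a.deallocate(b1); }();
    assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
}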
@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;
    auto alloc = KRRegion!()(
        cast(ubyte[])(GCAllocator.instance.allocate(1024 * 1024)));
    const store = alloc.allocate(KRRegion!().sizeof);
    auto p = cast(KRRegion!()*) store.ptr;
    import core.lifetime : emplace;
    import core.stdc.string : memcpy;
    import std.conv : text;

    memcpy(p, &alloc, alloc.sizeof);
    emplace(&alloc);

    void[][100] array;
    foreach (i; 0 .. array.length)
    {
        auto length = 100 * i + 1;
        array[i] = p.allocate(length);
        assert(array[i].length == length, text(array[i].length));
        assert((() pure nothrow @safe @nogc => p.owns(array[i]))() == Ternary.yes);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert((() pure nothrow @safe @nogc => p.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { p.deallocate(array[i]); }();
    }
    auto b = p.allocateAll();
    assert(b.length == 1024 * 1024 - KRRegion!().sizeof, text(b.length));
}
@system unittest
{
    import std.typecons : Ternary;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    auto alloc = KRRegion!()(
        cast(ubyte[])(GCAllocator.instance.allocate(1024 * 1024)));
    auto p = alloc.allocateAll();
    assert(p.length == 1024 * 1024);
    assert((() nothrow @nogc => alloc.deallocateAll())());
    assert(alloc.empty() == Ternary.yes);
    p = alloc.allocateAll();
    assert(p.length == 1024 * 1024);
}
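
// Illustrative sketch (not an upstream test): allocateAll is best-effort,
// per its documentation. It returns null while the freelist is fragmented
// (more than one free block) and succeeds once the blocks coalesce back
// into a single one.
@system unittest
{
    align(KRRegion!().alignment) ubyte[1024] buf;
    auto a = KRRegion!()(buf);
    auto b1 = a.allocate(64);
    auto b2 = a.allocate(64);
    () nothrow @nogc { a.deallocate(b1); }();
    // b1's block and the tail of the chunk are separated by b2: fragmentation
    assert(a.allocateAll() is null);
    () nothrow @nogc { a.deallocate(b2); }();
    assert(a.allocateAll().length == 1024);
}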
@system unittest
{
    import std.random : randomCover;
    import std.typecons : Ternary;

    // Both sequences must work on either system

    // A sequence of allocs which generates the error described in
    // https://issues.dlang.org/show_bug.cgi?id=16564
    // that is, a gap at the end of buf from the perspective of the allocator

    // for 64 bit systems (leftover balance = 8 bytes < 16)
    int[] sizes64 = [18904, 2008, 74904, 224, 111904, 1904, 52288, 8];

    // for 32 bit systems (leftover balance < 8)
    int[] sizes32 = [81412, 107068, 49892, 23768];

    void test(int[] sizes)
    {
        align(size_t.sizeof) ubyte[256 * 1024] buf;
        auto a = KRRegion!()(buf);

        void[][] bufs;

        foreach (size; sizes)
        {
            bufs ~= a.allocate(size);
        }

        foreach (b; bufs.randomCover)
        {
            () nothrow @nogc { a.deallocate(b); }();
        }

        assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
    }

    test(sizes64);
    test(sizes32);
}
@system unittest
{
    import std.typecons : Ternary;

    // For 64 bits, we allocate in multiples of 8, but the minimum alloc size is 16.
    // This can create gaps.
    // This test is an example of such a case. The gap is formed between the block
    // allocated for the second value in sizes and the third. There is also a gap
    // at the very end. (total lost 2 * word)

    int[] sizes64 = [2008, 18904, 74904, 224, 111904, 1904, 52288, 8];
    int[] sizes32 = [81412, 107068, 49892, 23768];

    int word64 = 8;
    int word32 = 4;

    void test(int[] sizes, int word)
    {
        align(size_t.sizeof) ubyte[256 * 1024] buf;
        auto a = KRRegion!()(buf);

        void[][] bufs;

        foreach (size; sizes)
        {
            bufs ~= a.allocate(size);
        }

        () nothrow @nogc { a.deallocate(bufs[1]); }();
        bufs ~= a.allocate(sizes[1] - word);

        () nothrow @nogc { a.deallocate(bufs[0]); }();
        foreach (i; 2 .. bufs.length)
        {
            () nothrow @nogc { a.deallocate(bufs[i]); }();
        }

        assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
    }

    test(sizes64, word64);
    test(sizes32, word32);
}
@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;

    auto a = KRRegion!GCAllocator(1024 * 1024);
    assert((() pure nothrow @safe @nogc => a.goodAllocSize(1))() == typeof(*a.root).sizeof);
}
@system unittest
{
    import std.typecons : Ternary;

    align(KRRegion!().alignment) ubyte[1024] b;
    auto alloc = KRRegion!()(b);

    auto k = alloc.allocate(128);
    assert(k.length == 128);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);

    k = alloc.allocate(512);
    assert(k.length == 512);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);

    k = alloc.allocate(1024);
    assert(k.length == 1024);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);
}
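
// Illustrative sketch (not an upstream test): owns is the O(1) range check
// documented above, so it also answers Ternary.no for buffers that were
// never allocated here.
@system unittest
{
    import std.typecons : Ternary;

    align(KRRegion!().alignment) ubyte[512] buf;
    auto a = KRRegion!()(buf);
    auto b = a.allocate(32);
    assert((() pure nothrow @safe @nogc => a.owns(b))() == Ternary.yes);
    ubyte[32] other;
    assert((() pure nothrow @safe @nogc => a.owns(other[]))() == Ternary.no);
}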