// Written in the D programming language.
/**
Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/kernighan_ritchie.d)
*/
module std.experimental.allocator.building_blocks.kernighan_ritchie;
import std.experimental.allocator.building_blocks.null_allocator;
//debug = KRRegion;
debug(KRRegion) import std.stdio;
// KRRegion
/**
`KRRegion` draws inspiration from the $(MREF_ALTTEXT region allocation
strategy, std,experimental,allocator,building_blocks,region) and also the
$(HTTP stackoverflow.com/questions/13159564/explain-this-implementation-of-malloc-from-the-kr-book,
famed allocator) described by Brian Kernighan and Dennis Ritchie in section 8.7
of the book $(HTTP amazon.com/exec/obidos/ASIN/0131103628/classicempire, "The C
Programming Language"), Second Edition, Prentice Hall, 1988.
$(H4 `KRRegion` = `Region` + Kernighan-Ritchie Allocator)
Initially, `KRRegion` starts in "region" mode: allocations are served from
the memory chunk in a region fashion. Thus, as long as there is enough memory
left, `KRRegion.allocate` has the performance profile of a region allocator.
Deallocation inserts (in $(BIGOH 1) time) the deallocated blocks in an
unstructured freelist, which is not read in region mode.
Once the region cannot serve an `allocate` request, `KRRegion` switches
to "free list" mode. It sorts the list of previously deallocated blocks by
address and serves allocation requests off that free list. The allocation and
deallocation follow the pattern described by Kernighan and Ritchie.
The recommended use of `KRRegion` is as a $(I region with deallocation). If the
`KRRegion` is dimensioned appropriately, it could often not enter free list
mode during its lifetime. Thus it is as fast as a simple region, whilst
offering deallocation at a small cost. When the region memory is exhausted,
the previously deallocated memory is still usable, at a performance cost. If
the region is not excessively large and fragmented, the linear allocation and
deallocation cost may still be compensated for by the good locality
characteristics.
If the chunk of memory managed is large, it may be desirable to switch
management to free list from the beginning. That way, memory may be used in a
more compact manner than region mode. To force free list mode, call $(D
switchToFreeList) shortly after construction or when deemed appropriate.
The smallest size that can be allocated is two words (16 bytes on 64-bit
systems, 8 bytes on 32-bit systems). This is because the free list management
needs two words (one for the length, the other for the next pointer in the
singly-linked list).
The `ParentAllocator` type parameter is the type of the allocator used to
allocate the memory chunk underlying the `KRRegion` object. Choosing the
default (`NullAllocator`) means the user is responsible for passing a buffer
at construction (and for deallocating it if necessary). Otherwise, `KRRegion`
automatically deallocates the buffer during destruction. For that reason, if
`ParentAllocator` is not `NullAllocator`, then `KRRegion` is not
copyable.
$(H4 Implementation Details)
In free list mode, `KRRegion` embeds a free blocks list onto the chunk of
memory. The free list is circular, coalesced, and sorted by address at all
times. Allocations and deallocations take time proportional to the number of
previously deallocated blocks. (In practice the cost may be lower, e.g. if
memory is deallocated in reverse order of allocation, all operations take
constant time.) Memory utilization is good (small control structure and no
per-allocation overhead). The disadvantages of freelist mode include proneness
to fragmentation, a minimum allocation size of two words, and linear worst-case
allocation and deallocation times.
Similarities of `KRRegion` (in free list mode) with the
Kernighan-Ritchie allocator:
$(UL
$(LI Free blocks have variable size and are linked in a singly-linked list.)
$(LI The freelist is maintained in increasing address order, which makes
coalescing easy.)
$(LI The strategy for finding the next available block is first fit.)
$(LI The free list is circular, with the last node pointing back to the first.)
$(LI Coalescing is carried out during deallocation.)
)
Differences from the Kernighan-Ritchie allocator:
$(UL
$(LI Once the chunk is exhausted, the Kernighan-Ritchie allocator allocates
another chunk using operating system primitives. For better composability, $(D
KRRegion) just gets full (returns `null` on new allocation requests). The
decision to allocate more blocks is deferred to a higher-level entity. For an
example, see the example below using `AllocatorList` in conjunction with $(D
KRRegion).)
$(LI Allocated blocks do not hold a size prefix. This is because in D the size
information is available in client code at deallocation time.)
)
*/
struct KRRegion(ParentAllocator = NullAllocator)
{
    import std.experimental.allocator.common : stateSize, alignedAt;
    import std.traits : hasMember;
    import std.typecons : Ternary;

    // Header embedded at the start of every free block. A block's total
    // size (header included) is `size`; `next` chains free blocks into a
    // singly-linked list, which is circular once in free-list mode.
    private static struct Node
    {
        import std.typecons : tuple, Tuple;

        Node* next;
        size_t size;

        this(this) @disable;

        // The whole memory span of this free block, starting at the header.
        void[] payload() inout
        {
            return (cast(ubyte*) &this)[0 .. size];
        }

        // True if `right` begins within Node.sizeof bytes past the end of
        // this block, i.e. the two are contiguous up to sub-node slack and
        // may be coalesced into one block.
        bool adjacent(in Node* right) const
        {
            assert(right);
            auto p = payload;
            return p.ptr < right && right < p.ptr + p.length + Node.sizeof;
        }

        // Merges this node with its successor, or absorbs trailing slack up
        // to `memoryEnd` when given. Returns true if a merge took place.
        bool coalesce(void* memoryEnd = null)
        {
            // Coalesce the last node before the memory end with any possible gap
            if (memoryEnd
                && memoryEnd < payload.ptr + payload.length + Node.sizeof)
            {
                size += memoryEnd - (payload.ptr + payload.length);
                return true;
            }
            if (!adjacent(next)) return false;
            // Absorb `next` (and any sub-header gap before it) into this node.
            size = (cast(ubyte*) next + next.size) - cast(ubyte*) &this;
            next = next.next;
            return true;
        }

        // Carves `bytes` off the front of this block. Returns the allocated
        // payload plus the node that should replace this one in the list:
        // the leftover remainder node, the old successor, or null when the
        // list becomes empty. Returns an empty tuple if this block is too
        // small to satisfy the request.
        Tuple!(void[], Node*) allocateHere(size_t bytes)
        {
            assert(bytes >= Node.sizeof);
            assert(bytes % Node.alignof == 0);
            assert(next);
            assert(!adjacent(next));
            if (size < bytes) return typeof(return)();
            assert(size >= bytes);
            immutable leftover = size - bytes;
            if (leftover >= Node.sizeof)
            {
                // There's room for another node
                auto newNode = cast(Node*) ((cast(ubyte*) &this) + bytes);
                newNode.size = leftover;
                newNode.next = next == &this ? newNode : next;
                assert(next);
                return tuple(payload, newNode);
            }
            // No slack space, just return next node
            return tuple(payload, next == &this ? null : next);
        }
    }

    // state
    /**
    If `ParentAllocator` holds state, `parent` is a public member of type
    `KRRegion`. Otherwise, `parent` is an `alias` for
    `ParentAllocator.instance`.
    */
    static if (stateSize!ParentAllocator) ParentAllocator parent;
    else alias parent = ParentAllocator.instance;
    private void[] payload;
    private Node* root;
    // bytesUsedRegionMode doubles as the mode flag: size_t.max means the
    // allocator has switched to free-list mode; any other value is the
    // number of bytes currently allocated in region mode.
    private bool regionMode() const { return bytesUsedRegionMode != size_t.max; }
    private void cancelRegionMode() { bytesUsedRegionMode = size_t.max; }
    private size_t bytesUsedRegionMode = 0;

    // Forward range over the free-list nodes; completes exactly one lap of
    // the circular list (stops when the start node is reached again).
    auto byNodePtr()
    {
        static struct Range
        {
            Node* start, current;
            @property bool empty() { return !current; }
            @property Node* front() { return current; }
            void popFront()
            {
                assert(current && current.next);
                current = current.next;
                if (current == start) current = null;
            }
            @property Range save() { return this; }
        }
        import std.range : isForwardRange;
        static assert(isForwardRange!Range);
        return Range(root, root);
    }

    // Human-readable dump of the allocator's state, for debugging. A "+"
    // prefix marks a free block adjacent to the previous one.
    string toString()
    {
        import std.format : format;
        string s = "KRRegion@";
        s ~= format("%s-%s(0x%s[%s] %s", &this, &this + 1,
            payload.ptr, payload.length,
            regionMode ? "(region)" : "(freelist)");
        Node* lastNode = null;
        if (!regionMode)
        {
            foreach (node; byNodePtr)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }
        else
        {
            // In region mode the list is not circular; walk to the null end.
            for (auto node = root; node; node = node.next)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }
        s ~= ')';
        return s;
    }

    // Free-list-mode invariant checks; `s` is the assertion failure message.
    private void assertValid(string s)
    {
        assert(!regionMode);
        if (!payload.ptr)
        {
            assert(!root, s);
            return;
        }
        if (!root)
        {
            return;
        }
        assert(root >= payload.ptr, s);
        assert(root < payload.ptr + payload.length, s);
        // Check that the list terminates
        size_t n;
        foreach (node; byNodePtr)
        {
            assert(node.next);
            assert(!node.adjacent(node.next));
            assert(n++ < payload.length / Node.sizeof, s);
        }
    }

    // Natural-run merge sort over a null-terminated free list, by address.
    private Node* sortFreelist(Node* root)
    {
        // Find a monotonic run
        auto last = root;
        for (;;)
        {
            if (!last.next) return root;
            if (last > last.next) break;
            assert(last < last.next);
            last = last.next;
        }
        auto tail = last.next;
        last.next = null;
        tail = sortFreelist(tail);
        return merge(root, tail);
    }

    // Merges two address-sorted, null-terminated lists into one.
    private Node* merge(Node* left, Node* right)
    {
        assert(left != right);
        if (!left) return right;
        if (!right) return left;
        if (left < right)
        {
            auto result = left;
            result.next = merge(left.next, right);
            return result;
        }
        auto result = right;
        result.next = merge(left, right.next);
        return result;
    }

    // Coalesces adjacent nodes of an address-sorted, null-terminated list,
    // then closes the list into a circle (last node points back to root).
    private void coalesceAndMakeCircular()
    {
        for (auto n = root;;)
        {
            assert(!n.next || n < n.next);
            if (!n.next)
            {
                // Convert to circular
                n.next = root;
                break;
            }
            if (n.coalesce) continue; // possibly another coalesce
            n = n.next;
        }
    }

    /**
    Create a `KRRegion`. If `ParentAllocator` is not `NullAllocator`,
    `KRRegion`'s destructor will call `parent.deallocate`.

    Params:
    b = Block of memory to serve as support for the allocator. Memory must be
    larger than two words and word-aligned.
    n = Capacity desired. This constructor is defined only if $(D
    ParentAllocator) is not `NullAllocator`.
    */
    this(ubyte[] b)
    {
        if (b.length < Node.sizeof)
        {
            // Init as empty
            assert(root is null);
            assert(payload is null);
            return;
        }
        assert(b.length >= Node.sizeof);
        assert(b.ptr.alignedAt(Node.alignof));
        assert(b.length >= 2 * Node.sizeof);
        payload = b;
        root = cast(Node*) b.ptr;
        // Start in region mode: a single root node spans the whole chunk.
        assert(regionMode);
        root.next = null;
        root.size = b.length;
        debug(KRRegion) writefln("KRRegion@%s: init with %s[%s]", &this,
            b.ptr, b.length);
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator) && !stateSize!ParentAllocator)
    this(size_t n)
    {
        assert(n > Node.sizeof);
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator) && stateSize!ParentAllocator)
    this(ParentAllocator parent, size_t n)
    {
        assert(n > Node.sizeof);
        this.parent = parent;
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator)
        && hasMember!(ParentAllocator, "deallocate"))
    ~this()
    {
        parent.deallocate(payload);
    }

    /**
    Forces free list mode. If already in free list mode, does nothing.
    Otherwise, sorts the free list accumulated so far and switches strategy for
    future allocations to KR style.
    */
    void switchToFreeList()
    {
        if (!regionMode) return;
        cancelRegionMode;
        if (!root) return;
        root = sortFreelist(root);
        coalesceAndMakeCircular;
    }

    /*
    Noncopyable
    */
    @disable this(this);

    /**
    Word-level alignment.
    */
    enum alignment = Node.alignof;

    /**
    Allocates `n` bytes. Allocation searches the list of available blocks
    until a free block with `n` or more bytes is found (first fit strategy).
    The block is split (if larger) and returned.

    Params: n = number of bytes to _allocate

    Returns: A word-aligned buffer of `n` bytes, or `null`.
    */
    void[] allocate(size_t n)
    {
        if (!n || !root) return null;
        const actualBytes = goodAllocSize(n);
        // Try the region first
        if (regionMode)
        {
            // Only look at the head of the freelist
            if (root.size >= actualBytes)
            {
                // Enough room for allocation
                bytesUsedRegionMode += actualBytes;
                void* result = root;
                immutable balance = root.size - actualBytes;
                if (balance >= Node.sizeof)
                {
                    auto newRoot = cast(Node*) (result + actualBytes);
                    newRoot.next = root.next;
                    newRoot.size = balance;
                    root = newRoot;
                }
                else
                {
                    // Leftover too small to host a node: the region is
                    // exhausted; hand control to the free list.
                    root = null;
                    switchToFreeList;
                }
                return result[0 .. n];
            }
            // Not enough memory, switch to freelist mode and fall through
            switchToFreeList;
        }
        // Try to allocate from next after the iterating node
        for (auto pnode = root;;)
        {
            assert(!pnode.adjacent(pnode.next));
            auto k = pnode.next.allocateHere(actualBytes);
            if (k[0] !is null)
            {
                // Success: k[0] is the payload, k[1] replaces pnode.next.
                assert(k[0].length >= n);
                if (root == pnode.next) root = k[1];
                pnode.next = k[1];
                return k[0][0 .. n];
            }
            pnode = pnode.next;
            if (pnode == root) break;
        }
        return null;
    }

    /**
    Deallocates `b`, which is assumed to have been previously allocated with
    this allocator. Deallocation performs a linear search in the free list to
    preserve its sorting order. It follows that blocks with higher addresses in
    allocators with many free blocks are slower to deallocate.

    Params: b = block to be deallocated
    */
    nothrow @nogc
    bool deallocate(void[] b)
    {
        debug(KRRegion) writefln("KRRegion@%s: deallocate(%s[%s])", &this,
            b.ptr, b.length);
        if (!b.ptr) return true;
        assert(owns(b) == Ternary.yes);
        assert(b.ptr.alignedAt(Node.alignof));
        // Insert back in the freelist, keeping it sorted by address. Do not
        // coalesce at this time. Instead, do it lazily during allocation.
        auto n = cast(Node*) b.ptr;
        n.size = goodAllocSize(b.length);
        auto memoryEnd = payload.ptr + payload.length;
        if (regionMode)
        {
            assert(root);
            // Insert right after root
            bytesUsedRegionMode -= n.size;
            n.next = root.next;
            root.next = n;
            return true;
        }
        if (!root)
        {
            // What a sight for sore eyes
            root = n;
            root.next = root;
            // If the first block freed is the last one allocated,
            // maybe there's a gap after it.
            root.coalesce(memoryEnd);
            return true;
        }
        // Debug-only check: the block must not already be on the free list.
        version (assert) foreach (test; byNodePtr)
        {
            assert(test != n);
        }
        // Linear search
        auto pnode = root;
        do
        {
            assert(pnode && pnode.next);
            assert(pnode != n);
            assert(pnode.next != n);
            if (pnode < pnode.next)
            {
                if (pnode > n || n > pnode.next) continue;
                // Insert in between pnode and pnode.next
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (pnode < n)
            {
                // Insert at the end of the list
                // Add any possible gap at the end of n to the length of n
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce(memoryEnd);
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (n < pnode.next)
            {
                // Insert at the front of the list
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                root = n;
                return true;
            }
        }
        while ((pnode = pnode.next) != root);
        assert(0, "Wrong parameter passed to deallocate");
    }

    /**
    Allocates all memory available to this allocator. If the allocator is empty,
    returns the entire available block of memory. Otherwise, it still performs
    a best-effort allocation: if there is no fragmentation (e.g. `allocate`
    has been used but not `deallocate`), allocates and returns the only
    available block of memory.

    The operation takes time proportional to the number of adjacent free blocks
    at the front of the free list. These blocks get coalesced, whether
    `allocateAll` succeeds or fails due to fragmentation.
    */
    void[] allocateAll()
    {
        if (regionMode) switchToFreeList;
        // Only succeeds when the free list is a single (coalesced) block.
        if (root && root.next == root)
            return allocate(root.size);
        return null;
    }

    ///
    @system unittest
    {
        import std.experimental.allocator.gc_allocator : GCAllocator;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        const b1 = alloc.allocate(2048);
        assert(b1.length == 2048);
        const b2 = alloc.allocateAll;
        assert(b2.length == 1024 * 62);
    }

    /**
    Deallocates all memory currently allocated, making the allocator ready for
    other allocations. This is a $(BIGOH 1) operation.
    */
    pure nothrow @nogc
    bool deallocateAll()
    {
        debug(KRRegion) assertValid("deallocateAll");
        debug(KRRegion) scope(exit) assertValid("deallocateAll");
        root = cast(Node*) payload.ptr;
        // Reset to regionMode
        bytesUsedRegionMode = 0;
        if (root)
        {
            root.next = null;
            root.size = payload.length;
        }
        return true;
    }

    /**
    Checks whether the allocator is responsible for the allocation of `b`.
    It does a simple $(BIGOH 1) range check. `b` should be a buffer either
    allocated with `this` or obtained through other means.
    */
    pure nothrow @trusted @nogc
    Ternary owns(void[] b)
    {
        debug(KRRegion) assertValid("owns");
        debug(KRRegion) scope(exit) assertValid("owns");
        return Ternary(b && payload && (&b[0] >= &payload[0])
            && (&b[0] < &payload[0] + payload.length));
    }

    /**
    Adjusts `n` to a size suitable for allocation (two words or larger,
    word-aligned).
    */
    pure nothrow @safe @nogc
    static size_t goodAllocSize(size_t n)
    {
        import std.experimental.allocator.common : roundUpToMultipleOf;
        return n <= Node.sizeof
            ? Node.sizeof : n.roundUpToMultipleOf(alignment);
    }

    /**
    Returns: `Ternary.yes` if the allocator is empty, `Ternary.no` otherwise.
    Never returns `Ternary.unknown`.
    */
    pure nothrow @safe @nogc
    Ternary empty()
    {
        if (regionMode)
            return Ternary(bytesUsedRegionMode == 0);
        // Free-list mode: empty iff the root block spans the whole payload.
        return Ternary(root && root.size == payload.length);
    }
}
/**
`KRRegion` is preferable to `Region` as a front for a general-purpose
allocator if `deallocate` is needed, yet the actual deallocation traffic is
relatively low. The example below shows a `KRRegion` using stack storage
fronting the GC allocator.
*/
@system unittest
{
    import std.experimental.allocator.building_blocks.fallback_allocator
        : fallbackAllocator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;

    // A stack-backed KRRegion serves requests first; the GC is the fallback.
    ubyte[1024 * 128] stackStorage;
    auto combined = fallbackAllocator(KRRegion!()(stackStorage), GCAllocator.instance);
    auto block = combined.allocate(100);
    assert(block.length == 100);
    assert((() pure nothrow @safe @nogc => combined.primary.owns(block))() == Ternary.yes);
}
/**
The code below defines a scalable allocator consisting of 1 MB (or larger)
blocks fetched from the garbage-collected heap. Each block is organized as a
KR-style heap. More blocks are allocated and freed on a need basis.
This is the closest example to the allocator introduced in the K$(AMP)R book.
It should perform slightly better because instead of searching through one
large free list, it searches through several shorter lists in LRU order. Also,
it actually returns memory to the operating system when possible.
*/
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;

    // Each node of the list is a KRRegion over an mmap-ed chunk of at least
    // 1 MB (or 16x the incoming request, whichever is larger).
    AllocatorList!(bytes => KRRegion!MmapAllocator(max(bytes * 16, 1024 * 1024))) alloc;
}
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.random : randomShuffle;
    import std.typecons : Ternary;

    // Scalable allocator: a list of KR-style heaps over malloc-ed chunks of
    // at least 1 MB each. Chunks are created and destroyed on demand.
    AllocatorList!(bytes => KRRegion!Mallocator(max(bytes * 16, 1024 * 1024)),
        NullAllocator) alloc;
    void[][50] blocks;

    // Allocate blocks of strictly increasing size and validate each one.
    foreach (i, ref blk; blocks)
    {
        immutable want = i * 10_000 + 1;
        blk = alloc.allocate(want);
        assert(blk.ptr);
        assert(blk.length == want);
    }

    // Return everything in random order.
    randomShuffle(blocks[]);
    foreach (ref blk; blocks)
    {
        assert(blk.ptr);
        assert((() pure nothrow @safe @nogc => alloc.owns(blk))() == Ternary.yes);
        () nothrow @nogc { alloc.deallocate(blk); }();
    }
}
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;
    import std.random : randomShuffle;
    import std.typecons : Ternary;

    // Scalable allocator over mmap-ed chunks of at least 1 MB (or twice the
    // request size), built with a statement lambda.
    AllocatorList!((bytes) {
        auto region = KRRegion!MmapAllocator(max(bytes * 2, 1024 * 1024));
        return region;
    }) alloc;
    void[][99] blocks;

    // Allocate blocks of increasing size; no two blocks may alias.
    foreach (i, ref blk; blocks)
    {
        immutable want = i * 10_000 + 1;
        blk = alloc.allocate(want);
        assert(blk.ptr);
        foreach (other; blocks[0 .. i])
        {
            assert(blk.ptr != other.ptr);
        }
        assert(blk.length == want);
    }

    // Release all blocks in random order.
    randomShuffle(blocks[]);
    foreach (ref blk; blocks)
    {
        assert((() pure nothrow @safe @nogc => alloc.owns(blk))() == Ternary.yes);
        () nothrow @nogc { alloc.deallocate(blk); }();
    }
}
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.common : testAllocator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    // Run the generic allocator conformance suite against an AllocatorList
    // of GC-backed KRRegions (chunks of at least 1 MB each).
    testAllocator!(() => AllocatorList!(
        n => KRRegion!GCAllocator(max(n * 16, 1024 * 1024)))());
}
@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;

    auto alloc = KRRegion!GCAllocator(1024 * 1024);

    // Three small allocations of 1, 2 and 3 bytes.
    void[][] blocks;
    foreach (size; 1 .. 4)
    {
        blocks ~= alloc.allocate(size);
        assert(blocks[$ - 1].length == size);
    }

    // Free them out of order; once coalesced, the entire chunk must be
    // obtainable again as one contiguous block.
    () nothrow @nogc { alloc.deallocate(blocks[1]); }();
    () nothrow @nogc { alloc.deallocate(blocks[0]); }();
    () nothrow @nogc { alloc.deallocate(blocks[2]); }();
    assert(alloc.allocateAll().length == 1024 * 1024);
}
@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;
    auto alloc = KRRegion!()(
        cast(ubyte[])(GCAllocator.instance.allocate(1024 * 1024)));
    // Allocate room for the allocator inside its own managed chunk, then
    // move it there: the KRRegion ends up living in the memory it manages.
    const store = alloc.allocate(KRRegion!().sizeof);
    auto p = cast(KRRegion!()* ) store.ptr;
    import core.stdc.string : memcpy;
    import std.algorithm.mutation : move;
    import std.conv : text, emplace;
    // Raw-copy the allocator state into the chunk, then reinitialize the
    // local variable to a fresh (empty) default state.
    memcpy(p, &alloc, alloc.sizeof);
    emplace(&alloc);
    void[][100] array;
    foreach (i; 0 .. array.length)
    {
        auto length = 100 * i + 1;
        array[i] = p.allocate(length);
        assert(array[i].length == length, text(array[i].length));
        assert((() pure nothrow @safe @nogc => p.owns(array[i]))() == Ternary.yes);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert((() pure nothrow @safe @nogc => p.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { p.deallocate(array[i]); }();
    }
    // After freeing everything, the whole chunk minus the space occupied by
    // the allocator object itself must be available in one allocation.
    auto b = p.allocateAll();
    assert(b.length == 1024 * 1024 - KRRegion!().sizeof, text(b.length));
}
@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;

    enum chunkSize = 1024 * 1024;
    auto alloc = KRRegion!()(
        cast(ubyte[])(GCAllocator.instance.allocate(chunkSize)));

    // Grab the whole chunk, release it all, and verify the cycle repeats.
    auto whole = alloc.allocateAll();
    assert(whole.length == chunkSize);
    assert((() nothrow @nogc => alloc.deallocateAll())());
    assert(alloc.empty() == Ternary.yes);
    whole = alloc.allocateAll();
    assert(whole.length == chunkSize);
}
@system unittest
{
    import std.experimental.allocator.building_blocks;
    import std.random;
    import std.typecons : Ternary;

    // Regression test for issue 16564: allocation sequences that leave an
    // unusable gap at the end of the buffer, from the allocator's
    // perspective. Both sequences must work on either system.
    // 64-bit systems: leftover balance = 8 bytes < 16
    int[] sizes64 = [18904, 2008, 74904, 224, 111904, 1904, 52288, 8];
    // 32-bit systems: leftover balance < 8
    int[] sizes32 = [81412, 107068, 49892, 23768];

    void runScenario(int[] sizes)
    {
        align(size_t.sizeof) ubyte[256 * 1024] buf;
        auto a = KRRegion!()(buf);

        void[][] bufs;
        foreach (size; sizes)
        {
            bufs ~= a.allocate(size);
        }
        foreach (b; bufs.randomCover)
        {
            () nothrow @nogc { a.deallocate(b); }();
        }
        // Every byte must be accounted for once all blocks come back.
        assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
    }

    runScenario(sizes64);
    runScenario(sizes32);
}
@system unittest
{
    import std.experimental.allocator.building_blocks;
    import std.random;
    import std.typecons : Ternary;
    // For 64 bits, we allocate in multiples of 8, but the minimum alloc size is 16.
    // This can create gaps.
    // This test is an example of such a case. The gap is formed between the block
    // allocated for the second value in sizes and the third. There is also a gap
    // at the very end. (total lost 2 * word)
    int[] sizes64 = [2008, 18904, 74904, 224, 111904, 1904, 52288, 8];
    int[] sizes32 = [81412, 107068, 49892, 23768];
    int word64 = 8;
    int word32 = 4;
    void test(int[] sizes, int word)
    {
        align(size_t.sizeof) ubyte[256 * 1024] buf;
        auto a = KRRegion!()(buf);
        void[][] bufs;
        foreach (size; sizes)
        {
            bufs ~= a.allocate(size);
        }
        // Free the second block, then reallocate it one word smaller so the
        // replacement leaves a word-sized gap behind it.
        () nothrow @nogc { a.deallocate(bufs[1]); }();
        bufs ~= a.allocate(sizes[1] - word);
        // Return the remaining blocks; coalescing must absorb the gaps so
        // the allocator reports empty at the end.
        () nothrow @nogc { a.deallocate(bufs[0]); }();
        foreach (i; 2 .. bufs.length)
        {
            () nothrow @nogc { a.deallocate(bufs[i]); }();
        }
        assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
    }
    test(sizes64, word64);
    test(sizes32, word32);
}
@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;

    // The smallest request still rounds up to one full free-list node.
    auto allocator = KRRegion!GCAllocator(1024 * 1024);
    immutable rounded = (() pure nothrow @safe @nogc => allocator.goodAllocSize(1))();
    assert(rounded == typeof(*allocator.root).sizeof);
}
@system unittest
{
    import std.typecons : Ternary;

    ubyte[1024] storage;
    auto alloc = KRRegion!()(storage);

    // Allocate and free blocks of growing size, up to the whole buffer;
    // after each round trip the allocator must report empty again.
    foreach (size; [128, 512, 1024])
    {
        auto blk = alloc.allocate(size);
        assert(blk.length == size);
        assert(alloc.empty == Ternary.no);
        assert(alloc.deallocate(blk));
        assert(alloc.empty == Ternary.yes);
    }
}