@@ -11,13 +11,14 @@ const hash = @import("../utility/hash.zig");
 const debug = @import("../debug.zig");
 const Value = @import("./value.zig").Value;
 const Object = @import("./Object.zig");
-const ByteVector = @import("./ByteArray.zig");
+const ByteArray = @import("./ByteArray.zig");
 const Activation = @import("./Activation.zig");
 const HandleArea = @import("./HandleArea.zig");
 const VirtualMachine = @import("./VirtualMachine.zig");
 const ActivationStack = Activation.ActivationStack;
 
 const GC_DEBUG = debug.GC_DEBUG;
+const GC_TOKEN_DEBUG = debug.GC_TOKEN_DEBUG;
 const GC_TRACK_SOURCE_DEBUG = debug.GC_TRACK_SOURCE_DEBUG;
 const REMEMBERED_SET_DEBUG = debug.REMEMBERED_SET_DEBUG;
@@ -58,6 +59,27 @@ const EdenSize = 1 * 1024 * 1024;
 const NewSpaceSize = 4 * 1024 * 1024;
 const InitialOldSpaceSize = 16 * 1024 * 1024;
 
+const Segment = enum { Object, ByteArray };
+pub const AllocationToken = struct {
+    heap: *Self,
+    total_bytes: usize,
+    bytes_left: usize,
+
+    pub fn allocate(self: *@This(), segment: Segment, bytes: usize) [*]u64 {
+        if (self.bytes_left < bytes) {
+            std.debug.panic(
+                "!!! Attempted to allocate {} bytes from {} byte-sized allocation token with {} bytes remaining!",
+                .{ bytes, self.total_bytes, self.bytes_left },
+            );
+        }
+
+        self.bytes_left -= bytes;
+        // NOTE: The only error this can raise is allocation failure during
+        // lazy allocation, which eden does not do.
+        return self.heap.eden.allocateInSegment(self.heap.allocator, segment, bytes) catch unreachable;
+    }
+};
+
 pub fn create(allocator: Allocator, vm: *VirtualMachine) !*Self {
     const self = try allocator.create(Self);
     errdefer allocator.destroy(self);
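
A caller is expected to reserve the full allocation up front and then draw pieces from the token. Here is a minimal usage sketch (the names and sizes are hypothetical; as the code below enforces, every size must be a multiple of @sizeOf(u64)):

    // Hypothetical caller: reserve everything first, so any garbage
    // collection happens before addresses are handed out.
    const object_size: usize = 48;
    const name_size: usize = 16;
    var token = try heap.getAllocation(object_size + name_size);

    // These calls cannot trigger a collection; the space is already
    // reserved. Drawing more than the reserved total would panic.
    const object_memory = token.allocate(.Object, object_size);
    const name_memory = token.allocate(.ByteArray, name_size);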
@@ -115,15 +137,14 @@ fn deinit(self: *Self) void {
     }
 }
 
-// Attempts to allocate `size` bytes in the object segment of the eden. If
-// necessary, garbage collection is performed in the process.
-// The given size must be a multiple of `@sizeOf(u64)`.
-pub fn allocateInObjectSegment(self: *Self, size: usize) ![*]u64 {
-    return try self.eden.allocateInObjectSegment(self.allocator, size);
-}
+pub fn getAllocation(self: *Self, bytes: usize) !AllocationToken {
+    if (GC_TOKEN_DEBUG) std.debug.print("Heap.getAllocation: Attempting to get a token of size {}\n", .{bytes});
+    try self.eden.collectGarbage(self.allocator, bytes);
+
+    if (bytes % @sizeOf(u64) != 0)
+        std.debug.panic("!!! Attempted to allocate {} bytes which is not a multiple of @sizeOf(u64)!", .{bytes});
 
-pub fn allocateInByteVectorSegment(self: *Self, size: usize) ![*]u64 {
-    return try self.eden.allocateInByteVectorSegment(self.allocator, size);
+    return AllocationToken{ .heap = self, .total_bytes = bytes, .bytes_left = bytes };
 }
 
 /// Mark the given address within the heap as an object which needs to know when
@@ -174,13 +195,6 @@ pub fn untrack(self: *Self, tracked: Tracked) void {
     }
 }
 
-/// Ensures that the given amount of bytes are immediately available in eden, so
-/// garbage collection won't happen. Performs a pre-emptive garbage collection
-/// if there isn't enough space.
-pub fn ensureSpaceInEden(self: *Self, required_memory: usize) !void {
-    try self.eden.collectGarbage(self.allocator, required_memory);
-}
-
 /// Go through the whole heap, updating references to the given value with the
 /// new value.
 pub fn updateAllReferencesTo(self: *Self, old_value: Value, new_value: Value) !void {
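
This removal pairs with the token API above: reserving space and allocating from it are no longer separate, independently-accounted calls. Roughly, the caller migration looks like this (a sketch; `heap` and `size` are placeholders):

    // Before: reservation and allocation were decoupled, so the byte
    // accounting between the two calls was implicit and unchecked.
    try heap.ensureSpaceInEden(size);
    const old_memory = try heap.allocateInObjectSegment(size);

    // After: the token carries the reserved byte count and panics if
    // allocations drawn from it exceed what was reserved.
    var token = try heap.getAllocation(size);
    const new_memory = token.allocate(.Object, size);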
@@ -434,15 +448,15 @@ const Space = struct {
         return new_address;
     }
 
-    /// Same as copyObjectTo, but for byte vectors.
-    fn copyByteVectorTo(allocator: Allocator, address: [*]u64, target_space: *Space) [*]u64 {
-        const byte_array = ByteVector.fromAddress(address);
+    /// Same as copyObjectTo, but for byte arrays.
+    fn copyByteArrayTo(allocator: Allocator, address: [*]u64, target_space: *Space) [*]u64 {
+        const byte_array = ByteArray.fromAddress(address);
         const byte_array_size = byte_array.getSizeInMemory();
         std.debug.assert(byte_array_size % @sizeOf(u64) == 0);
 
         const byte_array_size_in_words = byte_array_size / @sizeOf(u64);
         // We must have enough space at this point.
-        const new_address = target_space.allocateInByteVectorSegment(allocator, byte_array_size) catch unreachable;
+        const new_address = target_space.allocateInByteArraySegment(allocator, byte_array_size) catch unreachable;
         std.mem.copy(u64, new_address[0..byte_array_size_in_words], address[0..byte_array_size_in_words]);
 
         return new_address;
@@ -454,7 +468,7 @@ const Space = struct {
         if (self.objectSegmentContains(address)) {
             return self.copyObjectTo(allocator, address, target_space);
         } else if (self.byteArraySegmentContains(address)) {
-            return copyByteVectorTo(allocator, address, target_space);
+            return copyByteArrayTo(allocator, address, target_space);
         } else if (require_copy) {
             std.debug.panic("!!! copyAddress called with an address that's not allocated in this space!", .{});
         }
@@ -568,8 +582,8 @@ const Space = struct {
             .{ target_object_segment_index, target_space.object_segment.len },
         );
 
-        // Try to catch up to the target space's object and byte vector cursors,
-        // copying any other objects/byte vectors that still exist in this
+        // Try to catch up to the target space's object and byte array cursors,
+        // copying any other objects/byte arrays that still exist in this
         // space.
         while (target_object_segment_index < target_space.object_segment.len) : (target_object_segment_index += 1) {
            const word_ptr = &target_space.object_segment[target_object_segment_index];
@@ -581,7 +595,7 @@ const Space = struct {
             if (self.objectSegmentContains(address)) {
                 word_ptr.* = Value.fromObjectAddress(try self.copyObjectTo(allocator, address, target_space)).data;
             } else if (self.byteArraySegmentContains(address)) {
-                word_ptr.* = Value.fromObjectAddress(copyByteVectorTo(allocator, address, target_space)).data;
+                word_ptr.* = Value.fromObjectAddress(copyByteArrayTo(allocator, address, target_space)).data;
             }
         }
     }
@@ -753,13 +767,11 @@ const Space = struct {
     }
 
     /// Allocates the requested amount in bytes in the object segment of this
-    /// space, garbage collecting if there is not enough space.
-    pub fn allocateInObjectSegment(self: *Space, allocator: Allocator, size: usize) ![*]u64 {
+    /// space. Panics if there isn't enough memory.
+    fn allocateInObjectSegment(self: *Space, allocator: Allocator, size: usize) ![*]u64 {
         if (self.lazy_allocate)
             try self.allocateMemory(allocator);
 
-        if (self.freeMemory() < size) try self.collectGarbage(allocator, size);
-
         const size_in_words = @divExact(size, @sizeOf(u64));
         const current_object_segment_offset = self.object_segment.len;
         self.object_segment.len += size_in_words;
@@ -771,14 +783,12 @@ const Space = struct {
         return start_of_object;
     }
 
-    /// Allocates the requested amount in bytes in the byte vector segment of
-    /// this space, garbage collecting if there is not enough space.
-    pub fn allocateInByteVectorSegment(self: *Space, allocator: Allocator, size: usize) ![*]u64 {
+    /// Allocates the requested amount in bytes in the byte array segment of
+    /// this space. Panics if there isn't enough memory.
+    fn allocateInByteArraySegment(self: *Space, allocator: Allocator, size: usize) ![*]u64 {
         if (self.lazy_allocate)
             try self.allocateMemory(allocator);
 
-        if (self.freeMemory() < size) try self.collectGarbage(allocator, size);
-
         const size_in_words = @divExact(size, @sizeOf(u64));
         self.byte_array_segment.ptr -= size_in_words;
         self.byte_array_segment.len += size_in_words;
@@ -789,6 +799,15 @@ const Space = struct {
         return self.byte_array_segment.ptr;
     }
 
+    /// Allocates the requested amount of bytes in the appropriate segment of
+    /// this space.
+    pub fn allocateInSegment(self: *Space, allocator: Allocator, segment: Segment, size: usize) Allocator.Error![*]u64 {
+        return switch (segment) {
+            .Object => self.allocateInObjectSegment(allocator, size),
+            .ByteArray => self.allocateInByteArraySegment(allocator, size),
+        };
+    }
+
     /// Adds the given address to the finalization set of this space.
     pub fn addToFinalizationSet(self: *Space, allocator: Allocator, address: [*]u64) !void {
         try self.finalization_set.put(allocator, address, {});
@@ -1019,14 +1038,14 @@ test "link an object to another and perform scavenge" {
 
     // The object being referenced
     var referenced_object_map = try Object.Map.Slots.create(heap, 1);
-    var actual_name = try ByteVector.createFromString(heap, "actual");
+    var actual_name = try ByteArray.createFromString(heap, "actual");
     referenced_object_map.getSlots()[0].initConstant(actual_name, .NotParent, Value.fromUnsignedInteger(0xDEADBEEF));
     var referenced_object = try Object.Slots.create(heap, referenced_object_map, &[_]Value{});
 
     // The "activation object", which is how we get a reference to the object in
     // the from space after the tenure is done
     var activation_object_map = try Object.Map.Slots.create(heap, 1);
-    var reference_name = try ByteVector.createFromString(heap, "reference");
+    var reference_name = try ByteArray.createFromString(heap, "reference");
     activation_object_map.getSlots()[0].initMutable(Object.Map.Slots, activation_object_map, reference_name, .NotParent);
     var activation_object = try Object.Slots.create(heap, activation_object_map, &[_]Value{referenced_object.asValue()});
@@ -1042,15 +1061,15 @@ test "link an object to another and perform scavenge" {
     var new_activation_object_map = new_activation_object.getMap();
     try std.testing.expect(activation_object_map != new_activation_object_map);
     try std.testing.expect(activation_object_map.getSlots()[0].name.asObjectAddress() != new_activation_object_map.getSlots()[0].name.asObjectAddress());
-    try std.testing.expectEqualStrings("reference", new_activation_object_map.getSlots()[0].name.asByteVector().getValues());
+    try std.testing.expectEqualStrings("reference", new_activation_object_map.getSlots()[0].name.asByteArray().getValues());
 
     // Find the new referenced object
     var new_referenced_object = new_activation_object.getAssignableSlotValueByName("reference").?.asObject().asSlotsObject();
     try std.testing.expect(referenced_object != new_referenced_object);
     var new_referenced_object_map = new_referenced_object.getMap();
     try std.testing.expect(referenced_object_map != new_referenced_object_map);
     try std.testing.expect(referenced_object_map.getSlots()[0].name.asObjectAddress() != new_referenced_object_map.getSlots()[0].name.asObjectAddress());
-    try std.testing.expectEqualStrings("actual", new_referenced_object_map.getSlots()[0].name.asByteVector().getValues());
+    try std.testing.expectEqualStrings("actual", new_referenced_object_map.getSlots()[0].name.asByteArray().getValues());
 
     // Verify that the map map is shared (aka forwarding addresses work)
     try std.testing.expectEqual(