/*
* This file is part of the Jikes RVM project (http://jikesrvm.org).
*
* This file is licensed to You under the Eclipse Public License (EPL);
* You may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.opensource.org/licenses/eclipse-1.0.php
*
* See the COPYRIGHT.txt file distributed with this work for information
* regarding copyright ownership.
*/
package org.mmtk.utility.alloc;
import static org.mmtk.utility.Constants.*;
import org.mmtk.vm.Lock;
import org.mmtk.plan.Plan;
import org.mmtk.policy.Space;
import org.mmtk.utility.*;
import org.mmtk.vm.VM;
import org.vmmagic.unboxed.*;
import org.vmmagic.pragma.*;
/**
* This abstract base class provides the basis for processor-local
* allocation. The key functionality provided is the retry mechanism
* that is necessary to correctly handle the fact that a "slow-path"
* allocation can cause a GC, which violates the uninterruptibility
* assumption. Such a GC can result in the thread being moved to a
* different processor, so that the allocator object it was using is
* not actually the one for the processor it is now running on.<p>
*
* This class also includes functionality to assist allocators with
* ensuring that requests are aligned as requested.<p>
*
* Failing to handle this properly will lead to very hard to trace bugs
* where the allocation that caused a GC, or allocations immediately
* following a GC, are performed incorrectly.<p>
*
* TODO the comments in this class need to be rephrased from using the
* particle terminology to alignments.
*/
@Uninterruptible
public abstract class Allocator {
/** Lock used for out of memory handling */
private static Lock oomLock = VM.newLock("OOM Lock");
/** Has an allocation succeeded since the emergency collection? */
private static volatile boolean allocationSuccess;
/** Number of consecutive collection attempts without a successful allocation */
private static int collectionAttempts;
/**
* @return a consecutive failure count for any allocating thread.
*/
public static int determineCollectionAttempts() {
if (!allocationSuccess) {
collectionAttempts++;
} else {
allocationSuccess = false;
collectionAttempts = 1;
}
return collectionAttempts;
}
/**
* Return the space this allocator is currently bound to.
*
* @return The Space.
*/
protected abstract Space getSpace();
/**
* Aligns up an allocation request. The allocation request accepts a
* region, which must be at least particle aligned, an alignment
* request (some power of two number of particles) and an offset (a
* number of particles). There is also a knownAlignment parameter to
* allow a more optimised check when the particular allocator in use
* always aligns at a coarser grain than individual particles, such
* as some free lists.
*
* @param region The region to align up.
* @param alignment The requested alignment
* @param offset The offset from the alignment
* @param knownAlignment The statically known minimum alignment.
* @param fillAlignmentGap whether to fill up holes in the alignment
* with the alignment value ({@link Constants#ALIGNMENT_VALUE})
* @return The aligned up address.
*/
@Inline
public static Address alignAllocation(Address region, int alignment, int offset, int knownAlignment, boolean fillAlignmentGap) {
if (VM.VERIFY_ASSERTIONS) {
VM.assertions._assert(knownAlignment >= MIN_ALIGNMENT);
VM.assertions._assert(MIN_ALIGNMENT >= BYTES_IN_INT);
VM.assertions._assert(!(fillAlignmentGap && region.isZero()));
VM.assertions._assert(alignment <= MAX_ALIGNMENT);
VM.assertions._assert(offset >= 0);
VM.assertions._assert(region.toWord().and(Word.fromIntSignExtend(MIN_ALIGNMENT - 1)).isZero());
VM.assertions._assert((alignment & (MIN_ALIGNMENT - 1)) == 0);
VM.assertions._assert((offset & (MIN_ALIGNMENT - 1)) == 0);
}
// No alignment ever required.
if (alignment <= knownAlignment || MAX_ALIGNMENT <= MIN_ALIGNMENT)
return region;
// May require an alignment
Word mask = Word.fromIntSignExtend(alignment - 1);
Word negOff = Word.fromIntSignExtend(-offset);
Offset delta = negOff.minus(region.toWord()).and(mask).toOffset();
if (fillAlignmentGap && ALIGNMENT_VALUE != 0) {
fillAlignmentGap(region, region.plus(delta));
}
return region.plus(delta);
}
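/*
 * Worked example (illustrative values, assuming 32-bit particles with
 * MIN_ALIGNMENT = 4): aligning region = 0x1004 with alignment = 8 and
 * offset = 0 gives delta = (-0 - 0x1004) & 0x7 = 4, so the returned
 * address is 0x1008 and, when fillAlignmentGap is true, the 4-byte gap
 * at 0x1004 is filled with ALIGNMENT_VALUE.
 */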
/**
* Fill the specified region with the alignment value.
*
* @param start The start of the region.
* @param end A pointer past the end of the region.
*/
@Inline
public static void fillAlignmentGap(Address start, Address end) {
if ((MAX_ALIGNMENT - MIN_ALIGNMENT) == BYTES_IN_INT) {
// At most a single hole
if (!end.diff(start).isZero()) {
start.store(ALIGNMENT_VALUE);
}
} else {
while (start.LT(end)) {
start.store(ALIGNMENT_VALUE);
start = start.plus(BYTES_IN_INT);
}
}
}
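/*
 * Illustrative note (assuming MIN_ALIGNMENT = 4, MAX_ALIGNMENT = 8 and
 * BYTES_IN_INT = 4): any gap produced by alignAllocation is a multiple of
 * MIN_ALIGNMENT smaller than MAX_ALIGNMENT, i.e. either 0 or 4 bytes, so
 * the single-hole fast case above needs at most one int-sized store.
 */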
/**
* Aligns up an allocation request. The allocation request accepts a
* region, which must be at least particle aligned, an alignment
* request (some power of two number of particles) and an offset (a
* number of particles). This version fills any alignment gap with the
* alignment value (see {@link #fillAlignmentGap(Address, Address)}).
*
* @param region The region to align up.
* @param alignment The requested alignment
* @param offset The offset from the alignment
* @return The aligned up address.
*/
@Inline
public static Address alignAllocation(Address region, int alignment, int offset) {
return alignAllocation(region, alignment, offset, MIN_ALIGNMENT, true);
}
/**
* Aligns up an allocation request. The allocation request accepts a
* region, which must be at least particle aligned, an alignment
* request (some power of two number of particles) and an offset (a
* number of particles). This version does not fill the alignment gap.
*
* @param region The region to align up.
* @param alignment The requested alignment
* @param offset The offset from the alignment
* @return The aligned up address.
*/
@Inline
public static Address alignAllocationNoFill(Address region, int alignment, int offset) {
return alignAllocation(region, alignment, offset, MIN_ALIGNMENT, false);
}
/**
* This method calculates the minimum size that will guarantee the allocation
* of a specified number of bytes at the specified alignment.
*
* @param size The number of bytes (not aligned).
* @param alignment The requested alignment (some power of 2).
* @return the minimum size (in bytes) that's necessary to guarantee allocation
* at the given alignment
*/
@Inline
public static int getMaximumAlignedSize(int size, int alignment) {
return getMaximumAlignedSize(size, alignment, MIN_ALIGNMENT);
}
/**
* This method calculates the minimum size that will guarantee the allocation
* of a specified number of bytes at the specified alignment.
*
* @param size The number of bytes (not aligned).
* @param alignment The requested alignment (some power of 2).
* @param knownAlignment The known minimum alignment. Specifically for use in
* allocators that enforce greater than particle alignment. It is a <b>precondition</b>
* that size is aligned to knownAlignment, and that knownAlignment >=
* {@link Constants#MIN_ALIGNMENT}.
* @return the minimum size (in bytes) that's necessary to guarantee allocation
* at the given alignment
*/
@Inline
public static int getMaximumAlignedSize(int size, int alignment, int knownAlignment) {
if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(size == Conversions.roundDown(size, knownAlignment));
if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(knownAlignment >= MIN_ALIGNMENT);
if (MAX_ALIGNMENT <= MIN_ALIGNMENT || alignment <= knownAlignment) {
return size;
} else {
return size + alignment - knownAlignment;
}
}
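/*
 * Worked example (illustrative values, assuming MIN_ALIGNMENT = 4):
 * getMaximumAlignedSize(24, 16, 4) returns 24 + 16 - 4 = 36. Starting from
 * any 4-byte aligned address, at most 16 - 4 = 12 bytes of padding are
 * needed to reach a 16-byte aligned address, and 12 + 24 = 36, so a
 * 36-byte request always leaves room for the aligned 24-byte object.
 */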
/**
* Single slow path allocation attempt. This is called by {@link #allocSlowInline(int, int, int)}.
*
* @param bytes The size of the allocation request
* @param alignment The required alignment
* @param offset The alignment offset
* @return The start address of the region, or zero if allocation fails
*/
protected abstract Address allocSlowOnce(int bytes, int alignment, int offset);
/**
* <b>Out-of-line</b> slow path allocation. This method forces slow path
* allocation to be out of line (typically desirable, but not when the
* calling context is already explicitly out-of-line).
*
* @param bytes The size of the allocation request
* @param alignment The required alignment
* @param offset The alignment offset
* @return The start address of the region, or zero if allocation fails
*/
@NoInline
public final Address allocSlow(int bytes, int alignment, int offset) {
return allocSlowInline(bytes, alignment, offset);
}
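/*
 * Illustrative fast-path usage (a hypothetical bump-pointer allocator
 * sketch; the cursor and limit fields are assumed here and are not part
 * of this class):
 *
 *   Address start = alignAllocation(cursor, alignment, offset);
 *   Address end = start.plus(bytes);
 *   if (end.GT(limit))
 *     return allocSlow(bytes, alignment, offset); // out-of-line slow path
 *   cursor = end;
 *   return start;
 */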
/**
* <b>Inline</b> slow path allocation. This method attempts allocSlowOnce
* several times, allowing collection to occur between attempts, and
* ensures that execution safely resumes by taking care of potential
* thread/mutator context affinity changes. All allocators should use this
* as the trampoline for slow path allocation.
*
* @param bytes The size of the allocation request
* @param alignment The required alignment
* @param offset The alignment offset
* @return The start address of the region, or zero if allocation fails
*/
@Inline
public final Address allocSlowInline(int bytes, int alignment, int offset) {
Allocator current = this;
Space space = current.getSpace();
// Information about the previous collection.
boolean emergencyCollection = false;
while (true) {
// Try to allocate using the slow path
Address result = current.allocSlowOnce(bytes, alignment, offset);
// Collector allocation always succeeds (or fails inside allocSlow).
if (!VM.activePlan.isMutator()) {
if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!result.isZero());
return result;
}
if (!result.isZero()) {
// Report allocation success to assist OutOfMemory handling.
if (!allocationSuccess) {
oomLock.acquire();
allocationSuccess = true;
oomLock.release();
}
return result;
}
if (emergencyCollection) {
// Check if we are in an OutOfMemory situation
oomLock.acquire();
boolean failWithOOM = !allocationSuccess;
// This seems odd, but we must allow each OOM to run its course (and maybe give us back memory)
allocationSuccess = true;
oomLock.release();
if (failWithOOM) {
// Nobody has successfully allocated since an emergency collection: OutOfMemory
VM.collection.outOfMemory();
VM.assertions.fail("Not Reached");
return Address.zero();
}
}
/* This is in case a GC occurs, and our mutator context is stale.
* In some VMs the scheduler can change the affinity between the
* current thread and the mutator context. This is possible for
* VMs that dynamically multiplex Java threads onto multiple mutator
* contexts. */
current = VM.activePlan.mutator().getAllocatorFromSpace(space);
/*
* Record whether last collection was an Emergency collection.
* If so, we make one more attempt to allocate before we signal
* an OOM.
*/
emergencyCollection = Plan.isEmergencyCollection();
}
}
}