-
Notifications
You must be signed in to change notification settings - Fork 3
/
AbstractParser.java
344 lines (321 loc) · 13.3 KB
/
AbstractParser.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
/*******************************************************************************
* Copyright (c) 2008, 2023 SAP AG, IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License 2.0
* which accompanies this distribution, and is available at
* https://www.eclipse.org/legal/epl-2.0/
*
* SPDX-License-Identifier: EPL-2.0
*
* Contributors:
* SAP AG - initial API and implementation
* IBM Corporation - multiple heap dumps
* Netflix (Jason Koch) - refactors for increased performance and concurrency
*******************************************************************************/
package org.eclipse.mat.hprof;
import java.io.IOException;
import org.eclipse.mat.hprof.describer.Version;
import org.eclipse.mat.hprof.ui.HprofPreferences;
import org.eclipse.mat.hprof.ui.HprofPreferences.HprofStrictness;
import org.eclipse.mat.snapshot.ISnapshot;
import org.eclipse.mat.snapshot.model.IObject;
import org.eclipse.mat.snapshot.model.IPrimitiveArray;
import org.eclipse.mat.snapshot.model.ObjectReference;
import org.eclipse.mat.util.IProgressListener.Severity;
import org.eclipse.mat.util.MessageUtil;
import org.eclipse.mat.util.SimpleMonitor.Listener;
// Hprof binary format as defined here (original heap-snapshot.dev.java.net link is defunct):
// https://hg.openjdk.org/jdk8u/jdk8u/jdk/raw-file/tip/src/share/demo/jvmti/hprof/manual.html
/* package */abstract class AbstractParser
{
interface Constants
{
interface Record
{
int STRING_IN_UTF8 = 0x01;
int LOAD_CLASS = 0x02;
int UNLOAD_CLASS = 0x03;
int STACK_FRAME = 0x04;
int STACK_TRACE = 0x05;
int ALLOC_SITES = 0x06;
int HEAP_SUMMARY = 0x07;
int START_THREAD = 0x0a;
int END_THREAD = 0x0b;
int HEAP_DUMP = 0x0c;
int HEAP_DUMP_SEGMENT = 0x1c;
int HEAP_DUMP_END = 0x2c;
int CPU_SAMPLES = 0x0d;
int CONTROL_SETTINGS = 0x0e;
}
interface DumpSegment
{
int ROOT_UNKNOWN = 0xff;
int ROOT_JNI_GLOBAL = 0x01;
int ROOT_JNI_LOCAL = 0x02;
int ROOT_JAVA_FRAME = 0x03;
int ROOT_NATIVE_STACK = 0x04;
int ROOT_STICKY_CLASS = 0x05;
int ROOT_THREAD_BLOCK = 0x06;
int ROOT_MONITOR_USED = 0x07;
int ROOT_THREAD_OBJECT = 0x08;
int CLASS_DUMP = 0x20;
int INSTANCE_DUMP = 0x21;
int OBJECT_ARRAY_DUMP = 0x22;
int PRIMITIVE_ARRAY_DUMP = 0x23;
}
}
protected Version version;
// The size of identifiers in the dump file
protected int idSize;
protected final HprofPreferences.HprofStrictness strictnessPreference;
/** First stack frame address */
protected long stackFrameBase = 0x100;
/** Alignment of stack frames - should not be stricter than rest of heap */
protected long stackFrameAlign = 0x100;
/*
* Names used as pseudo-class names
* Not translatable
*/
static final String METHOD = "<method>"; //$NON-NLS-1$
static final String METHOD_TYPE = "<method type>"; //$NON-NLS-1$
static final String STACK_FRAME = "<stack frame>"; //$NON-NLS-1$
static final String NATIVE_MEMORY = "<native memory>"; //$NON-NLS-1$
static final String NATIVE_MEMORY_TYPE = "<native memory type>"; //$NON-NLS-1$
/*
* Field names for pseudo classes.
* Not translatable
*/
static final String STACK_DEPTH = "stackDepth"; //$NON-NLS-1$
static final String FRAME_NUMBER = "frameNumber"; //$NON-NLS-1$
static final String LOCATION_ADDRESS = "locationAddress"; //$NON-NLS-1$
static final String COMPILATION_LEVEL = "compilationLevel"; //$NON-NLS-1$
static final String NATIVE = "native"; //$NON-NLS-1$
static final String LINE_NUMBER = "lineNumber"; //$NON-NLS-1$
static final String DECLARING_CLASS = "declaringClass"; //$NON-NLS-1$
static final String METHOD_NAME = "methodName"; //$NON-NLS-1$
static final String FILE_NAME = "fileName"; //$NON-NLS-1$
/* package */AbstractParser(HprofPreferences.HprofStrictness strictnessPreference)
{
this.strictnessPreference = strictnessPreference;
}
/* protected */static Version readVersion(IPositionInputStream in) throws IOException
{
StringBuilder version = new StringBuilder();
int bytesRead = 0;
while (bytesRead < 20)
{
byte b = (byte) in.read();
bytesRead++;
if (b != 0)
{
version.append((char) b);
}
else
{
Version answer = Version.byLabel(version.toString());
if (answer == null)
{
if (bytesRead <= 13) // did not read "JAVA PROFILE "
throw new IOException(Messages.AbstractParser_Error_NotHeapDump);
else
throw new IOException(MessageUtil.format(Messages.AbstractParser_Error_UnknownHPROFVersion,
version.toString()));
}
if (answer == Version.JDK12BETA3) // not supported by MAT
throw new IOException(MessageUtil.format(Messages.AbstractParser_Error_UnsupportedHPROFVersion,
answer.getLabel()));
return answer;
}
}
throw new IOException(Messages.AbstractParser_Error_InvalidHPROFHeader);
}
protected Object readValue(IPositionInputStream in, ISnapshot snapshot) throws IOException
{
byte type = in.readByte();
return readValue(in, snapshot, type);
}
protected Object readValue(IPositionInputStream in, ISnapshot snapshot, int type) throws IOException
{
switch (type)
{
case IObject.Type.OBJECT:
long id = in.readID(idSize);
return id == 0 ? null : new ObjectReference(snapshot, id);
case IObject.Type.BOOLEAN:
return in.readByte() != 0;
case IObject.Type.CHAR:
return in.readChar();
case IObject.Type.FLOAT:
return in.readFloat();
case IObject.Type.DOUBLE:
return in.readDouble();
case IObject.Type.BYTE:
return in.readByte();
case IObject.Type.SHORT:
return in.readShort();
case IObject.Type.INT:
return in.readInt();
case IObject.Type.LONG:
return in.readLong();
default:
throw new IOException(MessageUtil.format(Messages.AbstractParser_Error_IllegalType, type, in.position()));
}
}
public static Object readValue(IPositionInputStream in, ISnapshot snapshot, int type, int idSize) throws IOException
{
switch (type)
{
case IObject.Type.OBJECT:
long id = in.readID(idSize);
return id == 0 ? null : new ObjectReference(snapshot, id);
case IObject.Type.BOOLEAN:
return in.readByte() != 0;
case IObject.Type.CHAR:
return in.readChar();
case IObject.Type.FLOAT:
return in.readFloat();
case IObject.Type.DOUBLE:
return in.readDouble();
case IObject.Type.BYTE:
return in.readByte();
case IObject.Type.SHORT:
return in.readShort();
case IObject.Type.INT:
return in.readInt();
case IObject.Type.LONG:
return in.readLong();
default:
throw new IOException(MessageUtil.format(Messages.AbstractParser_Error_IllegalType, type, in.position()));
}
}
protected void skipValue(IPositionInputStream in) throws IOException
{
byte type = in.readByte();
skipValue(in, type);
}
protected void skipValue(IPositionInputStream in, int type) throws IOException
{
int skip;
if (type == IObject.Type.OBJECT)
skip = idSize;
else
skip = IPrimitiveArray.ELEMENT_SIZE[type];
while (skip > 0)
{
int skipped = in.skipBytes(skip);
if (skipped == 0)
{
in.readByte();
skipped = 1;
}
skip -= skipped;
}
}
/**
* Usually the HPROF file contains exactly one heap dump. However, when
* acquiring heap dumps via the legacy HPROF agent, the dump file can
* possibly contain multiple heap dumps. Currently there is no API and no UI
* to determine which dump to use. As this happens very rarely, we decided
* to go with the following mechanism: use only the first dump unless the
* user provides a dump number via environment variable. Once the dump has
* been parsed, the same dump is reopened regardless of the environment
* variable.
* MAT_HPROF_DUMP_NR is a 0 offset number, or direct id
* The returned value is an 0 offset number or 1 offset id, e.g. #1
*/
protected String determineDumpNumber()
{
String dumpNr = System.getProperty("MAT_HPROF_DUMP_NR"); //$NON-NLS-1$
return dumpNr;
}
protected String dumpIdentifier(int n)
{
return "#" + (n+1); //$NON-NLS-1$
}
protected boolean dumpMatches(int n, String match)
{
if (match == null && n == 0)
return true;
if (dumpIdentifier(n).equals(match))
return true;
try
{
int nm = Integer.parseInt(match);
return nm == n;
}
catch (NumberFormatException e)
{
}
return false;
}
/**
* It seems the HPROF file writes the length field as an unsigned int.
*/
private final static long MAX_UNSIGNED_4BYTE_INT = 4294967296L;
/**
* It seems the HPROF spec only allows 4 bytes for record length, so a
* record length greater than 4GB will be overflowed and will be useless and
* throw off the rest of the processing. There's no good way to tell the
* overflow has occurred but if the strictness preference has been set to
* permissive, we can check the most common case of a heap dump record that
* should run to the end of the file.
*
* @param fileSize
* The total file size.
* @param curPos
* The current position of the input stream.
* @param record
* The record identifier.
* @param length
* The length read from the record.
* @param monitor
* The listener to send any warnings.
* @return The updated length or the original length if no update is made.
*/
protected long updateLengthIfNecessary(long fileSize, long curPos, int record, long length, Listener monitor)
{
// Sometimes the HPROF file is truncated during the write and the
// length field is never updated from 0. Presume it goes to the end.
if (length == 0 && (
strictnessPreference == HprofStrictness.STRICTNESS_WARNING ||
strictnessPreference == HprofStrictness.STRICTNESS_PERMISSIVE))
{
long length1 = fileSize - curPos - 9;
if (length1 > 0)
{
monitor.sendUserMessage(Severity.WARNING, MessageUtil.format(
Messages.AbstractParser_GuessedRecordLength,
Integer.toHexString(record),
Long.toHexString(curPos), length, length1), null);
length = length1;
}
}
// See https://bugs.eclipse.org/bugs/show_bug.cgi?id=404679
//
// We do this check no matter the strictness preference. Since we're
// checking based on an exact overflow calculation and we're only
// inferring the heap dump record if it goes all the way to the end of
// the file, it seems this can be "safely" done all the time.
if (
// strictnessPreference == HprofStrictness.STRICTNESS_PERMISSIVE &&
record == Constants.Record.HEAP_DUMP)
{
long bytesLeft = fileSize - curPos - 9;
if (bytesLeft >= MAX_UNSIGNED_4BYTE_INT)
{
// We can be more confident in this guess by assuming that this
// record goes to the end of the file, so we can actually
// emulate the overflow and see if that matches up.
//
if ((bytesLeft - length) % MAX_UNSIGNED_4BYTE_INT == 0)
{
monitor.sendUserMessage(Severity.WARNING, MessageUtil.format(
Messages.Pass1Parser_GuessingLengthOverflow, Integer.toHexString(record),
Long.toHexString(curPos), length, bytesLeft), null);
length = bytesLeft;
}
}
}
return length;
}
}