-
Notifications
You must be signed in to change notification settings - Fork 5
/
guest_amd64_defs.h
426 lines (317 loc) · 16.2 KB
/
guest_amd64_defs.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
/*---------------------------------------------------------------*/
/*--- begin guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/
/*
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2004-2010 OpenWorks LLP
info@open-works.net
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
The GNU General Public License is contained in the file COPYING.
Neither the names of the U.S. Department of Energy nor the
University of California nor the names of its contributors may be
used to endorse or promote products derived from this software
without prior written permission.
*/
/* Only to be used within the guest-amd64 directory. */
#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H
/*---------------------------------------------------------*/
/*--- amd64 to IR conversion ---*/
/*---------------------------------------------------------*/
/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
bb_to_IR.h.
   Parameters (NOTE(review): semantics inferred from names; the
   authoritative contract is the DisOneInstrFn typedef in bb_to_IR.h):
     irbb            -- IR superblock to append the insn's IR to
     put_IP          -- whether to emit an instruction-pointer update
     resteerOkFn     -- callback asking if a branch target may be chased
     resteerCisOk    -- presumably permits chasing conditional targets;
                        confirm against bb_to_IR.h
     callback_opaque -- opaque cookie passed through to resteerOkFn
     guest_code      -- host address of the guest code being translated
     delta           -- offset of this insn within guest_code
     guest_IP        -- guest address of this insn
     guest_arch      -- guest architecture tag
     archinfo        -- guest CPU capability description
     abiinfo         -- guest/host ABI description
     host_bigendian  -- endianness of the host
   Returns a DisResult describing the outcome of disassembly. */
extern
DisResult disInstr_AMD64 ( IRSB* irbb,
Bool put_IP,
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
UChar* guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
Bool host_bigendian );
/* Used by the optimiser to specialise calls to helpers.  Given a
   helper name and its argument expressions, returns a cheaper
   equivalent IR expression, or (presumably) NULL when no
   specialisation applies -- TODO confirm the NULL convention in the
   implementation. */
extern
IRExpr* guest_amd64_spechelper ( HChar* function_name,
IRExpr** args );
/* Describes to the optimiser which part of the guest state require
precise memory exceptions. This is logically part of the guest
state description. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int );
/* Layout of the amd64 guest state, as consumed by the rest of VEX
   (instrumentation tools, register allocation across helper calls). */
extern
VexGuestLayout amd64guest_layout;
/*---------------------------------------------------------*/
/*--- amd64 guest helpers ---*/
/*---------------------------------------------------------*/
/* --- CLEAN HELPERS --- */
/* Clean helpers: pure functions of their arguments, with no access to
   guest state or memory, so the optimiser may freely CSE/remove calls
   to them. */
/* Compute all of %rflags from a flags thunk (cc_op/cc_dep1/cc_dep2/
   cc_ndep -- see the thunk description further below). */
extern ULong amd64g_calculate_rflags_all (
ULong cc_op,
ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
);
/* Compute just the carry flag from a flags thunk. */
extern ULong amd64g_calculate_rflags_c (
ULong cc_op,
ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
);
/* Evaluate the given AMD64Condcode against a flags thunk; see the
   AMD64Condcode enum at the end of this file. */
extern ULong amd64g_calculate_condition (
ULong/*AMD64Condcode*/ cond,
ULong cc_op,
ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
);
/* FXAM: classify an x87 value given its tag and 64-bit representation. */
extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );
/* RCR/RCL: rotate-through-carry, done out of line because they are too
   awkward to express in IR (see the thunk commentary below).  'sz' is
   the operand size in bytes. */
extern ULong amd64g_calculate_RCR (
ULong arg, ULong rot_amt, ULong rflags_in, Long sz
);
extern ULong amd64g_calculate_RCL (
ULong arg, ULong rot_amt, ULong rflags_in, Long sz
);
/* Validate/construct the x87 control word and the SSE MXCSR,
   converting between the hardware encodings and VEX's rounding-mode
   representation. */
extern ULong amd64g_check_fldcw ( ULong fpucw );
extern ULong amd64g_create_fpucw ( ULong fpround );
extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );
extern ULong amd64g_create_mxcsr ( ULong sseround );
/* NOTE(review): despite the "dirtyhelper" naming, these two are
   declared in the clean-helper section; they read/write guest state
   and memory, so they are presumably genuine dirty helpers -- confirm
   against the implementation. */
extern VexEmWarn amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );
/* Translate a guest virtual_addr into a guest linear address by
consulting the supplied LDT/GDT structures. Their representation
must be as specified in pub/libvex_guest_amd64.h. To indicate a
translation failure, 1<<32 is returned. On success, the lower 32
bits of the returned result indicate the linear address.
*/
//extern
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
// UInt seg_selector, UInt virtual_addr );
/* MMX/SSE operations done out of line; arguments are the raw 64-bit
   lanes of the operands. */
extern ULong amd64g_calculate_mmx_pmaddwd ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw ( ULong, ULong );
extern ULong amd64g_calculate_mmx_pmovmskb ( ULong );
extern ULong amd64g_calculate_sse_pmovmskb ( ULong w64hi, ULong w64lo );
/* --- DIRTY HELPERS --- */
/* Dirty helpers: may read/write guest state, memory, or have other
   side effects; the optimiser must not move or remove calls to them. */
/* Load/store an x87 80-bit value at the given guest address,
   converting to/from a 64-bit representation (per the F80le naming;
   NOTE(review): exact conversion semantics live in the
   implementation). */
extern ULong amd64g_dirtyhelper_loadF80le ( ULong/*addr*/ );
extern void amd64g_dirtyhelper_storeF80le ( ULong/*addr*/, ULong/*data*/ );
/* CPUID emulations at two capability levels; each writes the result
   registers back into the guest state. */
extern void amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
/* FINIT: reset the guest's x87 state. */
extern void amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );
/* FXSAVE: dump FP/SSE state to the guest address in the HWord arg. */
extern void amd64g_dirtyhelper_FXSAVE ( VexGuestAMD64State*, HWord );
/* RDTSC: returns a time-stamp-counter value. */
extern ULong amd64g_dirtyhelper_RDTSC ( void );
/* Port I/O.  'sz' is the access width in bytes (1, 2 or 4). */
extern ULong amd64g_dirtyhelper_IN ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
ULong sz/*1,2 or 4*/ );
/* Currently-unused helpers, kept for reference. */
//extern void amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );
//extern VexEmWarn
// amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );
//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );
//extern VexEmWarn
// amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
/*---------------------------------------------------------*/
/*--- Condition code stuff ---*/
/*---------------------------------------------------------*/
/* rflags masks */
/* Bit positions of the condition-code flags within the simulated
   %rflags, matching the architectural RFLAGS layout:
   O(verflow)=11, S(ign)=7, Z(ero)=6, A(djust)=4, P(arity)=2,
   C(arry)=0. */
#define AMD64G_CC_SHIFT_O 11
#define AMD64G_CC_SHIFT_S 7
#define AMD64G_CC_SHIFT_Z 6
#define AMD64G_CC_SHIFT_A 4
#define AMD64G_CC_SHIFT_C 0
#define AMD64G_CC_SHIFT_P 2
/* Single-bit masks derived from the shifts above. */
#define AMD64G_CC_MASK_O (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P (1ULL << AMD64G_CC_SHIFT_P)
/* FPU flag masks */
/* x87 condition-code bits C0..C3 within the FPU status word. */
#define AMD64G_FC_SHIFT_C3 14
#define AMD64G_FC_SHIFT_C2 10
#define AMD64G_FC_SHIFT_C1 9
#define AMD64G_FC_SHIFT_C0 8
#define AMD64G_FC_MASK_C3 (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2 (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1 (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0 (1ULL << AMD64G_FC_SHIFT_C0)
/* %RFLAGS thunk descriptors. A four-word thunk is used to record
details of the most recent flag-setting operation, so the flags can
be computed later if needed. It is possible to do this a little
more efficiently using a 3-word thunk, but that makes it impossible
to describe the flag data dependencies sufficiently accurately for
Memcheck. Hence 4 words are used, with minimal loss of efficiency.
The four words are:
CC_OP, which describes the operation.
CC_DEP1 and CC_DEP2. These are arguments to the operation.
We want Memcheck to believe that the resulting flags are
data-dependent on both CC_DEP1 and CC_DEP2, hence the
name DEP.
CC_NDEP. This is a 3rd argument to the operation which is
sometimes needed. We arrange things so that Memcheck does
not believe the resulting flags are data-dependent on CC_NDEP
("not dependent").
To make Memcheck believe that (the definedness of) the encoded
flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
requires two things:
(1) In the guest state layout info (amd64guest_layout), CC_OP and
CC_NDEP are marked as always defined.
(2) When passing the thunk components to an evaluation function
(calculate_condition, calculate_eflags, calculate_eflags_c) the
IRCallee's mcx_mask must be set so as to exclude from
consideration all passed args except CC_DEP1 and CC_DEP2.
Strictly speaking only (2) is necessary for correctness. However,
(1) helps efficiency in that since (2) means we never ask about the
definedness of CC_OP or CC_NDEP, we may as well not even bother to
track their definedness.
When building the thunk, it is always necessary to write words into
CC_DEP1 and CC_DEP2, even if those args are not used given the
CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGIC1/2/4).
This is important because otherwise Memcheck could give false
positives as it does not understand the relationship between the
CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
definedness of the stored flags always depends on both CC_DEP1 and
CC_DEP2.
However, it is only necessary to set CC_NDEP when the CC_OP value
requires it, because Memcheck ignores CC_NDEP, and the evaluation
functions do understand the CC_OP fields and will only examine
CC_NDEP for suitable values of CC_OP.
A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   -----------------------------------------------------------------

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         result

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused.
Therefore Memcheck will believe the following:
* add/sub/mul -- definedness of result flags depends on definedness
of both args.
* adc/sbb -- definedness of result flags depends on definedness of
both args and definedness of the old C flag. Because only two
DEP fields are available, the old C flag is XOR'd into the second
arg so that Memcheck sees the data dependency on it. That means
the NDEP field must contain a second copy of the old C flag
so that the evaluation functions can correctly recover the second
arg.
* and/or/xor are straightforward -- definedness of result flags
depends on definedness of result value.
* inc/dec -- definedness of result flags depends only on
definedness of result. This isn't really true -- it also depends
on the old C flag. However, we don't want Memcheck to see that,
and so the old C flag must be passed in NDEP and not in DEP2.
It's inconceivable that a compiler would generate code that puts
the C flag in an undefined state, then does an inc/dec, which
leaves C unchanged, and then makes a conditional jump/move based
on C. So our fiction seems a good approximation.
* shl/shr/sar -- straightforward, again, definedness of result
flags depends on definedness of result value. The subshifted
value (value shifted one less) is also needed, but its
definedness is the same as the definedness of the shifted value.
* rol/ror -- these only set O and C, and leave A Z C P alone.
However it seems prudent (as per inc/dec) to say the definedness
of all resulting flags depends on the definedness of the result,
hence the old flags must go in as NDEP and not DEP2.
* rcl/rcr are too difficult to do in-line, and so are done by a
helper function. They are not part of this scheme. The helper
function takes the value to be rotated, the rotate amount and the
old flags, and returns the new flags and the rotated value.
Since the helper's mcx_mask does not have any set bits, Memcheck
will lazily propagate undefinedness from any of the 3 args into
both results (flags and actual value).
*/
/* CC_OP values for the flags thunk.  Apart from COPY, every operation
   comes in four operand sizes -- B(yte), W(ord), L(ongword, 32-bit)
   and Q(uadword, 64-bit) -- as four consecutive enumerators, so the
   evaluation helpers can recover the size from the op number.  The
   DEP1/DEP2/NDEP annotations describe what each thunk field holds for
   that operation (see the long commentary above). */
enum {
AMD64G_CC_OP_COPY=0, /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
/* just copy DEP1 to output */
AMD64G_CC_OP_ADDB, /* 1 */
AMD64G_CC_OP_ADDW, /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
AMD64G_CC_OP_ADDL, /* 3 */
AMD64G_CC_OP_ADDQ, /* 4 */
AMD64G_CC_OP_SUBB, /* 5 */
AMD64G_CC_OP_SUBW, /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
AMD64G_CC_OP_SUBL, /* 7 */
AMD64G_CC_OP_SUBQ, /* 8 */
AMD64G_CC_OP_ADCB, /* 9 */
AMD64G_CC_OP_ADCW, /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
AMD64G_CC_OP_ADCL, /* 11 */
AMD64G_CC_OP_ADCQ, /* 12 */
AMD64G_CC_OP_SBBB, /* 13 */
AMD64G_CC_OP_SBBW, /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
AMD64G_CC_OP_SBBL, /* 15 */
AMD64G_CC_OP_SBBQ, /* 16 */
AMD64G_CC_OP_LOGICB, /* 17 */
AMD64G_CC_OP_LOGICW, /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
AMD64G_CC_OP_LOGICL, /* 19 */
AMD64G_CC_OP_LOGICQ, /* 20 */
AMD64G_CC_OP_INCB, /* 21 */
AMD64G_CC_OP_INCW, /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
AMD64G_CC_OP_INCL, /* 23 */
AMD64G_CC_OP_INCQ, /* 24 */
AMD64G_CC_OP_DECB, /* 25 */
AMD64G_CC_OP_DECW, /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
AMD64G_CC_OP_DECL, /* 27 */
AMD64G_CC_OP_DECQ, /* 28 */
AMD64G_CC_OP_SHLB, /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
AMD64G_CC_OP_SHLW, /* 30 where res' is like res but shifted one bit less */
AMD64G_CC_OP_SHLL, /* 31 */
AMD64G_CC_OP_SHLQ, /* 32 */
AMD64G_CC_OP_SHRB, /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
AMD64G_CC_OP_SHRW, /* 34 where res' is like res but shifted one bit less */
AMD64G_CC_OP_SHRL, /* 35 */
AMD64G_CC_OP_SHRQ, /* 36 */
AMD64G_CC_OP_ROLB, /* 37 */
AMD64G_CC_OP_ROLW, /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
AMD64G_CC_OP_ROLL, /* 39 */
AMD64G_CC_OP_ROLQ, /* 40 */
AMD64G_CC_OP_RORB, /* 41 */
AMD64G_CC_OP_RORW, /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
AMD64G_CC_OP_RORL, /* 43 */
AMD64G_CC_OP_RORQ, /* 44 */
AMD64G_CC_OP_UMULB, /* 45 */
AMD64G_CC_OP_UMULW, /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
AMD64G_CC_OP_UMULL, /* 47 */
AMD64G_CC_OP_UMULQ, /* 48 */
AMD64G_CC_OP_SMULB, /* 49 */
AMD64G_CC_OP_SMULW, /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
AMD64G_CC_OP_SMULL, /* 51 */
AMD64G_CC_OP_SMULQ, /* 52 */
AMD64G_CC_OP_NUMBER /* 53: count of valid ops, not itself an op */
};
/* Condition codes, as evaluated by amd64g_calculate_condition.
   The numbering pairs each condition (even) with its negation (odd:
   NO=O+1, NB=B+1, ...), matching the 4-bit condition encodings of the
   amd64 Jcc/SETcc/CMOVcc instructions.  AMD64CondAlways (16) is
   outside that encoding and is an internal hack. */
typedef
enum {
AMD64CondO = 0, /* overflow */
AMD64CondNO = 1, /* no overflow */
AMD64CondB = 2, /* below */
AMD64CondNB = 3, /* not below */
AMD64CondZ = 4, /* zero */
AMD64CondNZ = 5, /* not zero */
AMD64CondBE = 6, /* below or equal */
AMD64CondNBE = 7, /* not below or equal */
AMD64CondS = 8, /* negative */
AMD64CondNS = 9, /* not negative */
AMD64CondP = 10, /* parity even */
AMD64CondNP = 11, /* not parity even */
AMD64CondL = 12, /* jump less */
AMD64CondNL = 13, /* not less */
AMD64CondLE = 14, /* less or equal */
AMD64CondNLE = 15, /* not less or equal */
AMD64CondAlways = 16 /* HACK */
}
AMD64Condcode;
#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */
/*---------------------------------------------------------------*/
/*--- end guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/