/
iasm_amd64.go
82 lines (70 loc) · 2.09 KB
/
iasm_amd64.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
package pgen
import (
`math`
`github.com/chenzhuoyu/iasm/x86_64`
)
// Register aliases re-exported from the iasm x86_64 package, so code in
// pgen can name registers without the x86_64 package qualifier.
const (
    AL = x86_64.AL
    AX = x86_64.AX
    EAX = x86_64.EAX
    RAX = x86_64.RAX
    RCX = x86_64.RCX
    RDX = x86_64.RDX
    RBX = x86_64.RBX
    RSP = x86_64.RSP
    RBP = x86_64.RBP
    RSI = x86_64.RSI
    RDI = x86_64.RDI
    R8 = x86_64.R8
    R9 = x86_64.R9
    R10 = x86_64.R10
    R11 = x86_64.R11
    R12 = x86_64.R12
    R13 = x86_64.R13
    R14 = x86_64.R14
    R15 = x86_64.R15
    XMM15 = x86_64.XMM15
)
// allocationOrder lists the 11 allocatable general-purpose registers in the
// order the register allocator should prefer them. The ordering is
// deliberate (see per-line comments); do not reorder without reviewing the
// allocator's interaction with the Go register ABI.
var allocationOrder = [11]x86_64.Register64 {
    R12, R13, R14, R15, // reserved registers first (except for RBX)
    R10, R11, // then scratch registers
    R9, R8, RCX, RDX, // then argument registers in reverse order (RDI, RSI and RAX are always free)
    RBX, // finally the RBX, we put RBX here to reduce collision with Go register ABI
}
// Abs returns a memory operand that addresses the absolute displacement
// disp. It is a thin wrapper over x86_64.Abs.
func Abs(disp int32) *x86_64.MemoryOperand {
    return x86_64.Abs(disp)
}
// Ptr returns a memory operand of the form [base + disp]. It is a thin
// wrapper over x86_64.Ptr.
func Ptr(base x86_64.Register, disp int32) *x86_64.MemoryOperand {
    return x86_64.Ptr(base, disp)
}
// Sib returns a scale-index-base memory operand of the form
// [base + index*scale + disp]. It is a thin wrapper over x86_64.Sib.
func Sib(base x86_64.Register, index x86_64.Register64, scale uint8, disp int32) *x86_64.MemoryOperand {
    return x86_64.Sib(base, index, scale, disp)
}
// isPow2 reports whether v is a positive power of two.
//
// The classic v&(v-1) == 0 test alone is not sufficient: it accepts 0
// (which is not a power of two) and math.MinInt64 (where v-1 wraps around
// so the AND yields 0). Requiring v > 0 rules out both cases.
func isPow2(v int64) bool {
    return v > 0 && v & (v - 1) == 0
}
// isInt32 reports whether v fits in a signed 32-bit integer, i.e. whether
// it can be encoded as a 32-bit immediate or displacement.
func isInt32(v int64) bool {
    return !(v < math.MinInt32 || v > math.MaxInt32)
}
// isReg64 reports whether v is a 64-bit general-purpose register.
func isReg64(v x86_64.Register) bool {
    _, ok := v.(x86_64.Register64)
    return ok
}
// toAddress evaluates the label p and returns its resolved value as an
// address. It panics if the label cannot be evaluated (e.g. it has not been
// linked yet).
func toAddress(p *x86_64.Label) uintptr {
    v, err := p.Evaluate()
    if err != nil {
        panic(err)
    }
    return uintptr(v)
}
// isSimpleMem reports whether v is a plain register-indirect memory operand:
// no masking, no broadcast, no label reference, no index/scale, no
// displacement — just a 64-bit base register.
func isSimpleMem(v *x86_64.MemoryOperand) bool {
    if v.Masked || v.Broadcast != 0 {
        return false
    }
    addr := &v.Addr
    if addr.Type != x86_64.Memory || addr.Offset != 0 || addr.Reference != nil {
        return false
    }
    mem := &addr.Memory
    if mem.Scale > 1 || mem.Index != nil || mem.Displacement != 0 {
        return false
    }
    return isReg64(mem.Base)
}