This repository has been archived by the owner on May 11, 2020. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 150
/
lower_amd64.go
344 lines (305 loc) · 8.5 KB
/
lower_amd64.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
// Copyright 2019 The go-interpreter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package compile
import (
"fmt"
asm "github.com/twitchyliquid64/golang-asm"
"github.com/twitchyliquid64/golang-asm/obj"
"github.com/twitchyliquid64/golang-asm/obj/x86"
)
const (
	// APushWasmStack is a symbolic instruction representing the movement
	// of a value in an x86-64 register, to the top of the WASM stack.
	APushWasmStack = x86.ALAST + iota
	// APopWasmStack is a symbolic instruction representing the movement
	// of the top of the WASM stack, to a value in an x86-64 register.
	APopWasmStack
	// ALoadGlobalsSliceHeader is a symbolic instruction representing that
	// the slice header for wasm globals should be loaded into R15. This
	// allows us to defer instruction generation till later phases, so
	// this instruction can be a NOP if already loaded.
	ALoadGlobalsSliceHeader
	// ALoadLocalsFirstElem is a symbolic instruction representing that
	// the first wasm local should be loaded into R15. This allows us to
	// defer instruction generation till later phases, so this instruction
	// can be a NOP if already loaded.
	ALoadLocalsFirstElem
	// AFlushStackLength is a symbolic instruction representing that
	// the WASM stack length should be flushed from registers to main memory,
	// if it is dirty.
	AFlushStackLength
)
// dirtyRegs tracks which cached values the scratch registers currently
// hold, so repeated loads can be lowered to NOPs and a dirty stack
// length can be written back before it is clobbered.
type dirtyRegs struct {
	R13 dirtyState // caches the WASM stack length (stateStackLen)
	R14 dirtyState // caches the stack first-element pointer (stateStackFirstElem)
	R15 dirtyState // caches locals base or globals slice header (stateLocalFirstElem / stateGlobalSliceHeader)
}
func (regs *dirtyRegs) flush(inst *obj.Prog, builder *asm.Builder, reg uint16) {
var regState *dirtyState
switch reg {
case x86.REG_R13:
regState = ®s.R13
default:
panic(fmt.Sprintf("compile: unknown register: %v", reg))
}
switch *regState {
case stateScratch, stateStackFirstElem, stateLocalFirstElem, stateGlobalSliceHeader:
inst.As = obj.ANOP
return // Value does not change - no need to write back.
case stateStackLen:
inst.As = x86.AMOVQ
inst.From.Type = obj.TYPE_REG
inst.From.Reg = x86.REG_R13
inst.To.Type = obj.TYPE_MEM
inst.To.Reg = x86.REG_R10
inst.To.Offset = 8
default:
panic(fmt.Sprintf("compile: unknown regState: %v", regState))
}
*regState = stateScratch
}
// lowerAMD64 converts symbolic instructions into concrete x86-64 instructions.
func (b *AMD64Backend) lowerAMD64(builder *asm.Builder) {
	regs := &dirtyRegs{}
	// Walk the instruction list starting just past the root. Concrete
	// instructions spliced in by the emit* helpers are traversed too,
	// but match none of the symbolic opcodes and pass through untouched.
	// NOTE(review): the condition stops before the final Prog in the
	// list — presumably it can never be symbolic; confirm.
	for cur := builder.Root().Link; cur.Link != nil; cur = cur.Link {
		switch cur.As {
		case AFlushStackLength:
			regs.flush(cur, builder, x86.REG_R13)
		case ALoadGlobalsSliceHeader:
			b.emitLoadGlobalsSliceHeader(cur, builder, regs)
		case ALoadLocalsFirstElem:
			b.emitLoadLocalsFirstElem(cur, builder, regs)
		case APushWasmStack:
			b.emitWasmStackPush(cur, builder, regs)
		case APopWasmStack:
			b.emitWasmStackLoad(cur, builder, regs)
		}
	}
}
// emitLoadLocalsFirstElem lowers an ALoadLocalsFirstElem pseudo-op:
// it loads [R11] into R15 unless R15 already caches that value, in
// which case the instruction becomes a NOP.
func (b *AMD64Backend) emitLoadLocalsFirstElem(inst *obj.Prog, builder *asm.Builder, regs *dirtyRegs) {
	if regs.R15 == stateLocalFirstElem {
		// R15 is already warm — nothing to emit.
		inst.As = obj.ANOP
		return
	}
	// movq r15, [r11]
	inst.As = x86.AMOVQ
	inst.From.Type = obj.TYPE_MEM
	inst.From.Reg = x86.REG_R11
	inst.To.Type = obj.TYPE_REG
	inst.To.Reg = x86.REG_R15
	regs.R15 = stateLocalFirstElem
}
// emitLoadGlobalsSliceHeader lowers an ALoadGlobalsSliceHeader
// pseudo-op: it loads a pointer from [SP+32] into R15 and then
// dereferences R15 once more, unless R15 already caches the result
// (then the instruction becomes a NOP).
func (b *AMD64Backend) emitLoadGlobalsSliceHeader(inst *obj.Prog, builder *asm.Builder, regs *dirtyRegs) {
	if regs.R15 == stateGlobalSliceHeader {
		// R15 is already warm — nothing to emit.
		inst.As = obj.ANOP
		return
	}
	// movq r15, [rsp+32]
	// NOTE(review): offset 32 presumably addresses the globals slice
	// header argument in the caller's frame — confirm against the
	// function prologue.
	inst.As = x86.AMOVQ
	inst.From.Type = obj.TYPE_MEM
	inst.From.Reg = x86.REG_SP
	inst.From.Offset = 32
	inst.To.Type = obj.TYPE_REG
	inst.To.Reg = x86.REG_R15
	// movq r15, [r15] — follow the pointer just loaded; spliced in
	// directly after inst.
	deref := builder.NewProg()
	deref.As = x86.AMOVQ
	deref.From.Type = obj.TYPE_MEM
	deref.From.Reg = x86.REG_R15
	deref.To.Type = obj.TYPE_REG
	deref.To.Reg = x86.REG_R15
	deref.Link = inst.Link
	inst.Link = deref
	regs.R15 = stateGlobalSliceHeader
}
// emitWasmStackLoad lowers an APopWasmStack pseudo-op: it pops the top
// of the WASM stack into the operand carried in inst.To. inst itself is
// rewritten in place as the first concrete instruction; the rest of the
// sequence is spliced into the list after it.
func (b *AMD64Backend) emitWasmStackLoad(inst *obj.Prog, builder *asm.Builder, regs *dirtyRegs) {
	// movq r13, [r10+8] (if not already loaded)
	// decq r13
	// movq r14, [r10] (if not already loaded)
	// leaq r12, [r14 + r13*8]
	// movq reg, [r12]

	// Capture the destination operand and successor before inst's
	// fields are overwritten below.
	to := inst.To
	nextInst := inst.Link
	// The wasm instruction index rides in inst.From.Val; it only tags
	// the exit path emitted for a failed bounds check.
	ci := inst.From.Val.(currentInstruction)
	// Load the stack length into R13 unless it is already cached there.
	if regs.R13 != stateStackLen {
		inst.As = x86.AMOVQ
		inst.To.Type = obj.TYPE_REG
		inst.To.Reg = x86.REG_R13
		inst.From.Type = obj.TYPE_MEM
		inst.From.Reg = x86.REG_R10
		inst.From.Offset = 8
		regs.R13 = stateStackLen
	} else {
		inst.As = obj.ANOP
	}
	prev := inst
	// decq r13 — the decremented length is also the index of the
	// element being popped.
	prog := builder.NewProg()
	prog.As = x86.ADECQ
	prog.To.Type = obj.TYPE_REG
	prog.To.Reg = x86.REG_R13
	prev.Link = prog
	prev = prog
	if b.EmitBoundsChecks {
		// movq r12, [r10+16]
		// cmp r12, r13
		// ja endbounds
		// <emitExit() code>
		// endbounds:
		prog = builder.NewProg()
		prog.As = x86.AMOVQ
		prog.To.Type = obj.TYPE_REG
		prog.To.Reg = x86.REG_R12
		prog.From.Type = obj.TYPE_MEM
		prog.From.Reg = x86.REG_R10
		prog.From.Offset = 16
		prev.Link = prog
		prev = prog
		prog = builder.NewProg()
		prog.As = x86.ACMPQ
		prog.To.Type = obj.TYPE_REG
		prog.To.Reg = x86.REG_R13
		prog.From.Type = obj.TYPE_REG
		prog.From.Reg = x86.REG_R12
		prev.Link = prog
		prev = prog
		jmp := builder.NewProg()
		jmp.As = x86.AJHI
		jmp.To.Type = obj.TYPE_BRANCH
		prev.Link = jmp
		prev = jmp
		// Bounds check failed: exit, tagging the completion status with
		// the originating wasm instruction index.
		retValue, ret := b.exitInstructions(builder, CompletionBadBounds|makeExitIndex(ci.idx))
		prev.Link = retValue
		retValue.Link = ret
		prev = ret
		prog = builder.NewProg()
		prog.As = obj.ANOP // branch target - assembler will optimize out.
		jmp.Pcond = prog
		prev.Link = prog
		prev = prog
	}
	// Load the stack's first-element pointer into R14 if not cached.
	if regs.R14 != stateStackFirstElem {
		prog = builder.NewProg()
		prog.As = x86.AMOVQ
		prog.To.Type = obj.TYPE_REG
		prog.To.Reg = x86.REG_R14
		prog.From.Type = obj.TYPE_MEM
		prog.From.Reg = x86.REG_R10
		prev.Link = prog
		prev = prog
		regs.R14 = stateStackFirstElem
	}
	// leaq r12, [r14 + r13*8] — address of the popped element.
	prog = builder.NewProg()
	prog.As = x86.ALEAQ
	prog.To.Type = obj.TYPE_REG
	prog.To.Reg = x86.REG_R12
	prog.From.Type = obj.TYPE_MEM
	prog.From.Reg = x86.REG_R14
	prog.From.Scale = 8
	prog.From.Index = x86.REG_R13
	prev.Link = prog
	prev = prog
	// movq <dest>, [r12] — deliver the value to the saved destination.
	prog = builder.NewProg()
	prog.As = x86.AMOVQ
	prog.From.Type = obj.TYPE_MEM
	prog.From.Reg = x86.REG_R12
	prog.To = to
	prev.Link = prog
	// Re-attach the remainder of the instruction stream.
	prog.Link = nextInst
}
// emitWasmStackPush lowers an APushWasmStack pseudo-op: it pushes the
// operand carried in inst.From onto the top of the WASM stack. inst
// itself is rewritten in place as the first concrete instruction; the
// rest of the sequence is spliced into the list after it.
func (b *AMD64Backend) emitWasmStackPush(inst *obj.Prog, builder *asm.Builder, regs *dirtyRegs) {
	// movq r14, [r10] (if not already loaded)
	// movq r13, [r10+8] (if not already loaded)
	// leaq r12, [r14 + r13*8]
	// movq [r12], <data>
	// incq r13

	// Capture the source operand and successor before inst's fields
	// are overwritten below.
	from := inst.From
	nextInst := inst.Link
	// The wasm instruction index rides in inst.To.Val; it only tags
	// the exit path emitted for a failed bounds check.
	ci := inst.To.Val.(currentInstruction)
	var prog *obj.Prog
	// Load the stack's first-element pointer into R14 if not cached.
	if regs.R14 != stateStackFirstElem {
		inst.As = x86.AMOVQ
		inst.To.Type = obj.TYPE_REG
		inst.To.Reg = x86.REG_R14
		inst.From.Type = obj.TYPE_MEM
		inst.From.Reg = x86.REG_R10
		regs.R14 = stateStackFirstElem
	} else {
		inst.As = obj.ANOP
	}
	prev := inst
	// Load the stack length into R13 unless it is already cached there.
	if regs.R13 != stateStackLen {
		prog = builder.NewProg()
		prev.Link = prog
		prog.As = x86.AMOVQ
		prog.To.Type = obj.TYPE_REG
		prog.To.Reg = x86.REG_R13
		prog.From.Type = obj.TYPE_MEM
		prog.From.Reg = x86.REG_R10
		prog.From.Offset = 8
		prev = prog
		regs.R13 = stateStackLen
	}
	if b.EmitBoundsChecks {
		// movq r12, [r10+16]
		// cmp r12, r13
		// ja endbounds
		// <emitExit() code>
		// endbounds:
		prog = builder.NewProg()
		prev.Link = prog
		prog.As = x86.AMOVQ
		prog.To.Type = obj.TYPE_REG
		prog.To.Reg = x86.REG_R12
		prog.From.Type = obj.TYPE_MEM
		prog.From.Reg = x86.REG_R10
		prog.From.Offset = 16
		prev = prog
		prog = builder.NewProg()
		prev.Link = prog
		prog.As = x86.ACMPQ
		prog.To.Type = obj.TYPE_REG
		prog.To.Reg = x86.REG_R13
		prog.From.Type = obj.TYPE_REG
		prog.From.Reg = x86.REG_R12
		prev = prog
		jmp := builder.NewProg()
		prev.Link = jmp
		jmp.As = x86.AJHI
		jmp.To.Type = obj.TYPE_BRANCH
		prev = jmp
		// Bounds check failed: exit, tagging the completion status with
		// the originating wasm instruction index.
		retValue, ret := b.exitInstructions(builder, CompletionBadBounds|makeExitIndex(ci.idx))
		prev.Link = retValue
		retValue.Link = ret
		prev = ret
		prog = builder.NewProg()
		prog.As = obj.ANOP // branch target - assembler will optimize out.
		jmp.Pcond = prog
		prev.Link = prog
		prev = prog
	}
	// leaq r12, [r14 + r13*8] — address of the next free slot.
	prog = builder.NewProg()
	prog.As = x86.ALEAQ
	prog.To.Type = obj.TYPE_REG
	prog.To.Reg = x86.REG_R12
	prog.From.Type = obj.TYPE_MEM
	prog.From.Reg = x86.REG_R14
	prog.From.Scale = 8
	prog.From.Index = x86.REG_R13
	prev.Link = prog
	prev = prog
	// movq [r12], <data> — store the value from the saved source operand.
	prog = builder.NewProg()
	prog.As = x86.AMOVQ
	prog.To.Type = obj.TYPE_MEM
	prog.To.Reg = x86.REG_R12
	prog.From = from
	prev.Link = prog
	prev = prog
	// incq r13 — grow the cached stack length; it stays dirty in R13
	// until AFlushStackLength writes it back.
	prog = builder.NewProg()
	prog.As = x86.AINCQ
	prog.To.Type = obj.TYPE_REG
	prog.To.Reg = x86.REG_R13
	prev.Link = prog
	// Re-attach the remainder of the instruction stream.
	prog.Link = nextInst
}