// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"path/filepath"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/obj/x86"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
)
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

var ssaDump string     // early copy of $GOSSAFUNC; the func name to dump output for
var ssaDir string      // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string  // generate CFGs for these phases

const ssaDumpFile = "ssa.html"
// The maximum number of defers in a function using open-coded defers. We
// enforce this limit because the deferBits bitmask is currently a single byte
// (to minimize code size), which provides only one bit per defer.
const maxOpenDefers = 8
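// As an illustration: with three open-coded defers in a function, a deferBits
// value of 0b101 at a return site records that the first and third defers were
// activated on the path taken and must be run (in reverse order) before
// returning. See emitOpenDeferInfo below for how this is described to the
// runtime for panic processing.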
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*Node
func initssaconfig() {
	types_ := ssa.NewTypes()

	if thearch.SoftFloat {
		softfloatInit()
	}

	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[TINT16])                             // *int16
	_ = types.NewPtr(types.Types[TINT64])                             // *int64
	_ = types.NewPtr(types.Errortype)                                 // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
	ssaConfig.SoftFloat = thearch.SoftFloat
	ssaConfig.Race = flag_race
	ssaCaches = make([]ssa.Cache, nBackendWorkers)

	// Set up some runtime functions we'll need to call.
	assertE2I = sysfunc("assertE2I")
	assertE2I2 = sysfunc("assertE2I2")
	assertI2I = sysfunc("assertI2I")
	assertI2I2 = sysfunc("assertI2I2")
	deferproc = sysfunc("deferproc")
	deferprocStack = sysfunc("deferprocStack")
	Deferreturn = sysfunc("deferreturn")
	Duffcopy = sysfunc("duffcopy")
	Duffzero = sysfunc("duffzero")
	gcWriteBarrier = sysfunc("gcWriteBarrier")
	goschedguarded = sysfunc("goschedguarded")
	growslice = sysfunc("growslice")
	msanread = sysfunc("msanread")
	msanwrite = sysfunc("msanwrite")
	msanmove = sysfunc("msanmove")
	newobject = sysfunc("newobject")
	newproc = sysfunc("newproc")
	panicdivide = sysfunc("panicdivide")
	panicdottypeE = sysfunc("panicdottypeE")
	panicdottypeI = sysfunc("panicdottypeI")
	panicnildottype = sysfunc("panicnildottype")
	panicoverflow = sysfunc("panicoverflow")
	panicshift = sysfunc("panicshift")
	raceread = sysfunc("raceread")
	racereadrange = sysfunc("racereadrange")
	racewrite = sysfunc("racewrite")
	racewriterange = sysfunc("racewriterange")
	x86HasPOPCNT = sysvar("x86HasPOPCNT")       // bool
	x86HasSSE41 = sysvar("x86HasSSE41")         // bool
	x86HasFMA = sysvar("x86HasFMA")             // bool
	armHasVFPv4 = sysvar("armHasVFPv4")         // bool
	arm64HasATOMICS = sysvar("arm64HasATOMICS") // bool
	typedmemclr = sysfunc("typedmemclr")
	typedmemmove = sysfunc("typedmemmove")
	Udiv = sysvar("udiv")                 // asm func with special ABI
	writeBarrier = sysvar("writeBarrier") // struct { bool; ... }
	zerobaseSym = sysvar("zerobase")

	// asm funcs with special ABI
	if thearch.LinkArch.Name == "amd64" {
		GCWriteBarrierReg = map[int16]*obj.LSym{
			x86.REG_AX: sysfunc("gcWriteBarrier"),
			x86.REG_CX: sysfunc("gcWriteBarrierCX"),
			x86.REG_DX: sysfunc("gcWriteBarrierDX"),
			x86.REG_BX: sysfunc("gcWriteBarrierBX"),
			x86.REG_BP: sysfunc("gcWriteBarrierBP"),
			x86.REG_SI: sysfunc("gcWriteBarrierSI"),
			x86.REG_R8: sysfunc("gcWriteBarrierR8"),
			x86.REG_R9: sysfunc("gcWriteBarrierR9"),
		}
	}

	if thearch.LinkArch.Family == sys.Wasm {
		BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("goPanicIndex")
		BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("goPanicIndexU")
		BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("goPanicSliceAlen")
		BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("goPanicSliceAlenU")
		BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("goPanicSliceAcap")
		BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("goPanicSliceAcapU")
		BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("goPanicSliceB")
		BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("goPanicSliceBU")
		BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("goPanicSlice3Alen")
		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("goPanicSlice3AlenU")
		BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("goPanicSlice3Acap")
		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("goPanicSlice3AcapU")
		BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("goPanicSlice3B")
		BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("goPanicSlice3BU")
		BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("goPanicSlice3C")
		BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("goPanicSlice3CU")
	} else {
		BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("panicIndex")
		BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("panicIndexU")
		BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("panicSliceAlen")
		BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("panicSliceAlenU")
		BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("panicSliceAcap")
		BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("panicSliceAcapU")
		BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("panicSliceB")
		BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("panicSliceBU")
		BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("panicSlice3Alen")
		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("panicSlice3AlenU")
		BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("panicSlice3Acap")
		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("panicSlice3AcapU")
		BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("panicSlice3B")
		BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("panicSlice3BU")
		BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("panicSlice3C")
		BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("panicSlice3CU")
	}
	if thearch.LinkArch.PtrSize == 4 {
		ExtendCheckFunc[ssa.BoundsIndex] = sysvar("panicExtendIndex")
		ExtendCheckFunc[ssa.BoundsIndexU] = sysvar("panicExtendIndexU")
		ExtendCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicExtendSliceAlen")
		ExtendCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicExtendSliceAlenU")
		ExtendCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicExtendSliceAcap")
		ExtendCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicExtendSliceAcapU")
		ExtendCheckFunc[ssa.BoundsSliceB] = sysvar("panicExtendSliceB")
		ExtendCheckFunc[ssa.BoundsSliceBU] = sysvar("panicExtendSliceBU")
		ExtendCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicExtendSlice3Alen")
		ExtendCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicExtendSlice3AlenU")
		ExtendCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicExtendSlice3Acap")
		ExtendCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicExtendSlice3AcapU")
		ExtendCheckFunc[ssa.BoundsSlice3B] = sysvar("panicExtendSlice3B")
		ExtendCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicExtendSlice3BU")
		ExtendCheckFunc[ssa.BoundsSlice3C] = sysvar("panicExtendSlice3C")
		ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU")
	}

	// Wasm (all asm funcs with special ABIs)
	WasmMove = sysvar("wasmMove")
	WasmZero = sysvar("wasmZero")
	WasmDiv = sysvar("wasmDiv")
	WasmTruncS = sysvar("wasmTruncS")
	WasmTruncU = sysvar("wasmTruncU")
	SigPanic = sysfunc("sigpanic")
}
// getParam returns the Field of the i'th parameter of node n (which is a
// function/method/interface call), where the receiver of a method call is
// considered the 0th parameter. This does not include the receiver of an
// interface call.
func getParam(n *Node, i int) *types.Field {
	t := n.Left.Type
	if n.Op == OCALLMETH {
		if i == 0 {
			return t.Recv()
		}
		return t.Params().Field(i - 1)
	}
	return t.Params().Field(i)
}
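// For example, for a method call n of the form r.M(a, b) (OCALLMETH),
// getParam(n, 0) returns the receiver field for r, getParam(n, 1) the field
// for a, and getParam(n, 2) the field for b. For a plain function call
// f(a, b), getParam(n, 0) is simply the field for a.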
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
func dvarint(x *obj.LSym, off int, v int64) int {
	if v < 0 || v > 1e9 {
		panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
	}
	if v < 1<<7 {
		return duint8(x, off, uint8(v))
	}
	off = duint8(x, off, uint8((v&127)|128))
	if v < 1<<14 {
		return duint8(x, off, uint8(v>>7))
	}
	off = duint8(x, off, uint8(((v>>7)&127)|128))
	if v < 1<<21 {
		return duint8(x, off, uint8(v>>14))
	}
	off = duint8(x, off, uint8(((v>>14)&127)|128))
	if v < 1<<28 {
		return duint8(x, off, uint8(v>>21))
	}
	off = duint8(x, off, uint8(((v>>21)&127)|128))
	return duint8(x, off, uint8(v>>28))
}
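// As a worked example of the encoding above: v = 300 (0b1_0010_1100) is
// written low 7 bits first, with the high bit of each byte acting as a
// continuation flag:
//
//	byte 0: (300&127)|128 = 0xAC
//	byte 1: 300>>7        = 0x02
//
// Values below 128 occupy a single byte, which keeps the open-defer funcdata
// (see emitOpenDeferInfo) compact in the common case.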
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
//
// The funcdata is all encoded in varints (since values will almost always be less than
// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
//	- Max total argument size among all the defers
//	- Offset of the deferBits variable
//	- Number of defers in the function
//	- Information about each defer call, in reverse order of appearance in the function:
//		- Total argument size of the call
//		- Offset of the closure value to call
//		- Number of arguments (including interface receiver or method receiver as first arg)
//		- Information about each argument
//			- Offset of the stored defer argument in this function's frame
//			- Size of the argument
//			- Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
	x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer")
	s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x
	off := 0

	// Compute maxargsize (max size of arguments for all defers)
	// first, so we can output it first to the funcdata
	var maxargsize int64
	for i := len(s.openDefers) - 1; i >= 0; i-- {
		r := s.openDefers[i]
		argsize := r.n.Left.Type.ArgWidth()
		if argsize > maxargsize {
			maxargsize = argsize
		}
	}
	off = dvarint(x, off, maxargsize)
	off = dvarint(x, off, -s.deferBitsTemp.Xoffset)
	off = dvarint(x, off, int64(len(s.openDefers)))

	// Write in reverse-order, for ease of running in that order at runtime
	for i := len(s.openDefers) - 1; i >= 0; i-- {
		r := s.openDefers[i]
		off = dvarint(x, off, r.n.Left.Type.ArgWidth())
		off = dvarint(x, off, -r.closureNode.Xoffset)
		numArgs := len(r.argNodes)
		if r.rcvrNode != nil {
			// If there's an interface receiver, treat/place it as the first
			// arg. (If there is a method receiver, it's already included as
			// first arg in r.argNodes.)
			numArgs++
		}
		off = dvarint(x, off, int64(numArgs))
		if r.rcvrNode != nil {
			off = dvarint(x, off, -r.rcvrNode.Xoffset)
			off = dvarint(x, off, s.config.PtrSize)
			off = dvarint(x, off, 0)
		}
		for j, arg := range r.argNodes {
			f := getParam(r.n, j)
			off = dvarint(x, off, -arg.Xoffset)
			off = dvarint(x, off, f.Type.Size())
			off = dvarint(x, off, f.Offset)
		}
	}
}
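// Illustrative layout (with hypothetical <off> placeholders for the actual
// frame offsets): for a function containing a single "defer f(x)" where f
// takes one 8-byte argument, the varints above are emitted in this order:
//
//	8     // max total argument size among all defers
//	<off> // offset of deferBits below varp
//	1     // number of defers
//	8     // total argument size of the f call
//	<off> // offset of the stored closure value
//	1     // number of arguments
//	<off> // offset of the stored copy of x in this frame
//	8     // size of x
//	0     // where x goes in the outgoing args frame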
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
	name := fn.funcname()
	printssa := false
	if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
		printssa = name == ssaDump || myimportpath+"."+name == ssaDump
	}
	var astBuf *bytes.Buffer
	if printssa {
		astBuf = &bytes.Buffer{}
		fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
		fdumplist(astBuf, "buildssa-body", fn.Nbody)
		fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
		if ssaDumpStdout {
			fmt.Println("generating SSA for", name)
			fmt.Print(astBuf.String())
		}
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa && ssaDumpStdout,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Type = fn.Type
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.Name = name
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
	s.f.PrintOrHtmlSSA = printssa
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
	s.f.Entry.Pos = fn.Pos

	if printssa {
		ssaDF := ssaDumpFile
		if ssaDir != "" {
			ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html")
			ssaD := filepath.Dir(ssaDF)
			os.MkdirAll(ssaD, 0755)
		}
		s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
		// TODO: generate and print a mapping from nodes to values and blocks
		dumpSourcesColumn(s.f.HTMLWriter, fn)
		s.f.HTMLWriter.WriteAST("AST", astBuf)
	}

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)

	s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
	switch {
	case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
		// On 386, don't support open-coded defers when using shared
		// libraries, because there is extra code (added by rewriteToUseGot())
		// preceding the deferreturn/ret code generated by gencallret()
		// that we don't track correctly.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 {
		// Skip doing open defers if there is any extra exit code (likely
		// copying heap-allocated return values or race detection), since
		// we will not generate that code in the case of the extra
		// deferreturn/ret segment.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers &&
		s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 {
		// Since we are generating defer calls at every exit for
		// open-coded defers, skip doing open-coded defers if there are
		// too many returns (especially if there are multiple defers).
		// Open-coded defers are most important for improving performance
		// for smaller functions (which don't have many returns).
		s.hasOpenDefers = false
	}

	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem
	if s.hasOpenDefers {
		// Create the deferBits variable and stack slot. deferBits is a
		// bitmask showing which of the open-coded defers in this function
		// have been activated.
		deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8])
		s.deferBitsTemp = deferBitsTemp
		// For this value, AuxInt is initialized to zero by default
		startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8])
		s.vars[&deferBitsVar] = startDeferBits
		s.deferBitsAddr = s.addr(deferBitsTemp)
		s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits)
		// Make sure that the deferBits stack slot is kept alive (for use
		// by panics) and stores to deferBits are not eliminated, even if
		// all checking code on deferBits in the function exit can be
		// eliminated, because the defer statements were all
		// unconditional.
		s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
	}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	var args []ssa.Param
	var results []ssa.Param
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM:
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
			args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
		case PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
			results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
			if s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %v unimplemented", n.Class())
		}
	}

	// Populate SSAable arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class() == PPARAM && s.canSSA(n) {
			v := s.newValue0A(ssa.OpArg, n.Type, n)
			s.vars[n] = v
			s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	for _, b := range s.f.Blocks {
		if b.Pos != src.NoXPos {
			s.updateUnsetPredPos(b)
		}
	}

	s.insertPhis()

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	if s.hasOpenDefers {
		s.emitOpenDeferInfo()
	}

	return s.f
}
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
	// Read sources of target function fn.
	fname := Ctxt.PosTable.Pos(fn.Pos).Filename()
	targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
	if err != nil {
		writer.Logf("cannot read sources for function %v: %v", fn, err)
	}

	// Read sources of inlined functions.
	var inlFns []*ssa.FuncLines
	for _, fi := range ssaDumpInlined {
		var elno src.XPos
		if fi.Name.Defn == nil {
			// Endlineno is filled from exported data.
			elno = fi.Func.Endlineno
		} else {
			elno = fi.Name.Defn.Func.Endlineno
		}
		fname := Ctxt.PosTable.Pos(fi.Pos).Filename()
		fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
		if err != nil {
			writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
			continue
		}
		inlFns = append(inlFns, fnLines)
	}

	sort.Sort(ssa.ByTopo(inlFns))
	if targetFn != nil {
		inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
	}

	writer.WriteSources("sources", inlFns)
}

func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
	f, err := os.Open(os.ExpandEnv(file))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var lines []string
	ln := uint(1)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() && ln <= end {
		if ln >= start {
			lines = append(lines, scanner.Text())
		}
		ln++
	}
	return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}
// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurses on each
// such predecessor whose position is updated. b should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
	if b.Pos == src.NoXPos {
		s.Fatalf("Block %s should have a position", b)
	}
	bestPos := src.NoXPos
	for _, e := range b.Preds {
		p := e.Block()
		if !p.LackingPos() {
			continue
		}
		if bestPos == src.NoXPos {
			bestPos = b.Pos
			for _, v := range b.Values {
				if v.LackingPos() {
					continue
				}
				if v.Pos != src.NoXPos {
					// Assume values are still in roughly textual order;
					// TODO: could also seek minimum position?
					bestPos = v.Pos
					break
				}
			}
		}
		p.Pos = bestPos
		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
	}
}
// Information about each open-coded defer.
type openDeferInfo struct {
	// The ODEFER node representing the function call of the defer
	n *Node
	// If the defer call is a closure call, the address of the argtmp where the
	// closure is stored.
	closure *ssa.Value
	// The node representing the argtmp where the closure is stored - used for
	// function, method, or interface call, to store a closure that panic
	// processing can use for this defer.
	closureNode *Node
	// If the defer call is an interface call, the address of the argtmp where
	// the receiver is stored
	rcvr *ssa.Value
	// The node representing the argtmp where the receiver is stored
	rcvrNode *Node
	// The addresses of the argtmps where the evaluated arguments of the defer
	// function call are stored.
	argVals []*ssa.Value
	// The nodes representing the argtmps where the args of the defer are stored
	argNodes []*Node
}
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *Node

	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value
	// value representing address of where deferBits autotmp is stored
	deferBitsAddr *ssa.Value
	deferBitsTemp *Node

	// line number stack. The current line number is top of stack
	line []src.XPos
	// the last line number processed; it may have been popped
	lastPos src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
	softFloat     bool
	hasOpenDefers bool // whether we are doing open-coded defers

	// If doing open-coded defers, list of info about the defer calls in
	// scanning order. Hence, at exit we should run these defers in reverse
	// order of this list
	openDefers []*openDeferInfo

	// For open-coded defers, this is the beginning and end blocks of the last
	// defer exit code that we have generated so far. We use these to share
	// code between exits if the shareDeferExits option (disabled by default)
	// is on.
	lastDeferExit       *ssa.Block // Entry block of last defer exit code we generated
	lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
	lastDeferCount      int        // Number of defers encountered at that point

	prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}
type funcLine struct {
	f    *obj.LSym
	base *src.PosBase
	line uint
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar       = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
	lenVar       = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
	newlenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
	capVar       = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
	typVar       = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
	okVar        = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
	deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}}
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	if b.LackingPos() {
		// Empty plain blocks get the line of their successor (handled after all blocks created),
		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
		// and for blocks ending in GOTO/BREAK/CONTINUE.
		b.Pos = src.NoXPos
	} else {
		b.Pos = s.lastPos
	}
	return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit nodes with a missing line number;
		// use the parent line number in that case.
		line = s.peekPos()
		if Debug.K != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	} else {
		s.lastPos = line
	}

	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}
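// As an illustration of the stack discipline: while converting a statement at
// position p1 that contains an inner expression at position p2,
//
//	s.pushLine(p1) // line = [... p1], peekPos() == p1
//	s.pushLine(p2) // line = [... p1 p2], peekPos() == p2
//	s.popLine()    // line = [... p1] again
//
// so values created for the inner expression pick up p2, and the enclosing
// statement's position is restored afterwards.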
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
	}
	return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2A adds a new value with two arguments and an aux value to the current block.
func (s *state) newValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
	}
	return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
	}
	return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
}
// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}
// newValue4I adds a new value with four arguments and an auxint value to the current block.
func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
}
// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.f.Entry.NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
}
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
}

// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(types.Types[TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
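// For instance, on a target with 4-byte pointers, constInt(t, 1<<20) goes
// through constInt32, while constInt(t, 1<<40) does not fit in 32 bits and
// triggers Fatalf; with 8-byte pointers both go through constInt64.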
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(t, c, s.sp)
}

// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg); ok {
			return c
		}
	}
	return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg0, arg1); ok {
			return c
		}
	}
	return s.newValue2(op, t, arg0, arg1)
}

type instrumentKind uint8

const (
	instrumentRead = iota
	instrumentWrite
	instrumentMove
)
func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
	s.instrument2(t, addr, nil, kind)
}

// instrumentFields instruments a read/write operation on addr.
// If it is instrumenting for MSAN and t is a struct type, it instruments the
// operation for each field, instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
	if !flag_msan || !t.IsStruct() {
		s.instrument(t, addr, kind)
		return
	}
	for _, f := range t.Fields().Slice() {
		if f.Sym.IsBlank() {
			continue
		}
		offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
		s.instrumentFields(f.Type, offptr, kind)
	}
}
func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
	if flag_msan {
		s.instrument2(t, dst, src, instrumentMove)
	} else {
		s.instrument(t, src, instrumentRead)
		s.instrument(t, dst, instrumentWrite)
	}
}

func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
	if !s.curfn.Func.InstrumentBody() {
		return
	}

	w := t.Size()
	if w == 0 {
		return // can't race on zero-sized things
	}

	if ssa.IsSanitizerSafeAddr(addr) {
		return
	}

	var fn *obj.LSym
	needWidth := false

	if addr2 != nil && kind != instrumentMove {
		panic("instrument2: non-nil addr2 for non-move instrumentation")
	}

	if flag_msan {
		switch kind {
		case instrumentRead:
			fn = msanread
		case instrumentWrite:
			fn = msanwrite
		case instrumentMove:
			fn = msanmove
		default:
			panic("unreachable")
		}
		needWidth = true
	} else if flag_race && t.NumComponents(types.CountBlankFields) > 1 {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		// composites with only one element don't have subobjects, though.
		switch kind {
		case instrumentRead:
			fn = racereadrange
		case instrumentWrite:
			fn = racewriterange
		default:
			panic("unreachable")
		}
		needWidth = true
	} else if flag_race {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		switch kind {
		case instrumentRead:
			fn = raceread
		case instrumentWrite:
			fn = racewrite
		default:
			panic("unreachable")
		}
	} else {
		panic("unreachable")
	}

	args := []*ssa.Value{addr}
	if addr2 != nil {
		args = append(args, addr2)
	}
	if needWidth {
		args = append(args, s.constInt(types.Types[TUINTPTR], w))
	}
	s.rtcall(fn, true, nil, args...)
}
func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
	s.instrumentFields(t, src, instrumentRead)
	return s.rawLoad(t, src)
}

func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
	return s.newValue2(ssa.OpLoad, t, src, s.mem())
}

func (s *state) store(t *types.Type, dst, val *ssa.Value) {
	s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}

func (s *state) zero(t *types.Type, dst *ssa.Value) {
	s.instrument(t, dst, instrumentWrite)
	store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
	store.Aux = t
	s.vars[&memVar] = store
}

func (s *state) move(t *types.Type, dst, src *ssa.Value) {
	s.instrumentMove(t, dst, src)
	store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
	store.Aux = t
	s.vars[&memVar] = store
}
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
		// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer,
		// so we don't use their line numbers to avoid confusion in debugging.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op != OLABEL {
		return
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.callResult(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ODEFER:
		if Debug_defer > 0 {
			var defertype string
			if s.hasOpenDefers {
				defertype = "open-coded"
			} else if n.Esc == EscNever {
				defertype = "stack-allocated"
			} else {
				defertype = "heap-allocated"
			}
			Warnl(n.Pos, "%s defer", defertype)
		}
		if s.hasOpenDefers {
			s.openDeferRecord(n.Left)
		} else {
			d := callDefer
			if n.Esc == EscNever {
				d = callDeferStack
			}
			s.callResult(n.Left, d)
		}
	case OGO:
		s.callResult(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Right, true)
		deref := false
		if !canSSAType(n.Right.Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, deref, 0)
		s.assign(n.List.Second(), resok, false, 0)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Right) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right)
		}
		v := s.intrinsicCall(n.Right)
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		s.assign(n.List.First(), v1, false, 0)
		s.assign(n.List.Second(), v2, false, 0)
		return

	case ODCL:
		if n.Left.Class() == PAUTOHEAP {
			s.Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Sym
		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.labeledControl(); ctl != nil {
			s.labeledNodes[ctl] = lab
		}

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Sym
		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
		b.AddEdgeTo(lab.target)

	case OAS:
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//  VARDEF x
			//  COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !isZero(rhs) {
					s.Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// Check whether we're writing the result of an append back to the same slice.
				// If so, we handle it specially to avoid write barriers on the fast
				// (non-growth) path.
				if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
					break
				}
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if s.canSSA(n.Left) {
					if Debug_append > 0 { // replicating old diagnostic message
						Warnl(n.Pos, "append: len-only update (in local slice)")
					}
					break
				}
				if Debug_append > 0 {
					Warnl(n.Pos, "append: len-only update")
				}
				s.append(rhs, true)
				return
			}
		}

		if n.Left.isBlank() {
			// _ = rhs
			// Just evaluate rhs for side-effects.
			if rhs != nil {
				s.expr(rhs)
			}
			return
		}

		var t *types.Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		var r *ssa.Value
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//  tmp = len(*p)
			//  (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
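			// For example, for "s = s[:5]" the low bound i and the cap
			// bound k are both nil, so skip == skipPtr|skipCap: only the
			// new length is stored back, leaving the pointer and capacity
			// words of s untouched.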
		}

		s.assign(n.Left, r, deref, skip)

	case OIF:
		if Isconst(n.Left, CTBOOL) {
			s.stmtList(n.Left.Ninit)
			if n.Left.BoolVal() {
				s.stmtList(n.Nbody)
			} else {
				s.stmtList(n.Rlist)
			}
			break
		}

		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var likely int8
		if n.Likely() {
			likely = 1
		}
		var bThen *ssa.Block
		if n.Nbody.Len() != 0 {
			bThen = s.f.NewBlock(ssa.BlockPlain)
		} else {
			bThen = bEnd
		}
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
		} else {
			bElse = bEnd
		}
		s.condBranch(n.Left, bThen, bElse, likely)

		if n.Nbody.Len() != 0 {
			s.startBlock(bThen)
			s.stmtList(n.Nbody)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		b := s.exit()
		b.Pos = s.lastPos.WithIsStmt()

	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Sym.Linksym()

	case OCONTINUE, OBREAK:
		var to *ssa.Block
		if n.Sym == nil {
			// plain break/continue
			switch n.Op {
			case OCONTINUE:
				to = s.continueTo
			case OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Sym
			lab := s.label(sym)
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
		b.AddEdgeTo(to)

	case OFOR, OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// cond (Left); body (Nbody); incr (Right)
		//
		// OFORUNTIL: for Ninit; Left; Right; List { Nbody }
		// => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// ensure empty for loops have correct position; issue #30167
		bBody.Pos = n.Pos

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op == OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr (and, for OFORUNTIL, condition)
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if n.Op == OFOR {
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bCond)
				// It can happen that bIncr ends in a block containing only VARKILL,
				// and that muddles the debugging experience.
				if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
					b.Pos = bCond.Pos
				}
			}
		} else {
			// bCond is unused in OFORUNTIL, so repurpose it.
			bLateIncr := bCond
			// test condition
			s.condBranch(n.Left, bLateIncr, bEnd, 1)
			// generate late increment
			s.startBlock(bLateIncr)
			s.stmtList(n.List)
			s.endBlock().AddEdgeTo(bBody)
		}
		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)

	case OVARDEF:
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
		}
	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Name.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		switch n.Left.Class() {
		case PAUTO, PPARAM, PPARAMOUT:
		default:
			s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	case OINLMARK:
		s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem())

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// If true, share as many open-coded defer exits as possible (with the downside of
// worse line-number information)
const shareDeferExits = false
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasdefer {
		if s.hasOpenDefers {
			if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
				if s.curBlock.Kind != ssa.BlockPlain {
					panic("Block for an exit should be BlockPlain")
				}
				s.curBlock.AddEdgeTo(s.lastDeferExit)
				s.endBlock()
				return s.lastDeferFinalBlock
			}
			s.openDeferExit()
		} else {
			s.rtcall(Deferreturn, true, nil)
		}
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.curfn.Func.Exit)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
		s.store(n.Type, addr, val)
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	if s.hasdefer && s.hasOpenDefers {
		s.lastDeferFinalBlock = b
	}
	return b
}
type opAndType struct { | |
op Op | |
etype types.EType | |
} | |

var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}: ssa.OpAdd8,
	opAndType{OADD, TUINT8}: ssa.OpAdd8,
	opAndType{OADD, TINT16}: ssa.OpAdd16,
	opAndType{OADD, TUINT16}: ssa.OpAdd16,
	opAndType{OADD, TINT32}: ssa.OpAdd32,
	opAndType{OADD, TUINT32}: ssa.OpAdd32,
	opAndType{OADD, TINT64}: ssa.OpAdd64,
	opAndType{OADD, TUINT64}: ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}: ssa.OpSub8,
	opAndType{OSUB, TUINT8}: ssa.OpSub8,
	opAndType{OSUB, TINT16}: ssa.OpSub16,
	opAndType{OSUB, TUINT16}: ssa.OpSub16,
	opAndType{OSUB, TINT32}: ssa.OpSub32,
	opAndType{OSUB, TUINT32}: ssa.OpSub32,
	opAndType{OSUB, TINT64}: ssa.OpSub64,
	opAndType{OSUB, TUINT64}: ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{ONEG, TINT8}: ssa.OpNeg8,
	opAndType{ONEG, TUINT8}: ssa.OpNeg8,
	opAndType{ONEG, TINT16}: ssa.OpNeg16,
	opAndType{ONEG, TUINT16}: ssa.OpNeg16,
	opAndType{ONEG, TINT32}: ssa.OpNeg32,
	opAndType{ONEG, TUINT32}: ssa.OpNeg32,
	opAndType{ONEG, TINT64}: ssa.OpNeg64,
	opAndType{ONEG, TUINT64}: ssa.OpNeg64,
	opAndType{ONEG, TFLOAT32}: ssa.OpNeg32F,
	opAndType{ONEG, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OBITNOT, TINT8}: ssa.OpCom8,
	opAndType{OBITNOT, TUINT8}: ssa.OpCom8,
	opAndType{OBITNOT, TINT16}: ssa.OpCom16,
	opAndType{OBITNOT, TUINT16}: ssa.OpCom16,
	opAndType{OBITNOT, TINT32}: ssa.OpCom32,
	opAndType{OBITNOT, TUINT32}: ssa.OpCom32,
	opAndType{OBITNOT, TINT64}: ssa.OpCom64,
	opAndType{OBITNOT, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}: ssa.OpMul8,
	opAndType{OMUL, TUINT8}: ssa.OpMul8,
	opAndType{OMUL, TINT16}: ssa.OpMul16,
	opAndType{OMUL, TUINT16}: ssa.OpMul16,
	opAndType{OMUL, TINT32}: ssa.OpMul32,
	opAndType{OMUL, TUINT32}: ssa.OpMul32,
	opAndType{OMUL, TINT64}: ssa.OpMul64,
	opAndType{OMUL, TUINT64}: ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
	opAndType{ODIV, TINT8}: ssa.OpDiv8,
	opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
	opAndType{ODIV, TINT16}: ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}: ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}: ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}: ssa.OpMod8,
	opAndType{OMOD, TUINT8}: ssa.OpMod8u,
	opAndType{OMOD, TINT16}: ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}: ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}: ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}: ssa.OpAnd8,
	opAndType{OAND, TUINT8}: ssa.OpAnd8,
	opAndType{OAND, TINT16}: ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}: ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}: ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}: ssa.OpOr8,
	opAndType{OOR, TUINT8}: ssa.OpOr8,
	opAndType{OOR, TINT16}: ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}: ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}: ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}: ssa.OpXor8,
	opAndType{OXOR, TUINT8}: ssa.OpXor8,
	opAndType{OXOR, TINT16}: ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}: ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}: ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}: ssa.OpEqB,
	opAndType{OEQ, TINT8}: ssa.OpEq8,
	opAndType{OEQ, TUINT8}: ssa.OpEq8,
	opAndType{OEQ, TINT16}: ssa.OpEq16,
	opAndType{OEQ, TUINT16}: ssa.OpEq16,
	opAndType{OEQ, TINT32}: ssa.OpEq32,
	opAndType{OEQ, TUINT32}: ssa.OpEq32,
	opAndType{OEQ, TINT64}: ssa.OpEq64,
	opAndType{OEQ, TUINT64}: ssa.OpEq64,
	opAndType{OEQ, TINTER}: ssa.OpEqInter,
	opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
	opAndType{OEQ, TMAP}: ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
	opAndType{OEQ, TPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,

	opAndType{ONE, TBOOL}: ssa.OpNeqB,
	opAndType{ONE, TINT8}: ssa.OpNeq8,
	opAndType{ONE, TUINT8}: ssa.OpNeq8,
	opAndType{ONE, TINT16}: ssa.OpNeq16,
	opAndType{ONE, TUINT16}: ssa.OpNeq16,
	opAndType{ONE, TINT32}: ssa.OpNeq32,
	opAndType{ONE, TUINT32}: ssa.OpNeq32,
	opAndType{ONE, TINT64}: ssa.OpNeq64,
	opAndType{ONE, TUINT64}: ssa.OpNeq64,
	opAndType{ONE, TINTER}: ssa.OpNeqInter,
	opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
	opAndType{ONE, TMAP}: ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
	opAndType{ONE, TPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,

	opAndType{OLT, TINT8}: ssa.OpLess8,
	opAndType{OLT, TUINT8}: ssa.OpLess8U,
	opAndType{OLT, TINT16}: ssa.OpLess16,
	opAndType{OLT, TUINT16}: ssa.OpLess16U,
	opAndType{OLT, TINT32}: ssa.OpLess32,
	opAndType{OLT, TUINT32}: ssa.OpLess32U,
	opAndType{OLT, TINT64}: ssa.OpLess64,
	opAndType{OLT, TUINT64}: ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OLE, TINT8}: ssa.OpLeq8,
	opAndType{OLE, TUINT8}: ssa.OpLeq8U,
	opAndType{OLE, TINT16}: ssa.OpLeq16,
	opAndType{OLE, TUINT16}: ssa.OpLeq16U,
	opAndType{OLE, TINT32}: ssa.OpLeq32,
	opAndType{OLE, TUINT32}: ssa.OpLeq32U,
	opAndType{OLE, TINT64}: ssa.OpLeq64,
	opAndType{OLE, TUINT64}: ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
}

func (s *state) concreteEtype(t *types.Type) types.EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.PtrSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}
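// For example, on a 64-bit target (PtrSize == 8):
//
//	s.concreteEtype(types.Types[TINT])     // TINT64
//	s.concreteEtype(types.Types[TUINTPTR]) // TUINT64
//
// so the op tables in this file only need entries for explicitly sized
// types.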

func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}
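// Sketch of a lookup, assuming a 64-bit target: for the Go expression
// a < b with a and b of type uint,
//
//	s.ssaOp(OLT, types.Types[TUINT]) // ssa.OpLess64U
//
// concreteEtype first resolves TUINT to TUINT64, and the opToSSA table
// then supplies the unsigned 64-bit comparison.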

func floatForComplex(t *types.Type) *types.Type {
	switch t.Etype {
	case TCOMPLEX64:
		return types.Types[TFLOAT32]
	case TCOMPLEX128:
		return types.Types[TFLOAT64]
	}
	Fatalf("unexpected type: %v", t)
	return nil
}

func complexForFloat(t *types.Type) *types.Type {
	switch t.Etype {
	case TFLOAT32:
		return types.Types[TCOMPLEX64]
	case TFLOAT64:
		return types.Types[TCOMPLEX128]
	}
	Fatalf("unexpected type: %v", t)
	return nil
}
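// These helpers pair each complex kind with its component float kind (a
// complex128 is two float64 halves), for example
//
//	floatForComplex(types.Types[TCOMPLEX128]) // types.Types[TFLOAT64]
//	complexForFloat(types.Types[TFLOAT32])    // types.Types[TCOMPLEX64]
//
// They are used when complex operations are decomposed into operations on
// their real and imaginary parts.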

type opAndTwoTypes struct {
	op     Op
	etype1 types.EType
	etype2 types.EType
}

type twoTypes struct {
	etype1 types.EType
	etype2 types.EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType types.EType
}

var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},

	// unsigned
	twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
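// Reading the table: each entry is a (widen, convert) or (convert, narrow)
// pair through intermediateType. For example, int16 -> float32 uses
//
//	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32}
//
// i.e. sign-extend the int16 to int32, then convert that int32 to float32.
// Entries containing ssa.OpInvalid mark conversions that are expanded into
// branchy code instead (the uint64 cases above).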

// fpConvOpToSSA32 is used only on 32-bit architectures, and holds only the
// entries that differ from fpConvOpToSSA: on 32-bit, don't go through an
// int64<->float conversion for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64fpConvOpToSSA holds uint64<->float conversions, used only on
// machines that have instructions for them.
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
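// How the three tables interact (sketch; see the OCONV case of expr below):
// lookup starts in fpConvOpToSSA, fpConvOpToSSA32 overrides it on 32-bit
// targets other than MIPS, and uint64fpConvOpToSSA overrides it on targets
// with native uint64<->float instructions (ARM64, Wasm, S390X, and the
// softfloat path). So float64 -> uint64 is a single ssa.OpCvt64Fto64U on
// ARM64 but branchy generated code on AMD64.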

var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}

func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}
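// Example lookup, assuming nodes x and u with Go types int32 and uint8:
//
//	s.ssaShiftOp(ORSH, x.Type, u.Type) // ssa.OpRsh32x8
//
// An unsigned x of type uint32 would yield ssa.OpRsh32Ux8 instead: the
// shifted operand's signedness selects arithmetic vs. logical right shift,
// while the second type only selects the width of the shift count.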

// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OBYTES2STRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTR2BYTESTMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := n.Left.Sym.Linksym()
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class() == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym).Linksym()
			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n)
		return s.load(n.Type, addr)
	case OCLOSUREVAR:
		addr := s.addr(n)
		return s.load(n.Type, addr)
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := types.Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := types.Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad complex size %d", n.Type.Size())
				return nil
			}
		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
			return v
		}

		// map <--> *hmap
		if to.Etype == TMAP && from.IsPtr() &&
			to.MapType().Hmap == from.Elem() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v
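
		// Note on the pointer special case above (illustrative, not from the
		// original file): a CONVNOP that changes pointerness, e.g.
		//
		//	u := uintptr(unsafe.Pointer(p))
		//
		// must go through OpConvert, which is tied to the memory state so
		// that liveness and the GC never see a value whose pointerness is
		// ambiguous. All the other accepted cases (same kind, named <->
		// unnamed, unsafe.Pointer <-> *T, map <-> *hmap) are pure
		// representation changes and collapse to OpCopy.
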
	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}
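
		// The 10*ft.Size()+tt.Size() keys above are a compact encoding of
		// (from-width, to-width) pairs in bytes. For example, uint64 -> uint16
		// is 10*8+2 = 82, selecting ssa.OpTrunc64to16, and int8 -> int64 is
		// 10*1+8 = 18, selecting ssa.OpSignExt8to64 (ssa.OpZeroExt8to64 for
		// an unsigned source).
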
		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {