From 6ec993adc3373d31392b301ebe0c376b02d68051 Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Mon, 12 Sep 2016 14:50:10 -0400 Subject: [PATCH] cmd/compile: add SSA backend for s390x and enable by default The new SSA backend modifies the ABI slightly: R0 is now a usable general purpose register. Fixes #16677. Change-Id: I367435ce921e0c7e79e021c80cf8ef5d1d1466cf Reviewed-on: https://go-review.googlesource.com/28978 Run-TryBot: Michael Munday TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/s390x/galign.go | 5 + src/cmd/compile/internal/s390x/ggen.go | 8 +- src/cmd/compile/internal/s390x/prog.go | 50 +- src/cmd/compile/internal/s390x/ssa.go | 885 + src/cmd/compile/internal/ssa/config.go | 11 + src/cmd/compile/internal/ssa/gen/S390X.rules | 1455 ++ src/cmd/compile/internal/ssa/gen/S390XOps.go | 527 + src/cmd/compile/internal/ssa/opGen.go | 2640 ++- src/cmd/compile/internal/ssa/regalloc.go | 2 + src/cmd/compile/internal/ssa/rewrite.go | 5 + src/cmd/compile/internal/ssa/rewriteS390X.go | 15822 +++++++++++++++++ src/cmd/compile/internal/ssa/schedule.go | 5 +- test/live.go | 2 +- test/live_ssa.go | 2 +- test/nilptr3_ssa.go | 2 +- test/phiopt.go | 2 +- test/sliceopt.go | 2 +- 18 files changed, 21338 insertions(+), 89 deletions(-) create mode 100644 src/cmd/compile/internal/s390x/ssa.go create mode 100644 src/cmd/compile/internal/ssa/gen/S390X.rules create mode 100644 src/cmd/compile/internal/ssa/gen/S390XOps.go create mode 100644 src/cmd/compile/internal/ssa/rewriteS390X.go diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 1e4b907f8e409..db411720143fb 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -40,7 +40,7 @@ func shouldssa(fn *Node) bool { if os.Getenv("SSATEST") == "" { return false } - case "amd64", "amd64p32", "arm", "386", "arm64", "ppc64le", "mips64", "mips64le": + case "amd64", "amd64p32", "arm", "386", "arm64", "ppc64le", "mips64", "mips64le", "s390x": // Generally available. } if !ssaEnabled { diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go index 09bc5d1f5d8f6..f7a0f5605a8b2 100644 --- a/src/cmd/compile/internal/s390x/galign.go +++ b/src/cmd/compile/internal/s390x/galign.go @@ -58,6 +58,11 @@ func Main() { gc.Thearch.Doregbits = doregbits gc.Thearch.Regnames = regnames + gc.Thearch.SSARegToReg = ssaRegToReg + gc.Thearch.SSAMarkMoves = ssaMarkMoves + gc.Thearch.SSAGenValue = ssaGenValue + gc.Thearch.SSAGenBlock = ssaGenBlock + gc.Main() gc.Exit(0) } diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go index 505d676c77316..1dd353a0ec03e 100644 --- a/src/cmd/compile/internal/s390x/ggen.go +++ b/src/cmd/compile/internal/s390x/ggen.go @@ -233,8 +233,8 @@ func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) { // Handle divide-by-zero panic. p1 := gins(optoas(gc.OCMP, t), &tr, nil) - p1.To.Type = obj.TYPE_REG - p1.To.Reg = s390x.REGZERO + p1.To.Type = obj.TYPE_CONST + p1.To.Offset = 0 p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1) if panicdiv == nil { panicdiv = gc.Sysfunc("panicdivide") @@ -561,8 +561,8 @@ func expandchecks(firstp *obj.Prog) { // crash by write to memory address 0. 
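
Both ggen.go hunks here follow from the ABI note in the commit message: the old back end read zeroes out of R0 (REGZERO), but once the SSA backend makes R0 an allocatable general purpose register its contents are arbitrary, so the zero must be encoded as an immediate (obj.TYPE_CONST) instead. As a hedged, Go-level sketch of the semantics the dodiv check preserves (illustrative code, not part of this CL; the function name is mine):

    // Go mandates a run-time panic on integer division by zero; the
    // back end compares the divisor against the immediate 0 and
    // branches past a call to runtime.panicdivide when it is nonzero.
    func quotient(a, b int64) int64 {
        if b == 0 { // CMP b, $0; BNE ok
            panic("integer divide by zero") // CALL panicdivide
        }
        return a / b // ok: emit the divide
    }
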
p1.As = s390x.AMOVD - p1.From.Type = obj.TYPE_REG - p1.From.Reg = s390x.REGZERO + p1.From.Type = obj.TYPE_CONST + p1.From.Offset = 0 p1.To.Type = obj.TYPE_MEM p1.To.Reg = s390x.REGZERO p1.To.Offset = 0 diff --git a/src/cmd/compile/internal/s390x/prog.go b/src/cmd/compile/internal/s390x/prog.go index 306adf85c360d..a00924a8ea7fb 100644 --- a/src/cmd/compile/internal/s390x/prog.go +++ b/src/cmd/compile/internal/s390x/prog.go @@ -36,23 +36,38 @@ var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{ // Integer s390x.AADD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ASUB & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASUBE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AADDW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASUBW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ANEG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ANEGW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.AAND & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.AOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.AXOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.AMULLD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.AMULLW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, - s390x.AMULHD & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, - s390x.AMULHDU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMULHD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMULHDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ADIVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ADIVDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ADIVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ADIVWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ASLD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASLW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ASRD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASRW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ASRAD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASRAW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ARLL & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ARLLG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, s390x.ACMP & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead}, s390x.ACMPU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead}, + s390x.ACMPW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead}, + s390x.ACMPWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead}, + s390x.AMODD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMODDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMODW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMODWU & 
obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, // Floating point. s390x.AFADD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, @@ -68,6 +83,8 @@ var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{ s390x.ALEDBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, s390x.ALDEBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, s390x.AFSQRT & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite}, + s390x.AFNEG & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite}, + s390x.AFNEGS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite}, // Conversions s390x.ACEFBRA & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv}, @@ -88,15 +105,24 @@ var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{ s390x.ACLGDBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv}, // Moves - s390x.AMOVB & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, - s390x.AMOVBZ & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, - s390x.AMOVH & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, - s390x.AMOVHZ & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, - s390x.AMOVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, - s390x.AMOVWZ & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, - s390x.AMOVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, - s390x.AFMOVS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, - s390x.AFMOVD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AMOVB & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVBZ & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVH & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVHZ & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVWZ & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AMOVHBR & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVWBR & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVDBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AFMOVS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AFMOVD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AMOVDEQ & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AMOVDGE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AMOVDGT & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AMOVDLE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AMOVDLT & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AMOVDNE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, // Storage operations s390x.AMVC & obj.AMask: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr}, @@ -114,6 
+140,8 @@ var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{ s390x.ABLT & obj.AMask: {Flags: gc.Cjmp}, s390x.ABGT & obj.AMask: {Flags: gc.Cjmp}, s390x.ABLE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ABLEU & obj.AMask: {Flags: gc.Cjmp}, + s390x.ABLTU & obj.AMask: {Flags: gc.Cjmp}, s390x.ACMPBEQ & obj.AMask: {Flags: gc.Cjmp}, s390x.ACMPBNE & obj.AMask: {Flags: gc.Cjmp}, s390x.ACMPBGE & obj.AMask: {Flags: gc.Cjmp}, diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go new file mode 100644 index 0000000000000..aac333f16becd --- /dev/null +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -0,0 +1,885 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s390x + +import ( + "math" + + "cmd/compile/internal/gc" + "cmd/compile/internal/ssa" + "cmd/internal/obj" + "cmd/internal/obj/s390x" +) + +// Smallest possible faulting page at address zero. +const minZeroPage = 4096 + +// ssaRegToReg maps ssa register numbers to obj register numbers. +var ssaRegToReg = []int16{ + s390x.REG_R0, + s390x.REG_R1, + s390x.REG_R2, + s390x.REG_R3, + s390x.REG_R4, + s390x.REG_R5, + s390x.REG_R6, + s390x.REG_R7, + s390x.REG_R8, + s390x.REG_R9, + s390x.REG_R10, + s390x.REG_R11, + s390x.REG_R12, + s390x.REG_R13, + s390x.REG_R14, + s390x.REG_R15, + s390x.REG_F0, + s390x.REG_F1, + s390x.REG_F2, + s390x.REG_F3, + s390x.REG_F4, + s390x.REG_F5, + s390x.REG_F6, + s390x.REG_F7, + s390x.REG_F8, + s390x.REG_F9, + s390x.REG_F10, + s390x.REG_F11, + s390x.REG_F12, + s390x.REG_F13, + s390x.REG_F14, + s390x.REG_F15, + 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case. +} + +// markMoves marks any MOVXconst ops that need to avoid clobbering flags. +func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { + flive := b.FlagsLiveAtEnd + if b.Control != nil && b.Control.Type.IsFlags() { + flive = true + } + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + if flive && v.Op == ssa.OpS390XMOVDconst { + // The "mark" is any non-nil Aux value. + v.Aux = v + } + if v.Type.IsFlags() { + flive = false + } + for _, a := range v.Args { + if a.Type.IsFlags() { + flive = true + } + } + } +} + +// loadByType returns the load instruction of the given type. +func loadByType(t ssa.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return s390x.AFMOVS + case 8: + return s390x.AFMOVD + } + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return s390x.AMOVB + } else { + return s390x.AMOVBZ + } + case 2: + if t.IsSigned() { + return s390x.AMOVH + } else { + return s390x.AMOVHZ + } + case 4: + if t.IsSigned() { + return s390x.AMOVW + } else { + return s390x.AMOVWZ + } + case 8: + return s390x.AMOVD + } + } + panic("bad load type") +} + +// storeByType returns the store instruction of the given type. +func storeByType(t ssa.Type) obj.As { + width := t.Size() + if t.IsFloat() { + switch width { + case 4: + return s390x.AFMOVS + case 8: + return s390x.AFMOVD + } + } else { + switch width { + case 1: + return s390x.AMOVB + case 2: + return s390x.AMOVH + case 4: + return s390x.AMOVW + case 8: + return s390x.AMOVD + } + } + panic("bad store type") +} + +// moveByType returns the reg->reg move instruction of the given type. 
+func moveByType(t ssa.Type) obj.As { + if t.IsFloat() { + return s390x.AFMOVD + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return s390x.AMOVB + } else { + return s390x.AMOVBZ + } + case 2: + if t.IsSigned() { + return s390x.AMOVH + } else { + return s390x.AMOVHZ + } + case 4: + if t.IsSigned() { + return s390x.AMOVW + } else { + return s390x.AMOVWZ + } + case 8: + return s390x.AMOVD + } + } + panic("bad load type") +} + +// opregreg emits instructions for +// dest := dest(To) op src(From) +// and also returns the created obj.Prog so it +// may be further adjusted (offset, scale, etc). +func opregreg(op obj.As, dest, src int16) *obj.Prog { + p := gc.Prog(op) + p.From.Type = obj.TYPE_REG + p.To.Type = obj.TYPE_REG + p.To.Reg = dest + p.From.Reg = src + return p +} + +// opregregimm emits instructions for +// dest := src(From) op off +// and also returns the created obj.Prog so it +// may be further adjusted (offset, scale, etc). +func opregregimm(op obj.As, dest, src int16, off int64) *obj.Prog { + p := gc.Prog(op) + p.From.Type = obj.TYPE_CONST + p.From.Offset = off + p.Reg = src + p.To.Reg = dest + p.To.Type = obj.TYPE_REG + return p +} + +func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { + s.SetLineno(v.Line) + switch v.Op { + case ssa.OpS390XSLD, ssa.OpS390XSLW, + ssa.OpS390XSRD, ssa.OpS390XSRW, + ssa.OpS390XSRAD, ssa.OpS390XSRAW: + r := gc.SSARegNum(v) + r1 := gc.SSARegNum(v.Args[0]) + r2 := gc.SSARegNum(v.Args[1]) + if r2 == s390x.REG_R0 { + v.Fatalf("cannot use R0 as shift value %s", v.LongString()) + } + p := opregreg(v.Op.Asm(), r, r2) + if r != r1 { + p.Reg = r1 + } + case ssa.OpS390XADD, ssa.OpS390XADDW, + ssa.OpS390XSUB, ssa.OpS390XSUBW, + ssa.OpS390XAND, ssa.OpS390XANDW, + ssa.OpS390XOR, ssa.OpS390XORW, + ssa.OpS390XXOR, ssa.OpS390XXORW: + r := gc.SSARegNum(v) + r1 := gc.SSARegNum(v.Args[0]) + r2 := gc.SSARegNum(v.Args[1]) + p := opregreg(v.Op.Asm(), r, r2) + if r != r1 { + p.Reg = r1 + } + // 2-address opcode arithmetic + case ssa.OpS390XMULLD, ssa.OpS390XMULLW, + ssa.OpS390XMULHD, ssa.OpS390XMULHDU, + ssa.OpS390XFADDS, ssa.OpS390XFADD, ssa.OpS390XFSUBS, ssa.OpS390XFSUB, + ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV: + r := gc.SSARegNum(v) + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) + } + opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1])) + case ssa.OpS390XDIVD, ssa.OpS390XDIVW, + ssa.OpS390XDIVDU, ssa.OpS390XDIVWU, + ssa.OpS390XMODD, ssa.OpS390XMODW, + ssa.OpS390XMODDU, ssa.OpS390XMODWU: + + // TODO(mundaym): use the temp registers every time like x86 does with AX? + dividend := gc.SSARegNum(v.Args[0]) + divisor := gc.SSARegNum(v.Args[1]) + + // CPU faults upon signed overflow, which occurs when most + // negative int is divided by -1. 
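
A hedged sketch of the semantics preserved by the divisor == -1 check emitted just below (illustrative Go, not from the patch; the function name is mine). In Go both results are defined by two's-complement wraparound even though the hardware divide instruction would trap:

    // math.MinInt64 / -1 wraps back to math.MinInt64, and
    // math.MinInt64 % -1 is 0, so the generated code skips the divide
    // and computes the results directly: NEG for the quotient,
    // XOR x,x (a zero) for the remainder.
    func safeDivMod(n, d int64) (q, r int64) {
        if d == -1 {
            return -n, 0 // -n wraps when n is the most negative value
        }
        return n / d, n % d
    }
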
+ var j *obj.Prog + if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW || + v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW { + + var c *obj.Prog + c = gc.Prog(s390x.ACMP) + j = gc.Prog(s390x.ABEQ) + + c.From.Type = obj.TYPE_REG + c.From.Reg = divisor + c.To.Type = obj.TYPE_CONST + c.To.Offset = -1 + + j.To.Type = obj.TYPE_BRANCH + + } + + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = divisor + p.Reg = 0 + p.To.Type = obj.TYPE_REG + p.To.Reg = dividend + + // signed division, rest of the check for -1 case + if j != nil { + j2 := gc.Prog(s390x.ABR) + j2.To.Type = obj.TYPE_BRANCH + + var n *obj.Prog + if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW { + // n * -1 = -n + n = gc.Prog(s390x.ANEG) + n.To.Type = obj.TYPE_REG + n.To.Reg = dividend + } else { + // n % -1 == 0 + n = gc.Prog(s390x.AXOR) + n.From.Type = obj.TYPE_REG + n.From.Reg = dividend + n.To.Type = obj.TYPE_REG + n.To.Reg = dividend + } + + j.To.Val = n + j2.To.Val = s.Pc() + } + case ssa.OpS390XADDconst, ssa.OpS390XADDWconst: + opregregimm(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]), v.AuxInt) + case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst, + ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst, + ssa.OpS390XANDconst, ssa.OpS390XANDWconst, + ssa.OpS390XORconst, ssa.OpS390XORWconst, + ssa.OpS390XXORconst, ssa.OpS390XXORWconst: + r := gc.SSARegNum(v) + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) + } + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst, + ssa.OpS390XSRDconst, ssa.OpS390XSRWconst, + ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst, + ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst: + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + r := gc.SSARegNum(v) + r1 := gc.SSARegNum(v.Args[0]) + if r != r1 { + p.Reg = r1 + } + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask: + r := gc.SSARegNum(v) + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpS390XMOVDaddridx: + r := gc.SSARegNum(v.Args[0]) + i := gc.SSARegNum(v.Args[1]) + p := gc.Prog(s390x.AMOVD) + p.From.Scale = 1 + if i == s390x.REGSP { + r, i = i, r + } + p.From.Type = obj.TYPE_ADDR + p.From.Reg = r + p.From.Index = i + gc.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = gc.SSARegNum(v) + case ssa.OpS390XMOVDaddr: + p := gc.Prog(s390x.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = gc.SSARegNum(v.Args[0]) + gc.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = gc.SSARegNum(v) + case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU: + opregreg(v.Op.Asm(), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[0])) + case ssa.OpS390XTESTB: + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0xFF + p.Reg = gc.SSARegNum(v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = s390x.REGTMP + case ssa.OpS390XFCMPS, ssa.OpS390XFCMP: + opregreg(v.Op.Asm(), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[0])) + case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst: + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = gc.SSARegNum(v.Args[0]) + p.To.Type = obj.TYPE_CONST + p.To.Offset = v.AuxInt + case ssa.OpS390XMOVDconst: + x := gc.SSARegNum(v) + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + 
p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst: + x := gc.SSARegNum(v) + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.OpS390XMOVDload, + ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload, + ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload, + ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload, + ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload: + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = gc.SSARegNum(v.Args[0]) + gc.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = gc.SSARegNum(v) + case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, ssa.OpS390XMOVDloadidx, + ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx, + ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx: + r := gc.SSARegNum(v.Args[0]) + i := gc.SSARegNum(v.Args[1]) + if i == s390x.REGSP { + r, i = i, r + } + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r + p.From.Scale = 1 + p.From.Index = i + gc.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = gc.SSARegNum(v) + case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore, + ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore: + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = gc.SSARegNum(v.Args[1]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = gc.SSARegNum(v.Args[0]) + gc.AddAux(&p.To, v) + case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx, + ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx: + r := gc.SSARegNum(v.Args[0]) + i := gc.SSARegNum(v.Args[1]) + if i == s390x.REGSP { + r, i = i, r + } + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = gc.SSARegNum(v.Args[2]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = r + p.To.Scale = 1 + p.To.Index = i + gc.AddAux(&p.To, v) + case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst: + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + sc := v.AuxValAndOff() + p.From.Offset = sc.Val() + p.To.Type = obj.TYPE_MEM + p.To.Reg = gc.SSARegNum(v.Args[0]) + gc.AddAux2(&p.To, v, sc.Off()) + case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg, + ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg, + ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA, + ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA, + ssa.OpS390XLDEBR, ssa.OpS390XLEDBR, + ssa.OpS390XFNEG, ssa.OpS390XFNEGS: + opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0])) + case ssa.OpS390XCLEAR: + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + sc := v.AuxValAndOff() + p.From.Offset = sc.Val() + p.To.Type = obj.TYPE_MEM + p.To.Reg = gc.SSARegNum(v.Args[0]) + gc.AddAux2(&p.To, v, sc.Off()) + case ssa.OpCopy, ssa.OpS390XMOVDconvert: + if v.Type.IsMemory() { + return + } + x := gc.SSARegNum(v.Args[0]) + y := gc.SSARegNum(v) + if x != y { + opregreg(moveByType(v.Type), y, x) + } + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Unimplementedf("load flags not implemented: %v", v.LongString()) + return + } + p := gc.Prog(loadByType(v.Type)) + n, off := gc.AutoVar(v.Args[0]) + p.From.Type = obj.TYPE_MEM + p.From.Node = n + p.From.Sym = gc.Linksym(n.Sym) + p.From.Offset = off + if n.Class == 
gc.PPARAM || n.Class == gc.PPARAMOUT { + p.From.Name = obj.NAME_PARAM + p.From.Offset += n.Xoffset + } else { + p.From.Name = obj.NAME_AUTO + } + p.To.Type = obj.TYPE_REG + p.To.Reg = gc.SSARegNum(v) + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Unimplementedf("store flags not implemented: %v", v.LongString()) + return + } + p := gc.Prog(storeByType(v.Type)) + p.From.Type = obj.TYPE_REG + p.From.Reg = gc.SSARegNum(v.Args[0]) + n, off := gc.AutoVar(v) + p.To.Type = obj.TYPE_MEM + p.To.Node = n + p.To.Sym = gc.Linksym(n.Sym) + p.To.Offset = off + if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { + p.To.Name = obj.NAME_PARAM + p.To.Offset += n.Xoffset + } else { + p.To.Name = obj.NAME_AUTO + } + case ssa.OpPhi: + gc.CheckLoweredPhi(v) + case ssa.OpInitMem: + // memory arg needs no code + case ssa.OpArg: + // input args need no code + case ssa.OpS390XLoweredGetClosurePtr: + // Closure pointer is R12 (already) + gc.CheckLoweredGetClosurePtr(v) + case ssa.OpS390XLoweredGetG: + r := gc.SSARegNum(v) + p := gc.Prog(s390x.AMOVD) + p.From.Type = obj.TYPE_REG + p.From.Reg = s390x.REGG + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpS390XCALLstatic: + if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym { + // Deferred calls will appear to be returning to + // the CALL deferreturn(SB) that we are about to emit. + // However, the stack trace code will show the line + // of the instruction byte before the return PC. + // To avoid that being an unrelated instruction, + // insert an actual hardware NOP that will have the right line number. + // This is different from obj.ANOP, which is a virtual no-op + // that doesn't make it into the instruction stream. + ginsnop() + } + p := gc.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym)) + if gc.Maxarg < v.AuxInt { + gc.Maxarg = v.AuxInt + } + case ssa.OpS390XCALLclosure: + p := gc.Prog(obj.ACALL) + p.To.Type = obj.TYPE_REG + p.To.Reg = gc.SSARegNum(v.Args[0]) + if gc.Maxarg < v.AuxInt { + gc.Maxarg = v.AuxInt + } + case ssa.OpS390XCALLdefer: + p := gc.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = gc.Linksym(gc.Deferproc.Sym) + if gc.Maxarg < v.AuxInt { + gc.Maxarg = v.AuxInt + } + case ssa.OpS390XCALLgo: + p := gc.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = gc.Linksym(gc.Newproc.Sym) + if gc.Maxarg < v.AuxInt { + gc.Maxarg = v.AuxInt + } + case ssa.OpS390XCALLinter: + p := gc.Prog(obj.ACALL) + p.To.Type = obj.TYPE_REG + p.To.Reg = gc.SSARegNum(v.Args[0]) + if gc.Maxarg < v.AuxInt { + gc.Maxarg = v.AuxInt + } + case ssa.OpS390XNEG, ssa.OpS390XNEGW: + r := gc.SSARegNum(v) + p := gc.Prog(v.Op.Asm()) + r1 := gc.SSARegNum(v.Args[0]) + if r != r1 { + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + } + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpS390XNOT, ssa.OpS390XNOTW: + v.Fatalf("NOT/NOTW generated %s", v.LongString()) + case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE, + ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE, + ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE, + ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv: + r := gc.SSARegNum(v) + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) + } + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = gc.SSARegNum(v.Args[1]) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpS390XFSQRT: + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = gc.SSARegNum(v.Args[0]) + p.To.Type = obj.TYPE_REG + 
p.To.Reg = gc.SSARegNum(v) + case ssa.OpSP, ssa.OpSB: + // nothing to do + case ssa.OpVarDef: + gc.Gvardef(v.Aux.(*gc.Node)) + case ssa.OpVarKill: + gc.Gvarkill(v.Aux.(*gc.Node)) + case ssa.OpVarLive: + gc.Gvarlive(v.Aux.(*gc.Node)) + case ssa.OpKeepAlive: + gc.KeepAlive(v) + case ssa.OpS390XInvertFlags: + v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) + case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT: + v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) + case ssa.OpS390XLoweredNilCheck: + // Optimization - if the subsequent block has a load or store + // at the same address, we don't need to issue this instruction. + mem := v.Args[1] + for _, w := range v.Block.Succs[0].Block().Values { + if w.Op == ssa.OpPhi { + if w.Type.IsMemory() { + mem = w + } + continue + } + if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() { + // w doesn't use a store - can't be a memory op. + continue + } + if w.Args[len(w.Args)-1] != mem { + v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w) + } + switch w.Op { + case ssa.OpS390XMOVDload, + ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload, + ssa.OpS390XMOVBZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVWZload, + ssa.OpS390XMOVHBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVDBRload, + ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore, + ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload, + ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore, + ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4, + ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4: + if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage { + if gc.Debug_checknil != 0 && int(v.Line) > 1 { + gc.Warnl(v.Line, "removed nil check") + } + return + } + case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst, + ssa.OpS390XCLEAR: + off := ssa.ValAndOff(v.AuxInt).Off() + if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage { + if gc.Debug_checknil != 0 && int(v.Line) > 1 { + gc.Warnl(v.Line, "removed nil check") + } + return + } + case ssa.OpS390XMVC: + off := ssa.ValAndOff(v.AuxInt).Off() + if (w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0]) && w.Aux == nil && off >= 0 && off < minZeroPage { + if gc.Debug_checknil != 0 && int(v.Line) > 1 { + gc.Warnl(v.Line, "removed nil check") + } + return + } + } + if w.Type.IsMemory() { + if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive { + // these ops are OK + mem = w + continue + } + // We can't delay the nil check past the next store. + break + } + } + // Issue a load which will fault if the input is nil. 
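
A hedged sketch of why the scan above is sound, assuming the first minZeroPage (4096) bytes of the address space are never mapped: if the successor block accesses the same base pointer at an offset below 4096, a nil base faults on that access anyway, making the explicit MOVBZ probe emitted just below redundant (illustrative Go, not from the patch; the type and function names are mine):

    type slice struct {
        ptr      *byte
        len, cap int64 // offsets 8 and 16, both < minZeroPage
    }

    // The load of h.cap doubles as the nil check for h: if h is nil,
    // the load touches address 16, inside the unmapped zero page, and
    // faults exactly as the explicit probe would.
    func capOf(h *slice) int64 {
        return h.cap
    }
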
+ p := gc.Prog(s390x.AMOVBZ) + p.From.Type = obj.TYPE_MEM + p.From.Reg = gc.SSARegNum(v.Args[0]) + gc.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = s390x.REGTMP + if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers + gc.Warnl(v.Line, "generated nil check") + } + case ssa.OpS390XMVC: + vo := v.AuxValAndOff() + p := gc.Prog(s390x.AMVC) + p.From.Type = obj.TYPE_MEM + p.From.Reg = gc.SSARegNum(v.Args[1]) + p.From.Offset = vo.Off() + p.To.Type = obj.TYPE_MEM + p.To.Reg = gc.SSARegNum(v.Args[0]) + p.To.Offset = vo.Off() + p.From3 = new(obj.Addr) + p.From3.Type = obj.TYPE_CONST + p.From3.Offset = vo.Val() + case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4, + ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4: + for i := 2; i < len(v.Args)-1; i++ { + if gc.SSARegNum(v.Args[i]) != gc.SSARegNum(v.Args[i-1])+1 { + v.Fatalf("invalid store multiple %s", v.LongString()) + } + } + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = gc.SSARegNum(v.Args[1]) + p.Reg = gc.SSARegNum(v.Args[len(v.Args)-2]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = gc.SSARegNum(v.Args[0]) + gc.AddAux(&p.To, v) + case ssa.OpS390XLoweredMove: + // Inputs must be valid pointers to memory, + // so adjust arg0 and arg1 as part of the expansion. + // arg2 should be src+size, + // + // mvc: MVC $256, 0(R2), 0(R1) + // MOVD $256(R1), R1 + // MOVD $256(R2), R2 + // CMP R2, Rarg2 + // BNE mvc + // MVC $rem, 0(R2), 0(R1) // if rem > 0 + // arg2 is the last address to move in the loop + 256 + mvc := gc.Prog(s390x.AMVC) + mvc.From.Type = obj.TYPE_MEM + mvc.From.Reg = gc.SSARegNum(v.Args[1]) + mvc.To.Type = obj.TYPE_MEM + mvc.To.Reg = gc.SSARegNum(v.Args[0]) + mvc.From3 = new(obj.Addr) + mvc.From3.Type = obj.TYPE_CONST + mvc.From3.Offset = 256 + + for i := 0; i < 2; i++ { + movd := gc.Prog(s390x.AMOVD) + movd.From.Type = obj.TYPE_ADDR + movd.From.Reg = gc.SSARegNum(v.Args[i]) + movd.From.Offset = 256 + movd.To.Type = obj.TYPE_REG + movd.To.Reg = gc.SSARegNum(v.Args[i]) + } + + cmpu := gc.Prog(s390x.ACMPU) + cmpu.From.Reg = gc.SSARegNum(v.Args[1]) + cmpu.From.Type = obj.TYPE_REG + cmpu.To.Reg = gc.SSARegNum(v.Args[2]) + cmpu.To.Type = obj.TYPE_REG + + bne := gc.Prog(s390x.ABLT) + bne.To.Type = obj.TYPE_BRANCH + gc.Patch(bne, mvc) + + if v.AuxInt > 0 { + mvc := gc.Prog(s390x.AMVC) + mvc.From.Type = obj.TYPE_MEM + mvc.From.Reg = gc.SSARegNum(v.Args[1]) + mvc.To.Type = obj.TYPE_MEM + mvc.To.Reg = gc.SSARegNum(v.Args[0]) + mvc.From3 = new(obj.Addr) + mvc.From3.Type = obj.TYPE_CONST + mvc.From3.Offset = v.AuxInt + } + case ssa.OpS390XLoweredZero: + // Input must be valid pointers to memory, + // so adjust arg0 as part of the expansion. 
+ // arg1 should be src+size, + // + // clear: CLEAR $256, 0(R1) + // MOVD $256(R1), R1 + // CMP R1, Rarg1 + // BNE clear + // CLEAR $rem, 0(R1) // if rem > 0 + // arg1 is the last address to zero in the loop + 256 + clear := gc.Prog(s390x.ACLEAR) + clear.From.Type = obj.TYPE_CONST + clear.From.Offset = 256 + clear.To.Type = obj.TYPE_MEM + clear.To.Reg = gc.SSARegNum(v.Args[0]) + + movd := gc.Prog(s390x.AMOVD) + movd.From.Type = obj.TYPE_ADDR + movd.From.Reg = gc.SSARegNum(v.Args[0]) + movd.From.Offset = 256 + movd.To.Type = obj.TYPE_REG + movd.To.Reg = gc.SSARegNum(v.Args[0]) + + cmpu := gc.Prog(s390x.ACMPU) + cmpu.From.Reg = gc.SSARegNum(v.Args[0]) + cmpu.From.Type = obj.TYPE_REG + cmpu.To.Reg = gc.SSARegNum(v.Args[1]) + cmpu.To.Type = obj.TYPE_REG + + bne := gc.Prog(s390x.ABLT) + bne.To.Type = obj.TYPE_BRANCH + gc.Patch(bne, clear) + + if v.AuxInt > 0 { + clear := gc.Prog(s390x.ACLEAR) + clear.From.Type = obj.TYPE_CONST + clear.From.Offset = v.AuxInt + clear.To.Type = obj.TYPE_MEM + clear.To.Reg = gc.SSARegNum(v.Args[0]) + } + default: + v.Unimplementedf("genValue not implemented: %s", v.LongString()) + } +} + +var blockJump = [...]struct { + asm, invasm obj.As +}{ + ssa.BlockS390XEQ: {s390x.ABEQ, s390x.ABNE}, + ssa.BlockS390XNE: {s390x.ABNE, s390x.ABEQ}, + ssa.BlockS390XLT: {s390x.ABLT, s390x.ABGE}, + ssa.BlockS390XGE: {s390x.ABGE, s390x.ABLT}, + ssa.BlockS390XLE: {s390x.ABLE, s390x.ABGT}, + ssa.BlockS390XGT: {s390x.ABGT, s390x.ABLE}, + ssa.BlockS390XGTF: {s390x.ABGT, s390x.ABLEU}, + ssa.BlockS390XGEF: {s390x.ABGE, s390x.ABLTU}, +} + +func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { + s.SetLineno(b.Line) + + switch b.Kind { + case ssa.BlockPlain, ssa.BlockCheck: + if b.Succs[0].Block() != next { + p := gc.Prog(s390x.ABR) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockDefer: + // defer returns in R3: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := gc.Prog(s390x.AAND) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0xFFFFFFFF + p.Reg = s390x.REG_R3 + p.To.Type = obj.TYPE_REG + p.To.Reg = s390x.REG_R3 + p = gc.Prog(s390x.ABNE) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := gc.Prog(s390x.ABR) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockExit: + gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here + case ssa.BlockRet: + gc.Prog(obj.ARET) + case ssa.BlockRetJmp: + p := gc.Prog(s390x.ABR) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym)) + case ssa.BlockS390XEQ, ssa.BlockS390XNE, + ssa.BlockS390XLT, ssa.BlockS390XGE, + ssa.BlockS390XLE, ssa.BlockS390XGT, + ssa.BlockS390XGEF, ssa.BlockS390XGTF: + jmp := blockJump[b.Kind] + likely := b.Likely + var p *obj.Prog + switch next { + case b.Succs[0].Block(): + p = gc.Prog(jmp.invasm) + likely *= -1 + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + case b.Succs[1].Block(): + p = gc.Prog(jmp.asm) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + default: + p = gc.Prog(jmp.asm) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + q := gc.Prog(s390x.ABR) + q.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: q, B: 
b.Succs[1].Block()}) + } + default: + b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString()) + } +} diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 743ff29138bd7..cb5baa1c7b4d1 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -206,6 +206,17 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config c.specialRegMask = specialRegMaskMIPS64 c.FPReg = framepointerRegMIPS64 c.hasGReg = true + case "s390x": + c.IntSize = 8 + c.PtrSize = 8 + c.lowerBlock = rewriteBlockS390X + c.lowerValue = rewriteValueS390X + c.registers = registersS390X[:] + c.gpRegMask = gpRegMaskS390X + c.fpRegMask = fpRegMaskS390X + c.FPReg = framepointerRegS390X + c.hasGReg = true + c.noDuffDevice = true default: fe.Unimplementedf(0, "arch %s not implemented", arch) } diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules new file mode 100644 index 0000000000000..264e2805f99c1 --- /dev/null +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -0,0 +1,1455 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lowering arithmetic +(Add64 x y) -> (ADD x y) +(AddPtr x y) -> (ADD x y) +(Add32 x y) -> (ADDW x y) +(Add16 x y) -> (ADDW x y) +(Add8 x y) -> (ADDW x y) +(Add32F x y) -> (FADDS x y) +(Add64F x y) -> (FADD x y) + +(Sub64 x y) -> (SUB x y) +(SubPtr x y) -> (SUB x y) +(Sub32 x y) -> (SUBW x y) +(Sub16 x y) -> (SUBW x y) +(Sub8 x y) -> (SUBW x y) +(Sub32F x y) -> (FSUBS x y) +(Sub64F x y) -> (FSUB x y) + +(Mul64 x y) -> (MULLD x y) +(Mul32 x y) -> (MULLW x y) +(Mul16 x y) -> (MULLW x y) +(Mul8 x y) -> (MULLW x y) +(Mul32F x y) -> (FMULS x y) +(Mul64F x y) -> (FMUL x y) + +(Div32F x y) -> (FDIVS x y) +(Div64F x y) -> (FDIV x y) + +(Div64 x y) -> (DIVD x y) +(Div64u x y) -> (DIVDU x y) +// DIVW/DIVWU has a 64-bit dividend and a 32-bit divisor, +// so a sign/zero extension of the dividend is required. +(Div32 x y) -> (DIVW (MOVWreg x) y) +(Div32u x y) -> (DIVWU (MOVWZreg x) y) +(Div16 x y) -> (DIVW (MOVHreg x) (MOVHreg y)) +(Div16u x y) -> (DIVWU (MOVHZreg x) (MOVHZreg y)) +(Div8 x y) -> (DIVW (MOVBreg x) (MOVBreg y)) +(Div8u x y) -> (DIVWU (MOVBZreg x) (MOVBZreg y)) + +(Hmul64 x y) -> (MULHD x y) +(Hmul64u x y) -> (MULHDU x y) +(Hmul32 x y) -> (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y))) +(Hmul32u x y) -> (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y))) +(Hmul16 x y) -> (SRDconst [16] (MULLW (MOVHreg x) (MOVHreg y))) +(Hmul16u x y) -> (SRDconst [16] (MULLW (MOVHZreg x) (MOVHZreg y))) +(Hmul8 x y) -> (SRDconst [8] (MULLW (MOVBreg x) (MOVBreg y))) +(Hmul8u x y) -> (SRDconst [8] (MULLW (MOVBZreg x) (MOVBZreg y))) + +(Mod64 x y) -> (MODD x y) +(Mod64u x y) -> (MODDU x y) +// MODW/MODWU has a 64-bit dividend and a 32-bit divisor, +// so a sign/zero extension of the dividend is required. 
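
A hedged Go-level sketch of why the Mod32/Div32 rules that follow insert MOVWreg/MOVWZreg on the dividend (illustrative, not from the patch; the function name is mine): the machine divide consumes a full 64-bit dividend, so the upper half of the register must agree with the sign of the 32-bit Go value or the result is garbage:

    // Modeling what (Mod32 x y) -> (MODW (MOVWreg x) y) guarantees.
    func mod32(x, y int32) int32 {
        wide := int64(x) // MOVWreg: sign-extend the dividend to 64 bits
        return int32(wide % int64(y))
    }
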
+(Mod32 x y) -> (MODW (MOVWreg x) y) +(Mod32u x y) -> (MODWU (MOVWZreg x) y) +(Mod16 x y) -> (MODW (MOVHreg x) (MOVHreg y)) +(Mod16u x y) -> (MODWU (MOVHZreg x) (MOVHZreg y)) +(Mod8 x y) -> (MODW (MOVBreg x) (MOVBreg y)) +(Mod8u x y) -> (MODWU (MOVBZreg x) (MOVBZreg y)) + +(Avg64u x y) -> (ADD (ADD (SRDconst x [1]) (SRDconst y [1])) (ANDconst (AND x y) [1])) + +(And64 x y) -> (AND x y) +(And32 x y) -> (ANDW x y) +(And16 x y) -> (ANDW x y) +(And8 x y) -> (ANDW x y) + +(Or64 x y) -> (OR x y) +(Or32 x y) -> (ORW x y) +(Or16 x y) -> (ORW x y) +(Or8 x y) -> (ORW x y) + +(Xor64 x y) -> (XOR x y) +(Xor32 x y) -> (XORW x y) +(Xor16 x y) -> (XORW x y) +(Xor8 x y) -> (XORW x y) + +(Neg64 x) -> (NEG x) +(Neg32 x) -> (NEGW x) +(Neg16 x) -> (NEGW (MOVHreg x)) +(Neg8 x) -> (NEGW (MOVBreg x)) +(Neg32F x) -> (FNEGS x) +(Neg64F x) -> (FNEG x) + +(Com64 x) -> (NOT x) +(Com32 x) -> (NOTW x) +(Com16 x) -> (NOTW x) +(Com8 x) -> (NOTW x) +(NOT x) && true -> (XORconst [-1] x) +(NOTW x) && true -> (XORWconst [-1] x) + +// Lowering boolean ops +(AndB x y) -> (ANDW x y) +(OrB x y) -> (ORW x y) +(Not x) -> (XORWconst [1] x) + +// Lowering pointer arithmetic +(OffPtr [off] ptr:(SP)) -> (MOVDaddr [off] ptr) +(OffPtr [off] ptr) && is32Bit(off) -> (ADDconst [off] ptr) +(OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr) + +(Sqrt x) -> (FSQRT x) + +// Lowering extension +// Note: we always extend to 64 bits even though some ops don't need that many result bits. +(SignExt8to16 x) -> (MOVBreg x) +(SignExt8to32 x) -> (MOVBreg x) +(SignExt8to64 x) -> (MOVBreg x) +(SignExt16to32 x) -> (MOVHreg x) +(SignExt16to64 x) -> (MOVHreg x) +(SignExt32to64 x) -> (MOVWreg x) + +(ZeroExt8to16 x) -> (MOVBZreg x) +(ZeroExt8to32 x) -> (MOVBZreg x) +(ZeroExt8to64 x) -> (MOVBZreg x) +(ZeroExt16to32 x) -> (MOVHZreg x) +(ZeroExt16to64 x) -> (MOVHZreg x) +(ZeroExt32to64 x) -> (MOVWZreg x) + +// Lowering truncation +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 x) -> x +(Trunc32to8 x) -> x +(Trunc32to16 x) -> x +(Trunc64to8 x) -> x +(Trunc64to16 x) -> x +(Trunc64to32 x) -> x + +// Lowering float <-> int +(Cvt32to32F x) -> (CEFBRA x) +(Cvt32to64F x) -> (CDFBRA x) +(Cvt64to32F x) -> (CEGBRA x) +(Cvt64to64F x) -> (CDGBRA x) + +(Cvt32Fto32 x) -> (CFEBRA x) +(Cvt32Fto64 x) -> (CGEBRA x) +(Cvt64Fto32 x) -> (CFDBRA x) +(Cvt64Fto64 x) -> (CGDBRA x) + +(Cvt32Fto64F x) -> (LDEBR x) +(Cvt64Fto32F x) -> (LEDBR x) + +// Lowering shifts +// Unsigned shifts need to return 0 if shift amount is >= width of shifted value. +// result = (arg << shift) & (shift >= argbits ? 
0 : 0xffffffffffffffff) +(Lsh64x64 x y) -> (AND (SLD x y) (SUBEcarrymask (CMPUconst y [63]))) +(Lsh64x32 x y) -> (AND (SLD x y) (SUBEcarrymask (CMPWUconst y [63]))) +(Lsh64x16 x y) -> (AND (SLD x y) (SUBEcarrymask (CMPWUconst (MOVHZreg y) [63]))) +(Lsh64x8 x y) -> (AND (SLD x y) (SUBEcarrymask (CMPWUconst (MOVBZreg y) [63]))) + +(Lsh32x64 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPUconst y [31]))) +(Lsh32x32 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst y [31]))) +(Lsh32x16 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31]))) +(Lsh32x8 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) + +(Lsh16x64 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPUconst y [31]))) +(Lsh16x32 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst y [31]))) +(Lsh16x16 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31]))) +(Lsh16x8 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) + +(Lsh8x64 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPUconst y [31]))) +(Lsh8x32 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst y [31]))) +(Lsh8x16 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31]))) +(Lsh8x8 x y) -> (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) + +(Lrot64 x [c]) -> (RLLGconst [c&63] x) +(Lrot32 x [c]) -> (RLLconst [c&31] x) + +(Rsh64Ux64 x y) -> (AND (SRD x y) (SUBEcarrymask (CMPUconst y [63]))) +(Rsh64Ux32 x y) -> (AND (SRD x y) (SUBEcarrymask (CMPWUconst y [63]))) +(Rsh64Ux16 x y) -> (AND (SRD x y) (SUBEcarrymask (CMPWUconst (MOVHZreg y) [63]))) +(Rsh64Ux8 x y) -> (AND (SRD x y) (SUBEcarrymask (CMPWUconst (MOVBZreg y) [63]))) + +(Rsh32Ux64 x y) -> (ANDW (SRW x y) (SUBEWcarrymask (CMPUconst y [31]))) +(Rsh32Ux32 x y) -> (ANDW (SRW x y) (SUBEWcarrymask (CMPWUconst y [31]))) +(Rsh32Ux16 x y) -> (ANDW (SRW x y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31]))) +(Rsh32Ux8 x y) -> (ANDW (SRW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) + +(Rsh16Ux64 x y) -> (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPUconst y [15]))) +(Rsh16Ux32 x y) -> (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPWUconst y [15]))) +(Rsh16Ux16 x y) -> (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [15]))) +(Rsh16Ux8 x y) -> (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [15]))) + +(Rsh8Ux64 x y) -> (ANDW (SRW (MOVBZreg x) y) (SUBEWcarrymask (CMPUconst y [7]))) +(Rsh8Ux32 x y) -> (ANDW (SRW (MOVBZreg x) y) (SUBEWcarrymask (CMPWUconst y [7]))) +(Rsh8Ux16 x y) -> (ANDW (SRW (MOVBZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [7]))) +(Rsh8Ux8 x y) -> (ANDW (SRW (MOVBZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [7]))) + +// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value. +// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width. 
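
A hedged sketch of the OR/NOT/SUBEcarrymask combination in the Rsh64x* rules that follow (illustrative Go, not from the patch; the function name is mine): when the count is out of range the carry mask is all ones, the OR forces the count itself to all ones, and the hardware shift, which uses only the low six bits of the count, sees 63, which still fills the result with sign bits:

    // Go semantics for a signed right shift with an unbounded count.
    func rsh64(x int64, s uint) int64 {
        if s >= 64 {
            s = 63 // saturate: the result is already all sign bits
        }
        return x >> s
    }
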
+(Rsh64x64 x y) -> (SRAD x (OR y (NOT (SUBEcarrymask (CMPUconst y [63]))))) +(Rsh64x32 x y) -> (SRAD x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst y [63]))))) +(Rsh64x16 x y) -> (SRAD x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [63]))))) +(Rsh64x8 x y) -> (SRAD x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [63]))))) + +(Rsh32x64 x y) -> (SRAW x (OR y (NOT (SUBEcarrymask (CMPUconst y [31]))))) +(Rsh32x32 x y) -> (SRAW x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst y [31]))))) +(Rsh32x16 x y) -> (SRAW x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31]))))) +(Rsh32x8 x y) -> (SRAW x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))))) + +(Rsh16x64 x y) -> (SRAW (MOVHreg x) (OR y (NOT (SUBEcarrymask (CMPUconst y [15]))))) +(Rsh16x32 x y) -> (SRAW (MOVHreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst y [15]))))) +(Rsh16x16 x y) -> (SRAW (MOVHreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [15]))))) +(Rsh16x8 x y) -> (SRAW (MOVHreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [15]))))) + +(Rsh8x64 x y) -> (SRAW (MOVBreg x) (OR y (NOT (SUBEcarrymask (CMPUconst y [7]))))) +(Rsh8x32 x y) -> (SRAW (MOVBreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst y [7]))))) +(Rsh8x16 x y) -> (SRAW (MOVBreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [7]))))) +(Rsh8x8 x y) -> (SRAW (MOVBreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [7]))))) + +// Lowering comparisons +(Less64 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Less32 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Less16 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) +(Less8 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) +(Less64U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) +(Less32U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) +(Less16U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y))) +(Less8U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) +// Use SETG with reversed operands to dodge NaN case. +(Less64F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x)) +(Less32F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x)) + +(Leq64 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Leq32 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Leq16 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) +(Leq8 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) +(Leq64U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) +(Leq32U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) +(Leq16U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y))) +(Leq8U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) +// Use SETGE with reversed operands to dodge NaN case. 
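
The SETG/SETGE wording in these comments is inherited from the amd64 rules; here the MOVDGTnoinv/MOVDGEnoinv conditional moves play that role, and the matching GTF/GEF block kinds get BLEU/BLTU, branch variants also taken on unordered results, as their inverses in the blockJump table above. A hedged sketch of the NaN behavior being preserved (illustrative Go, not from the patch; the function name is mine):

    // x < y is lowered as y > x so that an unordered comparison
    // (either operand NaN) produces false, as Go requires for all of
    // <, <=, > and >=.
    func less64F(x, y float64) bool {
        return y > x // false when x or y is NaN
    }
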
+(Leq64F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x)) +(Leq32F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x)) + +(Greater64 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Greater32 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Greater16 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) +(Greater8 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) +(Greater64U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) +(Greater32U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) +(Greater16U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y))) +(Greater8U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) +(Greater64F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) +(Greater32F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + +(Geq64 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Geq32 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Geq16 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) +(Geq8 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) +(Geq64U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) +(Geq32U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) +(Geq16U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y))) +(Geq8U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) +(Geq64F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) +(Geq32F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + +(Eq64 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Eq32 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Eq16 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) +(Eq8 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) +(EqB x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) +(EqPtr x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Eq64F x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) +(Eq32F x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + +(Neq64 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Neq32 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Neq16 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) +(Neq8 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) +(NeqB x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) +(NeqPtr x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Neq64F x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) +(Neq32F x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + +// Lowering loads +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem) +(Load ptr mem) && is32BitInt(t) -> (MOVWZload ptr mem) +(Load ptr mem) && is16BitInt(t) -> (MOVHZload ptr mem) +(Load ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBZload ptr mem) +(Load ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem) +(Load ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem) + +// Lowering stores +// These more-specific FP versions of Store pattern should come first. 
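
A hedged note on why order matters in the Store rules that follow (my inference, not stated in the patch beyond "should come first"): rules for an op are tried in file order, and a float64 value lives in a floating-point register, so its 8-byte store must match FMOVDstore before the generic 8-byte MOVDstore rule can claim it. Illustrative Go (the type and function names are mine):

    // p.f must be stored with FMOVDstore (from an FPR) and p.i with
    // MOVDstore (from a GPR), even though both stores are 8 bytes.
    type pair struct {
        f float64
        i int64
    }

    func set(p *pair, f float64, i int64) {
        p.f = f
        p.i = i
    }
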
+(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem) +(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem) + +(Store [8] ptr val mem) -> (MOVDstore ptr val mem) +(Store [4] ptr val mem) -> (MOVWstore ptr val mem) +(Store [2] ptr val mem) -> (MOVHstore ptr val mem) +(Store [1] ptr val mem) -> (MOVBstore ptr val mem) + +// Lowering moves + +// Load and store for small copies. +(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBZload src mem) mem) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVHstore dst (MOVHZload src mem) mem) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVWstore dst (MOVWZload src mem) mem) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 -> (MOVDstore dst (MOVDload src mem) mem) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 -> + (MOVDstore [8] dst (MOVDload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 24 -> + (MOVDstore [16] dst (MOVDload [16] src mem) + (MOVDstore [8] dst (MOVDload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem))) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 -> + (MOVBstore [2] dst (MOVBZload [2] src mem) + (MOVHstore dst (MOVHZload src mem) mem)) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 -> + (MOVBstore [4] dst (MOVBZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem)) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 -> + (MOVHstore [4] dst (MOVHZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem)) +(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 -> + (MOVBstore [6] dst (MOVBZload [6] src mem) + (MOVHstore [4] dst (MOVHZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem))) + +// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes). +(Move [s] dst src mem) && SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256 -> + (MVC [makeValAndOff(SizeAndAlign(s).Size(), 0)] dst src mem) +(Move [s] dst src mem) && SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512 -> + (MVC [makeValAndOff(SizeAndAlign(s).Size()-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)) +(Move [s] dst src mem) && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768 -> + (MVC [makeValAndOff(SizeAndAlign(s).Size()-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))) +(Move [s] dst src mem) && SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024 -> + (MVC [makeValAndOff(SizeAndAlign(s).Size()-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))) + +// Move more than 1024 bytes using a loop. 
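
A hedged sketch of the decomposition in the LoweredMove rule that follows, assuming size > 1024 (illustrative Go, not from the patch; the function name is mine): the auxiliary value carries the remainder size%256, and the extra argument is the source address at which the 256-byte loop stops:

    // Copy size bytes in 256-byte blocks, then the remainder,
    // mirroring the MVC loop emitted in ssa.go above. Assumes
    // len(dst) and len(src) are at least size.
    func move(dst, src []byte, size int) {
        full := size / 256 * 256
        for off := 0; off < full; off += 256 {
            copy(dst[off:off+256], src[off:off+256]) // MVC $256
        }
        if rem := size - full; rem > 0 {
            copy(dst[full:full+rem], src[full:full+rem]) // trailing MVC $rem
        }
    }
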
+(Move [s] dst src mem) && SizeAndAlign(s).Size() > 1024 -> + (LoweredMove [SizeAndAlign(s).Size()%256] dst src (ADDconst src [(SizeAndAlign(s).Size()/256)*256]) mem) + +// Lowering Zero instructions +(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem +(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem) +(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVHstoreconst [0] destptr mem) +(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVWstoreconst [0] destptr mem) +(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 -> (MOVDstoreconst [0] destptr mem) +(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 -> + (MOVBstoreconst [makeValAndOff(0,2)] destptr + (MOVHstoreconst [0] destptr mem)) +(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 -> + (MOVBstoreconst [makeValAndOff(0,4)] destptr + (MOVWstoreconst [0] destptr mem)) +(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 -> + (MOVHstoreconst [makeValAndOff(0,4)] destptr + (MOVWstoreconst [0] destptr mem)) +(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 -> + (MOVWstoreconst [makeValAndOff(0,3)] destptr + (MOVWstoreconst [0] destptr mem)) + +(Zero [s] destptr mem) && SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024 -> + (CLEAR [makeValAndOff(SizeAndAlign(s).Size(), 0)] destptr mem) + +// Move more than 1024 bytes using a loop. +(Zero [s] destptr mem) && SizeAndAlign(s).Size() > 1024 -> + (LoweredZero [SizeAndAlign(s).Size()%256] destptr (ADDconst destptr [(SizeAndAlign(s).Size()/256)*256]) mem) + +// Lowering constants +(Const8 [val]) -> (MOVDconst [val]) +(Const16 [val]) -> (MOVDconst [val]) +(Const32 [val]) -> (MOVDconst [val]) +(Const64 [val]) -> (MOVDconst [val]) +(Const32F [val]) -> (FMOVSconst [val]) +(Const64F [val]) -> (FMOVDconst [val]) +(ConstNil) -> (MOVDconst [0]) +(ConstBool [b]) -> (MOVDconst [b]) + +// Lowering calls +(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem) +(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem) +(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem) +(GoCall [argwid] mem) -> (CALLgo [argwid] mem) +(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem) + +// Miscellaneous +(Convert x mem) -> (MOVDconvert x mem) +(IsNonNil p) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0])) +(IsInBounds idx len) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len)) +(IsSliceInBounds idx len) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len)) +(NilCheck ptr mem) -> (LoweredNilCheck ptr mem) +(GetG mem) -> (LoweredGetG mem) +(GetClosurePtr) -> (LoweredGetClosurePtr) +(Addr {sym} base) -> (MOVDaddr {sym} base) +(ITab (Load ptr mem)) -> (MOVDload ptr mem) + +// block rewrites +(If (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (LT cmp yes no) +(If (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (LE cmp yes no) +(If (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GT cmp yes no) +(If (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GE cmp yes no) +(If (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (EQ cmp yes no) +(If (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (NE cmp yes no) + +// Special case for floating point - LF/LEF not generated. 
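
Below, the floating-point block rewrites use the GTF/GEF kinds, and the generic (If cond yes no) rule handles everything else: a boolean that is not recognized as one of the 0/1 conditional-move patterns is a materialized 0-or-1 value, so the branch tests its low byte with TESTB and takes the NE arm when it is set. A hedged Go-level sketch (illustrative, not from the patch; the function name is mine):

    // What the fallback If lowering models: a branch on a boolean
    // value that already sits in a register as 0 or 1.
    func pick(cond bool, a, b int64) int64 {
        if cond { // TESTB cond; BNE yes
            return a
        }
        return b
    }
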
+(If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
+(If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)
+
+(If cond yes no) -> (NE (TESTB cond) yes no)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+(NE (TESTB (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LT cmp yes no)
+(NE (TESTB (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LE cmp yes no)
+(NE (TESTB (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GT cmp yes no)
+(NE (TESTB (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GE cmp yes no)
+(NE (TESTB (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (EQ cmp yes no)
+(NE (TESTB (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (NE cmp yes no)
+(NE (TESTB (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GTF cmp yes no)
+(NE (TESTB (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GEF cmp yes no)
+
+// Fold constants into instructions.
+(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
+(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x)
+(ADDW x (MOVDconst [c])) -> (ADDWconst [c] x)
+(ADDW (MOVDconst [c]) x) -> (ADDWconst [c] x)
+
+(SUB x (MOVDconst [c])) && is32Bit(c) -> (SUBconst x [c])
+(SUB (MOVDconst [c]) x) && is32Bit(c) -> (NEG (SUBconst <v.Type> x [c]))
+(SUBW x (MOVDconst [c])) -> (SUBWconst x [c])
+(SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst <v.Type> x [c]))
+
+(MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x)
+(MULLD (MOVDconst [c]) x) && is32Bit(c) -> (MULLDconst [c] x)
+(MULLW x (MOVDconst [c])) -> (MULLWconst [c] x)
+(MULLW (MOVDconst [c]) x) -> (MULLWconst [c] x)
+
+(AND x (MOVDconst [c])) && is32Bit(c) -> (ANDconst [c] x)
+(AND (MOVDconst [c]) x) && is32Bit(c) -> (ANDconst [c] x)
+(ANDW x (MOVDconst [c])) -> (ANDWconst [c] x)
+(ANDW (MOVDconst [c]) x) -> (ANDWconst [c] x)
+
+(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x)
+(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c & d] x)
+
+(OR x (MOVDconst [c])) && is32Bit(c) -> (ORconst [c] x)
+(OR (MOVDconst [c]) x) && is32Bit(c) -> (ORconst [c] x)
+(ORW x (MOVDconst [c])) -> (ORWconst [c] x)
+(ORW (MOVDconst [c]) x) -> (ORWconst [c] x)
+
+(XOR x (MOVDconst [c])) && is32Bit(c) -> (XORconst [c] x)
+(XOR (MOVDconst [c]) x) && is32Bit(c) -> (XORconst [c] x)
+(XORW x (MOVDconst [c])) -> (XORWconst [c] x)
+(XORW (MOVDconst [c]) x) -> (XORWconst [c] x)
+
+(SLD x (MOVDconst [c])) -> (SLDconst [c&63] x)
+(SLW x (MOVDconst [c])) -> (SLWconst [c&63] x)
+(SRD x (MOVDconst [c])) -> (SRDconst [c&63] x)
+(SRW x (MOVDconst [c])) -> (SRWconst [c&63] x)
+(SRAD x (MOVDconst [c])) -> (SRADconst [c&63] x)
+(SRAW x (MOVDconst [c])) -> (SRAWconst [c&63] x)
+
+(SRAW x (ANDWconst [63] y)) -> (SRAW x y)
+(SRAD x (ANDconst [63] y)) -> (SRAD x y)
+(SLW x (ANDWconst [63] y)) -> (SLW x y)
+(SLD x (ANDconst [63] y)) -> (SLD x y)
+(SRW x (ANDWconst [63] y)) -> (SRW x y)
+(SRD x (ANDconst [63] y)) -> (SRD x y)
+
+(CMP x (MOVDconst [c])) && is32Bit(c) -> (CMPconst x [c])
+(CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c]))
+(CMPW x (MOVDconst [c])) -> (CMPWconst x [c])
+(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst x [c]))
+(CMPU x (MOVDconst [c])) && is32Bit(c) -> (CMPUconst x [int64(uint32(c))])
+(CMPU (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPUconst x [int64(uint32(c))]))
+(CMPWU x (MOVDconst [c])) -> (CMPWUconst x [int64(uint32(c))])
+(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(uint32(c))]))
+
+// Using MOVBZreg instead of AND is cheaper.
+(ANDconst [0xFF] x) -> (MOVBZreg x)
+(ANDconst [0xFFFF] x) -> (MOVHZreg x)
+(ANDconst [0xFFFFFFFF] x) -> (MOVWZreg x)
+
+// strength reduction
+(MULLDconst [-1] x) -> (NEG x)
+(MULLDconst [0] _) -> (MOVDconst [0])
+(MULLDconst [1] x) -> x
+(MULLDconst [c] x) && isPowerOfTwo(c) -> (SLDconst [log2(c)] x)
+(MULLDconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUB (SLDconst <v.Type> [log2(c+1)] x) x)
+(MULLDconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (ADD (SLDconst <v.Type> [log2(c-1)] x) x)
+
+(MULLWconst [-1] x) -> (NEGW x)
+(MULLWconst [0] _) -> (MOVDconst [0])
+(MULLWconst [1] x) -> x
+(MULLWconst [c] x) && isPowerOfTwo(c) -> (SLWconst [log2(c)] x)
+(MULLWconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBW (SLWconst <v.Type> [log2(c+1)] x) x)
+(MULLWconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (ADDW (SLWconst <v.Type> [log2(c-1)] x) x)
+
+// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
+(ADDconst [c] (MOVDaddr [d] {s} x)) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
+(MOVDaddr [c] {s} (ADDconst [d] x)) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
+(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
+(MOVDaddr [c] {s} (ADDconst [d] x)) && x.Op != OpSB && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
+(MOVDaddr [c] {s} (ADD x y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
+(ADD x (MOVDaddr [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
+(ADD (MOVDaddr [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
+
+// fold ADDconst into MOVDaddrx
+(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is32Bit(c+d) -> (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (MOVDaddridx [c+d] {s} x y)
+
+// reverse ordering of compare instruction
+(MOVDLT x y (InvertFlags cmp)) -> (MOVDGT x y cmp)
+(MOVDGT x y (InvertFlags cmp)) -> (MOVDLT x y cmp)
+(MOVDLE x y (InvertFlags cmp)) -> (MOVDGE x y cmp)
+(MOVDGE x y (InvertFlags cmp)) -> (MOVDLE x y cmp)
+(MOVDEQ x y (InvertFlags cmp)) -> (MOVDEQ x y cmp)
+(MOVDNE x y (InvertFlags cmp)) -> (MOVDNE x y cmp)
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) -> x
+(MOVBZreg x:(MOVBZload _ _)) -> x
+(MOVHreg x:(MOVBload _ _)) -> x
+(MOVHreg x:(MOVBZload _ _)) -> x
+(MOVHreg x:(MOVHload _ _)) -> x
+(MOVHZreg x:(MOVBZload _ _)) -> x
+(MOVHZreg x:(MOVHZload _ _)) -> x
+(MOVWreg x:(MOVBload _ _)) -> x
+(MOVWreg x:(MOVBZload _ _)) -> x
+(MOVWreg x:(MOVHload _ _)) -> x
+(MOVWreg x:(MOVHZload _ _)) -> x
+(MOVWreg x:(MOVWload _ _)) -> x
+(MOVWZreg x:(MOVBZload _ _)) -> x
+(MOVWZreg x:(MOVHZload _ _)) -> x
+(MOVWZreg x:(MOVWZload _ _)) -> x
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) -> x
+(MOVBZreg x:(MOVBZreg _)) -> x
+(MOVHreg x:(MOVBreg _)) -> x
+(MOVHreg x:(MOVBZreg _)) -> x
+(MOVHreg x:(MOVHreg _)) -> x
+(MOVHZreg x:(MOVBZreg _)) -> x
+(MOVHZreg x:(MOVHZreg _)) -> x
+(MOVWreg x:(MOVBreg _)) -> x
+(MOVWreg x:(MOVBZreg _)) -> x
+(MOVWreg x:(MOVHreg _)) -> x
+(MOVWreg x:(MOVHZreg _)) -> x
+(MOVWreg x:(MOVWreg _)) -> x
+(MOVWZreg x:(MOVBZreg _)) -> x
+(MOVWZreg x:(MOVHZreg _)) -> x
+(MOVWZreg x:(MOVWZreg _)) -> x
+
+// sign extended loads
+// Note: The combined instruction must end up in the same
block +// as the original load. If not, we end up making a value with +// memory type live in two different blocks, which can lead to +// multiple memory values alive simultaneously. +// Make sure we don't combine these ops if the load has another use. +// This prevents a single load from being split into multiple loads +// which then might return different values. See test/atomicload.go. +(MOVBreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBZreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZload [off] {sym} ptr mem) +(MOVHreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload [off] {sym} ptr mem) +(MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZload [off] {sym} ptr mem) +(MOVWreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) +(MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZload [off] {sym} ptr mem) + +(MOVBZreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) +(MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) +(MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) + +// replace load from same location as preceding store with copy +(MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x +(MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x +(MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x +(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x + +// Fold extensions and ANDs together. +(MOVBZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xff] x) +(MOVHZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xffff] x) +(MOVWZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xffffffff] x) +(MOVBreg (ANDWconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c & 0x7f] x) +(MOVHreg (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c & 0x7fff] x) +(MOVWreg (ANDWconst [c] x)) && c & 0x80000000 == 0 -> (ANDconst [c & 0x7fffffff] x) + +(MOVBZreg (ANDconst [c] x)) -> (ANDconst [c & 0xff] x) +(MOVHZreg (ANDconst [c] x)) -> (ANDconst [c & 0xffff] x) +(MOVWZreg (ANDconst [c] x)) -> (ANDconst [c & 0xffffffff] x) +(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c & 0x7f] x) +(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c & 0x7fff] x) +(MOVWreg (ANDconst [c] x)) && c & 0x80000000 == 0 -> (ANDconst [c & 0x7fffffff] x) + +// Don't extend before storing +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) + +// Fold constants into memory operations. 
+// Note that this is not always a good idea because if not all the uses of +// the ADDconst get eliminated, we still have to compute the ADDconst and we now +// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one. +// Nevertheless, let's do it! +(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDload [off1+off2] {sym} ptr mem) +(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} ptr mem) +(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} ptr mem) +(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} ptr mem) +(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem) +(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem) + +(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem) +(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} ptr val mem) +(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem) +(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem) +(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem) + +// Fold constants into stores. +(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB -> + (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB -> + (MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && ptr.Op != OpSB -> + (MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && ptr.Op != OpSB -> + (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) + +// Fold address offsets into constant stores. +(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> + (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) +(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> + (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) +(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> + (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) +(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> + (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + +// We need to fold MOVDaddr into the MOVx ops so that the live variable analysis knows +// what variables are being read/written by the ops. 
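+// For example (editor's illustration): a stack load
+//   (MOVDload [0] (MOVDaddr [8] {x} SP) mem)
+// merges into (MOVDload [8] {x} SP mem), so the liveness analysis sees a
+// direct read of the stack slot x rather than an opaque pointer dereference.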
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + +(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + +(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + +(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> + (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) +(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> + (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) +(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> + (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) +(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> + (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + +// generating indexed loads and stores +(MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + 
(MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (FMOVSloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (FMOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + +(MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) +(MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) +(MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) +(MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) +(FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (FMOVSstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) +(FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + +(MOVBZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVBZloadidx [off] {sym} ptr idx mem) +(MOVHZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVHZloadidx [off] {sym} ptr idx mem) +(MOVWZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVWZloadidx [off] {sym} ptr idx mem) +(MOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVDloadidx [off] {sym} ptr idx mem) +(FMOVSload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVSloadidx [off] {sym} ptr idx mem) +(FMOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVDloadidx [off] {sym} ptr idx mem) +(MOVBstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx [off] {sym} ptr idx val mem) +(MOVHstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVHstoreidx [off] {sym} ptr idx val mem) +(MOVWstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx [off] {sym} ptr idx val mem) +(MOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVDstoreidx [off] {sym} ptr idx val mem) +(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVSstoreidx [off] {sym} ptr idx val mem) +(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVDstoreidx [off] {sym} ptr idx val mem) + +// combine ADD into indexed loads and stores +(MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem) 
+(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem) +(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem) +(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem) +(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem) +(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem) + +(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem) +(MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem) +(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem) +(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem) +(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem) +(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem) + +(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem) +(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem) +(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem) +(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem) +(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem) +(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem) + +(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem) +(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem) +(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem) +(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem) +(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem) +(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem) + +// fold MOVDaddrs together +(MOVDaddr [off1] {sym1} (MOVDaddr [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVDaddr [off1+off2] {mergeSym(sym1,sym2)} x) + +// MOVDaddr into MOVDaddridx +(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> + (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) +(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB -> + (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + +// MOVDaddridx into MOVDaddr +(MOVDaddr [off1] {sym1} (MOVDaddridx [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + +// Absorb InvertFlags into branches. +(LT (InvertFlags cmp) yes no) -> (GT cmp yes no) +(GT (InvertFlags cmp) yes no) -> (LT cmp yes no) +(LE (InvertFlags cmp) yes no) -> (GE cmp yes no) +(GE (InvertFlags cmp) yes no) -> (LE cmp yes no) +(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no) +(NE (InvertFlags cmp) yes no) -> (NE cmp yes no) + +// Constant comparisons. 
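+// (Illustration: (CMPWconst (MOVDconst [4]) [7]) folds to (FlagLT); the
+// branch-absorption rules further down then turn any block conditioned on
+// that comparison into an unconditional First block.)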
+(CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<y -> (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>y -> (FlagGT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) -> (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)
+
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) -> (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)
+
+// Other known comparisons.
+(CMPconst (MOVBZreg _) [c]) && 0xFF < c -> (FlagLT)
+(CMPconst (MOVHZreg _) [c]) && 0xFFFF < c -> (FlagLT)
+(CMPconst (MOVWZreg _) [c]) && 0xFFFFFFFF < c -> (FlagLT)
+(CMPWconst (SRWconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint(32-c)) <= uint32(n) -> (FlagLT)
+(CMPconst (SRDconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint(64-c)) <= uint64(n) -> (FlagLT)
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT)
+(CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
+
+// Absorb flag constants into SUBEcarrymask ops.
+(SUBEcarrymask (FlagEQ)) -> (MOVDconst [-1])
+(SUBEcarrymask (FlagLT)) -> (MOVDconst [-1])
+(SUBEcarrymask (FlagGT)) -> (MOVDconst [0])
+(SUBEWcarrymask (FlagEQ)) -> (MOVDconst [-1])
+(SUBEWcarrymask (FlagLT)) -> (MOVDconst [-1])
+(SUBEWcarrymask (FlagGT)) -> (MOVDconst [0])
+
+// Absorb flag constants into branches.
+(EQ (FlagEQ) yes no) -> (First nil yes no)
+(EQ (FlagLT) yes no) -> (First nil no yes)
+(EQ (FlagGT) yes no) -> (First nil no yes)
+
+(NE (FlagEQ) yes no) -> (First nil no yes)
+(NE (FlagLT) yes no) -> (First nil yes no)
+(NE (FlagGT) yes no) -> (First nil yes no)
+
+(LT (FlagEQ) yes no) -> (First nil no yes)
+(LT (FlagLT) yes no) -> (First nil yes no)
+(LT (FlagGT) yes no) -> (First nil no yes)
+
+(LE (FlagEQ) yes no) -> (First nil yes no)
+(LE (FlagLT) yes no) -> (First nil yes no)
+(LE (FlagGT) yes no) -> (First nil no yes)
+
+(GT (FlagEQ) yes no) -> (First nil no yes)
+(GT (FlagLT) yes no) -> (First nil no yes)
+(GT (FlagGT) yes no) -> (First nil yes no)
+
+(GE (FlagEQ) yes no) -> (First nil yes no)
+(GE (FlagLT) yes no) -> (First nil no yes)
+(GE (FlagGT) yes no) -> (First nil yes no)
+
+// Absorb flag constants into conditional move (MOVDxx) ops.
+(MOVDEQ _ x (FlagEQ)) -> x
+(MOVDEQ y _ (FlagLT)) -> y
+(MOVDEQ y _ (FlagGT)) -> y
+
+(MOVDNE y _ (FlagEQ)) -> y
+(MOVDNE _ x (FlagLT)) -> x
+(MOVDNE _ x (FlagGT)) -> x
+
+(MOVDLT y _ (FlagEQ)) -> y
+(MOVDLT _ x (FlagLT)) -> x
+(MOVDLT y _ (FlagGT)) -> y
+
+(MOVDLE _ x (FlagEQ)) -> x
+(MOVDLE _ x (FlagLT)) -> x
+(MOVDLE y _ (FlagGT)) -> y
+
+(MOVDGT y _ (FlagEQ)) -> y
+(MOVDGT y _ (FlagLT)) -> y
+(MOVDGT _ x (FlagGT)) -> x
+
+(MOVDGE _ x (FlagEQ)) -> x
+(MOVDGE y _ (FlagLT)) -> y
+(MOVDGE _ x (FlagGT)) -> x
+
+// Remove redundant *const ops
+(ADDconst [0] x) -> x
+(ADDWconst [c] x) && int32(c)==0 -> x
+(SUBconst [0] x) -> x
+(SUBWconst [c] x) && int32(c) == 0 -> x
+(ANDconst [0] _) -> (MOVDconst [0])
+(ANDWconst [c] _) && int32(c)==0 -> (MOVDconst [0])
+(ANDconst [-1] x) -> x
+(ANDWconst [c] x) && int32(c)==-1 -> x
+(ORconst [0] x) -> x
+(ORWconst [c] x) && int32(c)==0 -> x
+(ORconst [-1] _) -> (MOVDconst [-1])
+(ORWconst [c] _) && int32(c)==-1 -> (MOVDconst [-1])
+(XORconst [0] x) -> x
+(XORWconst [c] x) && int32(c)==0 -> x
+
+// Convert constant subtracts to constant adds.
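+// (Illustration: (SUBconst [5] x) becomes (ADDconst [-5] x). The
+// c != -(1<<31) guard below exists because these immediates are 32-bit:
+// negating -1<<31 would produce 1<<31, which does not fit.)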
+(SUBconst [c] x) && c != -(1<<31) -> (ADDconst [-c] x)
+(SUBWconst [c] x) -> (ADDWconst [int64(int32(-c))] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDconst [c] (MOVDconst [d])) -> (MOVDconst [c+d])
+(ADDWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c+d))])
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
+(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int32(c+d))] x)
+(SUBconst (MOVDconst [d]) [c]) -> (MOVDconst [d-c])
+(SUBconst (SUBconst x [d]) [c]) && is32Bit(-c-d) -> (ADDconst [-c-d] x)
+(SRADconst [c] (MOVDconst [d])) -> (MOVDconst [d>>uint64(c)])
+(SRAWconst [c] (MOVDconst [d])) -> (MOVDconst [d>>uint64(c)])
+(NEG (MOVDconst [c])) -> (MOVDconst [-c])
+(NEGW (MOVDconst [c])) -> (MOVDconst [int64(int32(-c))])
+(MULLDconst [c] (MOVDconst [d])) -> (MOVDconst [c*d])
+(MULLWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c*d))])
+(ANDconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
+(ANDWconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
+(ORconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
+(ORWconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
+(XORconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
+(XORWconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
+(NOT (MOVDconst [c])) -> (MOVDconst [^c])
+(NOTW (MOVDconst [c])) -> (MOVDconst [^c])
+
+// generic simplifications
+// TODO: more of this
+(ADD x (NEG y)) -> (SUB x y)
+(ADDW x (NEGW y)) -> (SUBW x y)
+(SUB x x) -> (MOVDconst [0])
+(SUBW x x) -> (MOVDconst [0])
+(AND x x) -> x
+(ANDW x x) -> x
+(OR x x) -> x
+(ORW x x) -> x
+(XOR x x) -> (MOVDconst [0])
+(XORW x x) -> (MOVDconst [0])
+
+// Combine constant stores into larger (unaligned) stores.
+// This doesn't work for global data (based on SB) because
+// STGRL doesn't support unaligned addresses.
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
+	&& clobber(x)
+	-> (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+(MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+	&& clobber(x)
+	-> (MOVWstoreconst [makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
+	&& clobber(x)
+	-> (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem)
+
+// Combine stores into larger (unaligned) stores.
+// This doesn't work for global data (based on SB) because
+// STGRL doesn't support unaligned addresses.
+(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVHstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVHstore [i-1] {s} p w0 mem)
+(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVWstore [i-2] {s} p w mem)
+(MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVWstore [i-2] {s} p w0 mem)
+(MOVWstore [i] {s} p w x:(MOVWstore [i-4] {s} p (SRDconst [32] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVDstore [i-4] {s} p w mem)
+(MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVDstore [i-4] {s} p w0 mem)
+
+(MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVHstoreidx [i-1] {s} p idx w mem)
+(MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVHstoreidx [i-1] {s} p idx w0 mem)
+(MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVWstoreidx [i-2] {s} p idx w mem)
+(MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVWstoreidx [i-2] {s} p idx w0 mem)
+(MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVDstoreidx [i-4] {s} p idx w mem)
+(MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem))
+	&& p.Op != OpSB
+	&& x.Uses == 1
+	&& clobber(x)
+	-> (MOVDstoreidx [i-4] {s} p idx w0 mem)
+
+// Combining byte loads into larger (unaligned) loads.
+
+// Little endian loads.
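+// (The Uses == 1 and clobber conditions below ensure each partial byte load
+// and shift dies with the rewrite, and mergePoint picks a block in which all
+// of the component loads are available, so the merged load never leaves a
+// memory-typed value live across blocks.)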
+ +// b[0] | b[1]<<8 -> load 16-bit, reverse bytes +(ORW x0:(MOVBZload [i] {s} p mem) + s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0) + && clobber(x1) + && clobber(s0) + -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem)) + +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes +(ORW o0:(ORW o1:(ORW + x0:(MOVBZload [i] {s} p mem) + s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem))) + s1:(SLWconst [16] x2:(MOVBZload [i+2] {s} p mem))) + s2:(SLWconst [24] x3:(MOVBZload [i+3] {s} p mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && mergePoint(b,x0,x1,x2,x3) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(o0) + && clobber(o1) + -> @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRload [i] {s} p mem)) + +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes +(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR + x0:(MOVBZload [i] {s} p mem) + s0:(SLDconst [8] x1:(MOVBZload [i+1] {s} p mem))) + s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem))) + s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem))) + s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem))) + s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem))) + s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem))) + s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem) + +// b[0] | b[1]<<8 -> load 16-bit, reverse bytes +(ORW x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0) + && clobber(x1) + && clobber(s0) + -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i] {s} p idx mem)) + +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes +(ORW o0:(ORW o1:(ORW + x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) + s1:(SLWconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem))) + s2:(SLWconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && mergePoint(b,x0,x1,x2,x3) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && 
clobber(o0) + && clobber(o1) + -> @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRloadidx [i] {s} p idx mem)) + +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes +(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR + x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLDconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) + s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem))) + s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem))) + s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem))) + s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem))) + s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem))) + s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx [i] {s} p idx mem) + +// Big endian loads. + +// b[1] | b[0]<<8 -> load 16-bit +(ORW x0:(MOVBZload [i] {s} p mem) + s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0) + && clobber(x1) + && clobber(s0) + -> @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem) + +// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit +(ORW o0:(ORW o1:(ORW + x0:(MOVBZload [i] {s} p mem) + s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem))) + s1:(SLWconst [16] x2:(MOVBZload [i-2] {s} p mem))) + s2:(SLWconst [24] x3:(MOVBZload [i-3] {s} p mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && mergePoint(b,x0,x1,x2,x3) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(o0) + && clobber(o1) + -> @mergePoint(b,x0,x1,x2,x3) (MOVWZload [i-3] {s} p mem) + +// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit +(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR + x0:(MOVBZload [i] {s} p mem) + s0:(SLDconst [8] x1:(MOVBZload [i-1] {s} p mem))) + s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem))) + s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem))) + s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem))) + s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem))) + s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem))) + s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 
1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem) + +// b[1] | b[0]<<8 -> load 16-bit +(ORW x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0) + && clobber(x1) + && clobber(s0) + -> @mergePoint(b,x0,x1) (MOVHZloadidx [i-1] {s} p idx mem) + +// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit +(ORW o0:(ORW o1:(ORW + x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) + s1:(SLWconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem))) + s2:(SLWconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && mergePoint(b,x0,x1,x2,x3) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(o0) + && clobber(o1) + -> @mergePoint(b,x0,x1,x2,x3) (MOVWZloadidx [i-3] {s} p idx mem) + +// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit +(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR + x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLDconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) + s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem))) + s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem))) + s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem))) + s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem))) + s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem))) + s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx [i-7] {s} p idx mem) + +// Combine stores into store multiples. 
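+// For example (editor's illustration): four adjacent 8-byte stores
+//   (MOVDstore [24] p w3 (MOVDstore [16] p w2 (MOVDstore [8] p w1 (MOVDstore [0] p w0 mem))))
+// collapse into (STMG4 [0] p w0 w1 w2 w3 mem), a single store-multiple
+// instruction writing four consecutive registers with one displacement.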
+(MOVWstore [i] {s} p w3 + x2:(MOVWstore [i-4] {s} p w2 + x1:(MOVWstore [i-8] {s} p w1 + x0:(MOVWstore [i-12] {s} p w0 mem)))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && is20Bit(i-12) + && clobber(x0) + && clobber(x1) + && clobber(x2) + -> (STM4 [i-12] {s} p w0 w1 w2 w3 mem) +(MOVWstore [i] {s} p w2 + x1:(MOVWstore [i-4] {s} p w1 + x0:(MOVWstore [i-8] {s} p w0 mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && is20Bit(i-8) + && clobber(x0) + && clobber(x1) + -> (STM3 [i-8] {s} p w0 w1 w2 mem) +(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) + && p.Op != OpSB + && x.Uses == 1 + && is20Bit(i-4) + && clobber(x) + -> (STM2 [i-4] {s} p w0 w1 mem) +(MOVDstore [i] {s} p w3 + x2:(MOVDstore [i-8] {s} p w2 + x1:(MOVDstore [i-16] {s} p w1 + x0:(MOVDstore [i-24] {s} p w0 mem)))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && is20Bit(i-24) + && clobber(x0) + && clobber(x1) + && clobber(x2) + -> (STMG4 [i-24] {s} p w0 w1 w2 w3 mem) +(MOVDstore [i] {s} p w2 + x1:(MOVDstore [i-8] {s} p w1 + x0:(MOVDstore [i-16] {s} p w0 mem))) + && p.Op != OpSB + && x0.Uses == 1 + && x1.Uses == 1 + && is20Bit(i-16) + && clobber(x0) + && clobber(x1) + -> (STMG3 [i-16] {s} p w0 w1 w2 mem) +(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) + && p.Op != OpSB + && x.Uses == 1 + && is20Bit(i-8) + && clobber(x) + -> (STMG2 [i-8] {s} p w0 w1 mem) diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go new file mode 100644 index 0000000000000..7d4eff83751e6 --- /dev/null +++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go @@ -0,0 +1,527 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - When doing sub-register operations, we try to write the whole +// destination register to avoid a partial-register write. +// - Unused portions of AuxInt (or the Val portion of ValAndOff) are +// filled by sign-extending the used portion. Users of AuxInt which interpret +// AuxInt as unsigned (e.g. shifts) must be careful. + +// Suffixes encode the bit width of various instructions. +// D (double word) = 64 bit (frequently omitted) +// W (word) = 32 bit +// H (half word) = 16 bit +// B (byte) = 8 bit + +// copied from ../../s390x/reg.go +var regNamesS390X = []string{ + "R0", + "R1", + "R2", + "R3", + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", + "R12", + "g", // R13 + "R14", + "SP", // R15 + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", + + //pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. 
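+	// (Each register occupies one bit of a 64-bit regMask -- hence the
+	// len(regNamesS390X) > 64 check below -- so with the ordering above,
+	// e.g. buildReg("R0 SP") would yield the mask 1<<0 | 1<<15.)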
+ if len(regNamesS390X) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesS390X { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + sp = buildReg("SP") + sb = buildReg("SB") + r0 = buildReg("R0") + + // R10 and R11 are reserved by the assembler. + gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12") + gpsp = gp | sp + + // R0 is considered to contain the value 0 in address calculations. + ptr = gp &^ r0 + ptrsp = ptr | sp + ptrspsb = ptrsp | sb + + fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15") + callerSave = gp | fp + ) + // Common slices of register masks + var ( + gponly = []regMask{gp} + fponly = []regMask{fp} + ) + + // Common regInfo + var ( + gp01 = regInfo{inputs: []regMask{}, outputs: gponly} + gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} + gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} + + // R0 evaluates to 0 when used as the number of bits to shift + // so we need to exclude it from that operand. + sh21 = regInfo{inputs: []regMask{gp, ptr}, outputs: gponly} + + addr = regInfo{inputs: []regMask{sp | sb}, outputs: gponly} + addridx = regInfo{inputs: []regMask{sp | sb, ptrsp}, outputs: gponly} + + gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}} + gp1flags = regInfo{inputs: []regMask{gpsp}} + flagsgp = regInfo{outputs: gponly} + gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + + gpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: gponly} + gploadidx = regInfo{inputs: []regMask{ptrspsb, ptrsp, 0}, outputs: gponly} + gpstore = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}} + gpstoreconst = regInfo{inputs: []regMask{ptrspsb, 0}} + gpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, gpsp, 0}} + + gpmvc = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}} + + fp01 = regInfo{inputs: []regMask{}, outputs: fponly} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly} + fp21clobber = regInfo{inputs: []regMask{fp, fp}, outputs: fponly} + fpgp = regInfo{inputs: fponly, outputs: gponly} + gpfp = regInfo{inputs: gponly, outputs: fponly} + fp11 = regInfo{inputs: fponly, outputs: fponly} + fp11clobber = regInfo{inputs: fponly, outputs: fponly} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + + fpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: fponly} + fploadidx = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}, outputs: fponly} + + fpstore = regInfo{inputs: []regMask{ptrspsb, fp, 0}} + fpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, fp, 0}} + ) + + var S390Xops = []opData{ + // fp ops + {name: "FADDS", argLength: 2, reg: fp21clobber, asm: "FADDS", commutative: true, resultInArg0: true, clobberFlags: true}, // fp32 add + {name: "FADD", argLength: 2, reg: fp21clobber, asm: "FADD", commutative: true, resultInArg0: true, clobberFlags: true}, // fp64 add + {name: "FSUBS", argLength: 2, reg: fp21clobber, asm: "FSUBS", resultInArg0: true, clobberFlags: true}, // fp32 sub + {name: "FSUB", argLength: 2, reg: fp21clobber, asm: "FSUB", resultInArg0: true, clobberFlags: true}, // fp64 sub + {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, resultInArg0: true}, // fp32 mul + {name: "FMUL", 
argLength: 2, reg: fp21, asm: "FMUL", commutative: true, resultInArg0: true}, // fp64 mul + {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", resultInArg0: true}, // fp32 div + {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV", resultInArg0: true}, // fp64 div + {name: "FNEGS", argLength: 1, reg: fp11clobber, asm: "FNEGS", clobberFlags: true}, // fp32 neg + {name: "FNEG", argLength: 1, reg: fp11clobber, asm: "FNEG", clobberFlags: true}, // fp64 neg + + {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff"}, // fp32 load + {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff"}, // fp64 load + {name: "FMOVSconst", reg: fp01, asm: "FMOVS", aux: "Float32", rematerializeable: true}, // fp32 constant + {name: "FMOVDconst", reg: fp01, asm: "FMOVD", aux: "Float64", rematerializeable: true}, // fp64 constant + {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", aux: "SymOff"}, // fp32 load indexed by i + {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", aux: "SymOff"}, // fp64 load indexed by i + + {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff"}, // fp32 store + {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff"}, // fp64 store + {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", aux: "SymOff"}, // fp32 indexed by i store + {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", aux: "SymOff"}, // fp64 indexed by i store + + // binary ops + {name: "ADD", argLength: 2, reg: gp21sp, asm: "ADD", commutative: true, clobberFlags: true}, // arg0 + arg1 + {name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDW", commutative: true, clobberFlags: true}, // arg0 + arg1 + {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint + {name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDW", aux: "Int32", clobberFlags: true}, // arg0 + auxint + + {name: "SUB", argLength: 2, reg: gp21, asm: "SUB", clobberFlags: true}, // arg0 - arg1 + {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW", clobberFlags: true}, // arg0 - arg1 + {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint + {name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint + + {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 + {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 + {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int64", typ: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint + {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint + + {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", typ: "Int64", resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width + {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", typ: "Int64", resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width + + {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 + {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 + {name: "DIVDU", argLength: 2, 
reg: gp21, asm: "DIVDU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 + {name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 + + {name: "MODD", argLength: 2, reg: gp21, asm: "MODD", resultInArg0: true, clobberFlags: true}, // arg0 % arg1 + {name: "MODW", argLength: 2, reg: gp21, asm: "MODW", resultInArg0: true, clobberFlags: true}, // arg0 % arg1 + + {name: "MODDU", argLength: 2, reg: gp21, asm: "MODDU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1 + {name: "MODWU", argLength: 2, reg: gp21, asm: "MODWU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1 + + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true, clobberFlags: true}, // arg0 & arg1 + {name: "ANDW", argLength: 2, reg: gp21, asm: "AND", commutative: true, clobberFlags: true}, // arg0 & arg1 + {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + {name: "ANDWconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true, clobberFlags: true}, // arg0 | arg1 + {name: "ORW", argLength: 2, reg: gp21, asm: "OR", commutative: true, clobberFlags: true}, // arg0 | arg1 + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + {name: "ORWconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + + {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, clobberFlags: true}, // arg0 ^ arg1 + {name: "XORW", argLength: 2, reg: gp21, asm: "XOR", commutative: true, clobberFlags: true}, // arg0 ^ arg1 + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + {name: "XORWconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + + {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 + + {name: "CMPU", argLength: 2, reg: gp2flags, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPWU", argLength: 2, reg: gp2flags, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1 + + {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint + {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint + {name: "CMPUconst", argLength: 1, reg: gp1flags, asm: "CMPU", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint + {name: "CMPWUconst", argLength: 1, reg: gp1flags, asm: "CMPWU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint + + {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32 + {name: "FCMP", argLength: 2, reg: fp2flags, asm: "FCMPU", typ: "Flags"}, // arg0 compare to arg1, f64 + + {name: "TESTB", argLength: 1, reg: gp1flags, asm: "AND", typ: "Flags"}, // (arg0 & 0xFF) compare to 0 + + {name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64 + {name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 32 + {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxint, 
shift amount 0-63
+		{name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int32"}, // arg0 << auxint, shift amount 0-31
+
+		{name: "SRD", argLength: 2, reg: sh21, asm: "SRD"},                    // unsigned arg0 >> arg1, shift amount is mod 64
+		{name: "SRW", argLength: 2, reg: sh21, asm: "SRW"},                    // unsigned arg0 >> arg1, shift amount is mod 32
+		{name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxint, shift amount 0-63
+		{name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int32"}, // unsigned arg0 >> auxint, shift amount 0-31
+
+		// Arithmetic shifts clobber flags.
+		{name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true},                    // signed arg0 >> arg1, shift amount is mod 64
+		{name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true},                    // signed arg0 >> arg1, shift amount is mod 32
+		{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
+		{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int32", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+
+		{name: "RLLGconst", argLength: 1, reg: gp11, asm: "RLLG", aux: "Int64"}, // arg0 rotate left auxint, rotate amount 0-63
+		{name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int32"},   // arg0 rotate left auxint, rotate amount 0-31
+
+		// unary ops
+		{name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true},   // -arg0
+		{name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW", clobberFlags: true}, // -arg0
+
+		{name: "NOT", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true},  // ^arg0
+		{name: "NOTW", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
+
+		{name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0)
+
+		{name: "SUBEcarrymask", argLength: 1, reg: flagsgp, asm: "SUBE"},  // (int64)(-1) if carry is set, 0 if carry is clear.
+		{name: "SUBEWcarrymask", argLength: 1, reg: flagsgp, asm: "SUBE"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+		// Note: 32-bit subtraction with borrow is not implemented on S390X. Temporarily use SUBE (64-bit).
+
+		{name: "MOVDEQ", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDEQ"}, // extract == condition from arg0
+		{name: "MOVDNE", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDNE"}, // extract != condition from arg0
+		{name: "MOVDLT", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDLT"}, // extract signed < condition from arg0
+		{name: "MOVDLE", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDLE"}, // extract signed <= condition from arg0
+		{name: "MOVDGT", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDGT"}, // extract signed > condition from arg0
+		{name: "MOVDGE", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDGE"}, // extract signed >= condition from arg0
+
+		// Different rules for floating point conditions because
+		// any comparison involving a NaN is always false and thus
+		// the patterns for inverting conditions cannot be used.
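+		// (Illustration: if either FCMP operand is NaN, GT is false and so
+		// is its nominal inversion LE; rewriting (MOVDGTnoinv x y cmp) via
+		// InvertFlags would therefore change behavior on NaNs.)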
+ {name: "MOVDGTnoinv", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDGT"}, // extract floating > condition from arg0 + {name: "MOVDGEnoinv", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDGE"}, // extract floating >= condition from arg0 + + {name: "MOVBreg", argLength: 1, reg: gp11sp, asm: "MOVB", typ: "Int64"}, // sign extend arg0 from int8 to int64 + {name: "MOVBZreg", argLength: 1, reg: gp11sp, asm: "MOVBZ", typ: "UInt64"}, // zero extend arg0 from int8 to int64 + {name: "MOVHreg", argLength: 1, reg: gp11sp, asm: "MOVH", typ: "Int64"}, // sign extend arg0 from int16 to int64 + {name: "MOVHZreg", argLength: 1, reg: gp11sp, asm: "MOVHZ", typ: "UInt64"}, // zero extend arg0 from int16 to int64 + {name: "MOVWreg", argLength: 1, reg: gp11sp, asm: "MOVW", typ: "Int64"}, // sign extend arg0 from int32 to int64 + {name: "MOVWZreg", argLength: 1, reg: gp11sp, asm: "MOVWZ", typ: "UInt64"}, // zero extend arg0 from int32 to int64 + + {name: "MOVDconst", reg: gp01, asm: "MOVD", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint + + {name: "CFDBRA", argLength: 1, reg: fpgp, asm: "CFDBRA"}, // convert float64 to int32 + {name: "CGDBRA", argLength: 1, reg: fpgp, asm: "CGDBRA"}, // convert float64 to int64 + {name: "CFEBRA", argLength: 1, reg: fpgp, asm: "CFEBRA"}, // convert float32 to int32 + {name: "CGEBRA", argLength: 1, reg: fpgp, asm: "CGEBRA"}, // convert float32 to int64 + {name: "CEFBRA", argLength: 1, reg: gpfp, asm: "CEFBRA"}, // convert int32 to float32 + {name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA"}, // convert int32 to float64 + {name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA"}, // convert int64 to float32 + {name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA"}, // convert int64 to float64 + {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32 + {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64 + + {name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, clobberFlags: true}, // arg0 + auxint + offset encoded in aux + {name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", clobberFlags: true}, // arg0 + arg1 + auxint + aux + + // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address + {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", clobberFlags: true}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", clobberFlags: true}, // ditto, sign extend to int64 + {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", clobberFlags: true}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", clobberFlags: true}, // ditto, sign extend to int64 + {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", clobberFlags: true}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", clobberFlags: true}, // ditto, sign extend to int64 + {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "UInt64", clobberFlags: true}, // load 8 bytes from arg0+auxint+aux. arg1=mem + + {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "UInt16", clobberFlags: true}, // load 2 bytes from arg0+auxint+aux. 
arg1=mem. Reverse bytes. + {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "UInt32", clobberFlags: true}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes. + {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "UInt64", clobberFlags: true}, // load 8 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes. + + {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", clobberFlags: true}, // store byte in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", clobberFlags: true}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", clobberFlags: true}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", clobberFlags: true}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem + + {name: "MVC", argLength: 3, reg: gpmvc, asm: "MVC", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size,off + + // indexed loads/stores + // TODO(mundaym): add sign-extended indexed loads + {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", aux: "SymOff", clobberFlags: true}, // load a byte from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", aux: "SymOff", clobberFlags: true}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", aux: "SymOff", clobberFlags: true}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", aux: "SymOff", clobberFlags: true}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", aux: "SymOff", clobberFlags: true}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", aux: "SymOff", clobberFlags: true}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", aux: "SymOff", clobberFlags: true}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", clobberFlags: true}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", aux: "SymOff", clobberFlags: true}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", clobberFlags: true}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", aux: "SymOff", clobberFlags: true}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + + // For storeconst ops, the AuxInt field encodes both + // the value to store and an address offset of the store. + // Cast AuxInt to a ValAndOff to extract Val and Off fields. + {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. 
arg1=mem + {name: "MOVHstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVH", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // store low 2 bytes of ... + {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // store low 4 bytes of ... + {name: "MOVDstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVD", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // store 8 bytes of ... + + {name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, + + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem + {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call newproc. arg0=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // (InvertFlags (CMP a b)) == (CMP b a) + // InvertFlags is a pseudo-op which can't appear in assembly output. + {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 + + // Pseudo-ops + {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of R12 (the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R12")}}}, + // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{ptrsp}}, clobberFlags: true}, + + // MOVDconvert converts between pointers and integers. + // We have a special op for this so as to not confuse GC + // (particularly stack maps). It takes a memory arg so it + // gets correctly ordered with respect to GC safepoints. + // arg0=ptr/int arg1=mem, output=int/ptr + {name: "MOVDconvert", argLength: 2, reg: gp11sp, asm: "MOVD"}, + + // Constant flag values. For any comparison, there are 5 possible + // outcomes: the three from the signed total order (<,==,>) and the + // three from the unsigned total order. The == cases overlap. + // Note: there's a sixth "unordered" outcome for floating-point + // comparisons, but we don't use such a beast yet. + // These ops are for temporary use by rewrite rules. They + // cannot appear in the generated assembly. 
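+	// Illustrative sketch (hypothetical rule; the real ones live in
+	// S390X.rules): a rewrite along the lines of
+	//   (CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
+	// resolves a comparison of constants at compile time, and later
+	// rules and block rewrites consume the flag constant without ever
+	// emitting a CMP instruction.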
+ {name: "FlagEQ"}, // equal + {name: "FlagLT"}, // < + {name: "FlagGT"}, // > + + // store multiple + { + name: "STMG2", + argLength: 4, + reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}}, + aux: "SymOff", + typ: "Mem", + asm: "STMG", + }, + { + name: "STMG3", + argLength: 5, + reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}}, + aux: "SymOff", + typ: "Mem", + asm: "STMG", + }, + { + name: "STMG4", + argLength: 6, + reg: regInfo{inputs: []regMask{ + ptrsp, + buildReg("R1"), + buildReg("R2"), + buildReg("R3"), + buildReg("R4"), + 0, + }}, + aux: "SymOff", + typ: "Mem", + asm: "STMG", + }, + { + name: "STM2", + argLength: 4, + reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}}, + aux: "SymOff", + typ: "Mem", + asm: "STMY", + }, + { + name: "STM3", + argLength: 5, + reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}}, + aux: "SymOff", + typ: "Mem", + asm: "STMY", + }, + { + name: "STM4", + argLength: 6, + reg: regInfo{inputs: []regMask{ + ptrsp, + buildReg("R1"), + buildReg("R2"), + buildReg("R3"), + buildReg("R4"), + 0, + }}, + aux: "SymOff", + typ: "Mem", + asm: "STMY", + }, + + // large move + // auxint = remaining bytes after loop (rem) + // arg0 = address of dst memory (in R1, changed as a side effect) + // arg1 = address of src memory (in R2, changed as a side effect) + // arg2 = pointer to last address to move in loop + 256 + // arg3 = mem + // returns mem + // + // mvc: MVC $256, 0(R2), 0(R1) + // MOVD $256(R1), R1 + // MOVD $256(R2), R2 + // CMP R2, Rarg2 + // BNE mvc + // MVC $rem, 0(R2), 0(R1) // if rem > 0 + { + name: "LoweredMove", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), buildReg("R2"), gpsp}, + clobbers: buildReg("R1 R2"), + }, + clobberFlags: true, + typ: "Mem", + }, + + // large clear + // auxint = remaining bytes after loop (rem) + // arg0 = address of dst memory (in R1, changed as a side effect) + // arg1 = pointer to last address to zero in loop + 256 + // arg2 = mem + // returns mem + // + // clear: CLEAR $256, 0(R1) + // MOVD $256(R1), R1 + // CMP R1, Rarg2 + // BNE clear + // CLEAR $rem, 0(R1) // if rem > 0 + { + name: "LoweredZero", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), gpsp}, + clobbers: buildReg("R1"), + }, + clobberFlags: true, + typ: "Mem", + }, + } + + var S390Xblocks = []blockData{ + {name: "EQ"}, + {name: "NE"}, + {name: "LT"}, + {name: "LE"}, + {name: "GT"}, + {name: "GE"}, + {name: "GTF"}, // FP comparison + {name: "GEF"}, // FP comparison + } + + archs = append(archs, arch{ + name: "S390X", + pkg: "cmd/internal/obj/s390x", + genfile: "../../s390x/ssa.go", + ops: S390Xops, + blocks: S390Xblocks, + regnames: regNamesS390X, + gpregmask: gp, + fpregmask: fp, + framepointerreg: -1, // not used + }) +} diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7188bf6955f7e..b9d98b484f4ad 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -9,6 +9,7 @@ import ( "cmd/internal/obj/arm64" "cmd/internal/obj/mips" "cmd/internal/obj/ppc64" + "cmd/internal/obj/s390x" "cmd/internal/obj/x86" ) @@ -87,6 +88,15 @@ const ( BlockPPC64FGT BlockPPC64FGE + BlockS390XEQ + BlockS390XNE + BlockS390XLT + BlockS390XLE + BlockS390XGT + BlockS390XGE + BlockS390XGTF + BlockS390XGEF + BlockPlain BlockIf BlockDefer @@ -172,6 +182,15 @@ var blockString = [...]string{ BlockPPC64FGT: "FGT", 
BlockPPC64FGE: "FGE", + BlockS390XEQ: "EQ", + BlockS390XNE: "NE", + BlockS390XLT: "LT", + BlockS390XLE: "LE", + BlockS390XGT: "GT", + BlockS390XGE: "GE", + BlockS390XGTF: "GTF", + BlockS390XGEF: "GEF", + BlockPlain: "Plain", BlockIf: "If", BlockDefer: "Defer", @@ -1217,6 +1236,172 @@ const ( OpPPC64FlagLT OpPPC64FlagGT + OpS390XFADDS + OpS390XFADD + OpS390XFSUBS + OpS390XFSUB + OpS390XFMULS + OpS390XFMUL + OpS390XFDIVS + OpS390XFDIV + OpS390XFNEGS + OpS390XFNEG + OpS390XFMOVSload + OpS390XFMOVDload + OpS390XFMOVSconst + OpS390XFMOVDconst + OpS390XFMOVSloadidx + OpS390XFMOVDloadidx + OpS390XFMOVSstore + OpS390XFMOVDstore + OpS390XFMOVSstoreidx + OpS390XFMOVDstoreidx + OpS390XADD + OpS390XADDW + OpS390XADDconst + OpS390XADDWconst + OpS390XSUB + OpS390XSUBW + OpS390XSUBconst + OpS390XSUBWconst + OpS390XMULLD + OpS390XMULLW + OpS390XMULLDconst + OpS390XMULLWconst + OpS390XMULHD + OpS390XMULHDU + OpS390XDIVD + OpS390XDIVW + OpS390XDIVDU + OpS390XDIVWU + OpS390XMODD + OpS390XMODW + OpS390XMODDU + OpS390XMODWU + OpS390XAND + OpS390XANDW + OpS390XANDconst + OpS390XANDWconst + OpS390XOR + OpS390XORW + OpS390XORconst + OpS390XORWconst + OpS390XXOR + OpS390XXORW + OpS390XXORconst + OpS390XXORWconst + OpS390XCMP + OpS390XCMPW + OpS390XCMPU + OpS390XCMPWU + OpS390XCMPconst + OpS390XCMPWconst + OpS390XCMPUconst + OpS390XCMPWUconst + OpS390XFCMPS + OpS390XFCMP + OpS390XTESTB + OpS390XSLD + OpS390XSLW + OpS390XSLDconst + OpS390XSLWconst + OpS390XSRD + OpS390XSRW + OpS390XSRDconst + OpS390XSRWconst + OpS390XSRAD + OpS390XSRAW + OpS390XSRADconst + OpS390XSRAWconst + OpS390XRLLGconst + OpS390XRLLconst + OpS390XNEG + OpS390XNEGW + OpS390XNOT + OpS390XNOTW + OpS390XFSQRT + OpS390XSUBEcarrymask + OpS390XSUBEWcarrymask + OpS390XMOVDEQ + OpS390XMOVDNE + OpS390XMOVDLT + OpS390XMOVDLE + OpS390XMOVDGT + OpS390XMOVDGE + OpS390XMOVDGTnoinv + OpS390XMOVDGEnoinv + OpS390XMOVBreg + OpS390XMOVBZreg + OpS390XMOVHreg + OpS390XMOVHZreg + OpS390XMOVWreg + OpS390XMOVWZreg + OpS390XMOVDconst + OpS390XCFDBRA + OpS390XCGDBRA + OpS390XCFEBRA + OpS390XCGEBRA + OpS390XCEFBRA + OpS390XCDFBRA + OpS390XCEGBRA + OpS390XCDGBRA + OpS390XLEDBR + OpS390XLDEBR + OpS390XMOVDaddr + OpS390XMOVDaddridx + OpS390XMOVBZload + OpS390XMOVBload + OpS390XMOVHZload + OpS390XMOVHload + OpS390XMOVWZload + OpS390XMOVWload + OpS390XMOVDload + OpS390XMOVHBRload + OpS390XMOVWBRload + OpS390XMOVDBRload + OpS390XMOVBstore + OpS390XMOVHstore + OpS390XMOVWstore + OpS390XMOVDstore + OpS390XMVC + OpS390XMOVBZloadidx + OpS390XMOVHZloadidx + OpS390XMOVWZloadidx + OpS390XMOVDloadidx + OpS390XMOVHBRloadidx + OpS390XMOVWBRloadidx + OpS390XMOVDBRloadidx + OpS390XMOVBstoreidx + OpS390XMOVHstoreidx + OpS390XMOVWstoreidx + OpS390XMOVDstoreidx + OpS390XMOVBstoreconst + OpS390XMOVHstoreconst + OpS390XMOVWstoreconst + OpS390XMOVDstoreconst + OpS390XCLEAR + OpS390XCALLstatic + OpS390XCALLclosure + OpS390XCALLdefer + OpS390XCALLgo + OpS390XCALLinter + OpS390XInvertFlags + OpS390XLoweredGetG + OpS390XLoweredGetClosurePtr + OpS390XLoweredNilCheck + OpS390XMOVDconvert + OpS390XFlagEQ + OpS390XFlagLT + OpS390XFlagGT + OpS390XSTMG2 + OpS390XSTMG3 + OpS390XSTMG4 + OpS390XSTM2 + OpS390XSTM3 + OpS390XSTM4 + OpS390XLoweredMove + OpS390XLoweredZero + OpAdd8 OpAdd16 OpAdd32 @@ -15053,111 +15238,2393 @@ var opcodeTable = [...]opInfo{ }, { - name: "Add8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "Add16", - argLen: 2, - commutative: true, - generic: true, + name: "FADDS", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: 
s390x.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Add32", - argLen: 2, - commutative: true, - generic: true, + name: "FADD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AFADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Add64", - argLen: 2, - commutative: true, - generic: true, + name: "FSUBS", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "AddPtr", - argLen: 2, - generic: true, + name: "FSUB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AFSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Add32F", - argLen: 2, - generic: true, + name: "FMULS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Add64F", - argLen: 2, - generic: true, + name: "FMUL", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Sub8", - argLen: 2, - generic: true, + name: "FDIVS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Sub16", - argLen: 2, - generic: true, + name: "FDIV", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Sub32", - argLen: 2, - 
generic: true, + name: "FNEGS", + argLen: 1, + clobberFlags: true, + asm: s390x.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Sub64", - argLen: 2, - generic: true, + name: "FNEG", + argLen: 1, + clobberFlags: true, + asm: s390x.AFNEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "SubPtr", + name: "FMOVSload", + auxType: auxSymOff, argLen: 2, - generic: true, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Sub32F", + name: "FMOVDload", + auxType: auxSymOff, argLen: 2, - generic: true, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Sub64F", - argLen: 2, - generic: true, + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Mul8", - argLen: 2, - commutative: true, - generic: true, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Mul16", - argLen: 2, - commutative: true, - generic: true, + name: "FMOVSloadidx", + auxType: auxSymOff, + argLen: 3, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Mul32", - argLen: 2, - commutative: true, - generic: true, + name: "FMOVDloadidx", + auxType: auxSymOff, + argLen: 3, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Mul64", - argLen: 2, - commutative: true, - generic: true, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Mul32F", - argLen: 2, - generic: true, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "Mul64F", - argLen: 2, + name: "FMOVSstoreidx", + auxType: auxSymOff, + argLen: 4, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "ADD", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ADDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ADDWconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SUB", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SUBW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SUBconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SUBWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MULLD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MULLW", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MULLDconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MULHD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MULHDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "DIVDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "DIVWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MODD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MODW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MODDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MODWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ANDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ANDWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "ORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "XORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "XORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "CMP", + argLen: 2, + asm: s390x.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: s390x.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "CMPU", + argLen: 2, + asm: s390x.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "CMPWU", + argLen: 2, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: s390x.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "CMPUconst", + auxType: auxInt64, + argLen: 1, + asm: s390x.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "FCMPS", + argLen: 2, + asm: s390x.ACEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FCMP", + argLen: 2, + asm: s390x.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "TESTB", + argLen: 1, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "SLD", + argLen: 2, + asm: s390x.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SLW", + argLen: 2, + asm: s390x.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + 
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SLDconst", + auxType: auxInt64, + argLen: 1, + asm: s390x.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SLWconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SRD", + argLen: 2, + asm: s390x.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SRW", + argLen: 2, + asm: s390x.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SRDconst", + auxType: auxInt64, + argLen: 1, + asm: s390x.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SRWconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SRAD", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SRAW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SRADconst", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SRAWconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "RLLGconst", + auxType: auxInt64, + argLen: 1, + asm: s390x.ARLLG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "RLLconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ARLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "NEG", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: 
[]outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "NEGW", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "NOT", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "NOTW", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "FSQRT", + argLen: 1, + asm: s390x.AFSQRT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "SUBEcarrymask", + argLen: 1, + asm: s390x.ASUBE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "SUBEWcarrymask", + argLen: 1, + asm: s390x.ASUBE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDEQ", + argLen: 3, + resultInArg0: true, + asm: s390x.AMOVDEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDNE", + argLen: 3, + resultInArg0: true, + asm: s390x.AMOVDNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDLT", + argLen: 3, + resultInArg0: true, + asm: s390x.AMOVDLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDLE", + argLen: 3, + resultInArg0: true, + asm: s390x.AMOVDLE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDGT", + argLen: 3, + resultInArg0: true, + asm: s390x.AMOVDGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDGE", + argLen: 3, + resultInArg0: true, + asm: s390x.AMOVDGE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDGTnoinv", + argLen: 3, + resultInArg0: true, + asm: s390x.AMOVDGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDGEnoinv", + argLen: 3, + resultInArg0: true, + asm: s390x.AMOVDGE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVBZreg", + argLen: 1, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVHZreg", + argLen: 1, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVWZreg", + argLen: 1, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: s390x.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "CFDBRA", + argLen: 1, + asm: s390x.ACFDBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "CGDBRA", + argLen: 1, + asm: s390x.ACGDBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "CFEBRA", + argLen: 1, + asm: s390x.ACFEBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "CGEBRA", + argLen: 1, + asm: s390x.ACGEBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "CEFBRA", + argLen: 1, + asm: s390x.ACEFBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDFBRA", + argLen: 1, + asm: s390x.ACDFBRA, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CEGBRA", + argLen: 1, + asm: s390x.ACEGBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDGBRA", + argLen: 1, + asm: s390x.ACDGBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LEDBR", + argLen: 1, + asm: s390x.ALEDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LDEBR", + argLen: 1, + asm: s390x.ALDEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDaddridx", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + 
outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVHBRload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVWBRload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDBRload", + auxType: auxSymOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MVC", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMVC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVBZloadidx", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVHZloadidx", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVWZloadidx", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDloadidx", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVHBRloadidx", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVWBRloadidx", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVDBRloadidx", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "MOVBstoreidx", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVHstoreidx", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVWstoreidx", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + {2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + }, + }, + { + name: "MOVHstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + }, + }, + { + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + }, + }, + { + name: "MOVDstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + asm: 
s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB + }, + }, + }, + { + name: "CLEAR", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + asm: s390x.ACLEAR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxSymOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLclosure", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4096}, // R12 + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLdefer", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLgo", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLinter", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredGetG", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4096}, // R12 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "MOVDconvert", + argLen: 2, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + outputs: []outputInfo{ + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "STMG2", + auxType: auxSymOff, + argLen: 4, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "STMG3", + auxType: auxSymOff, + argLen: 5, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "STMG4", + auxType: auxSymOff, + argLen: 6, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "STM2", + auxType: auxSymOff, + argLen: 4, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "STM3", + auxType: 
auxSymOff, + argLen: 5, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "STM4", + auxType: auxSymOff, + argLen: 6, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + {2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP + }, + clobbers: 2, // R1 + }, + }, + + { + name: "Add8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddPtr", + argLen: 2, + generic: true, + }, + { + name: "Add32F", + argLen: 2, + generic: true, + }, + { + name: "Add64F", + argLen: 2, + generic: true, + }, + { + name: "Sub8", + argLen: 2, + generic: true, + }, + { + name: "Sub16", + argLen: 2, + generic: true, + }, + { + name: "Sub32", + argLen: 2, + generic: true, + }, + { + name: "Sub64", + argLen: 2, + generic: true, + }, + { + name: "SubPtr", + argLen: 2, + generic: true, + }, + { + name: "Sub32F", + argLen: 2, + generic: true, + }, + { + name: "Sub64F", + argLen: 2, + generic: true, + }, + { + name: "Mul8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32F", + argLen: 2, + generic: true, + }, + { + name: "Mul64F", + argLen: 2, generic: true, }, { @@ -17011,3 +19478,42 @@ var gpRegMaskPPC64 = regMask(536866812) var fpRegMaskPPC64 = regMask(288230371856744448) var specialRegMaskPPC64 = regMask(0) var framepointerRegPPC64 = int8(0) +var registersS390X = [...]Register{ + {0, "R0"}, + {1, "R1"}, + {2, "R2"}, + {3, "R3"}, + {4, "R4"}, + {5, "R5"}, + {6, "R6"}, + {7, "R7"}, + {8, "R8"}, + {9, "R9"}, + {10, "R10"}, + {11, "R11"}, + {12, "R12"}, + {13, "g"}, + {14, "R14"}, + {15, "SP"}, + {16, "F0"}, + {17, "F1"}, + {18, "F2"}, + {19, "F3"}, + {20, "F4"}, + {21, "F5"}, + {22, "F6"}, + {23, "F7"}, + {24, "F8"}, + {25, "F9"}, + {26, "F10"}, + {27, "F11"}, + {28, "F12"}, + {29, "F13"}, + {30, "F14"}, + {31, "F15"}, + {32, "SB"}, +} +var gpRegMaskS390X = regMask(5119) +var fpRegMaskS390X = regMask(4294901760) +var specialRegMaskS390X = regMask(0) +var framepointerRegS390X = int8(-1) diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 7e69658f5c166..88f4ee780713e 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -502,6 +502,8 @@ func (s *regAllocState) init(f *Func) { // we do need to be careful, but that carefulness is hidden // in the rewrite rules so we always have a free register // available for global load/stores. See gen/386.rules (search for Flag_shared). 
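The regInfo masks in the opcode table above are plain bitmaps over the registersS390X array that closes the file: bit i selects the register numbered i, so gpRegMaskS390X = 5119 (binary 1_0011_1111_1111) is R0–R9 plus R12, fpRegMaskS390X = 4294901760 (0xFFFF0000) is F0–F15, and input masks such as 4295005182 additionally set bit 32 for SB. A minimal standalone sketch of the decoding — regMask and the decodeMask helper here are illustrative stand-ins mirroring the types in this package, not part of the patch:

```go
package main

import "fmt"

type regMask uint64

// Mirrors registersS390X above: index is the register number, value its name.
var regNamesS390X = [...]string{
	"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8", "R9",
	"R10", "R11", "R12", "g", "R14", "SP",
	"F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
	"F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
	"SB",
}

// decodeMask expands a register bitmap into the comment strings
// printed next to each mask in opGen.go.
func decodeMask(m regMask) []string {
	var names []string
	for i := range regNamesS390X {
		if m&(1<<uint(i)) != 0 {
			names = append(names, regNamesS390X[i])
		}
	}
	return names
}

func main() {
	fmt.Println(decodeMask(5119))       // [R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12]
	fmt.Println(decodeMask(37887))      // same registers plus SP (bit 15)
	fmt.Println(decodeMask(4295005182)) // [R1 ... R9 R12 SP SB]: no R0, plus SB (bit 32)
}
```

Registers that never appear in these masks are the reserved ones: R10 and R11 (see the s390x regalloc case added just below), g (R13), SP (R15) and R14 (the link register), which is why framepointerRegS390X is -1 and the general-purpose mask stops at R12.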
+ case "s390x": + // nothing to do, R10 & R11 already reserved default: s.f.Config.fe.Unimplementedf(0, "arch %s not implemented", s.f.Config.arch) } diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index a7dea1afcf122..89b3d706dc7fd 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -228,6 +228,11 @@ func is16Bit(n int64) bool { return n == int64(int16(n)) } +// is20Bit reports whether n can be represented as a signed 20 bit integer. +func is20Bit(n int64) bool { + return -(1<<19) <= n && n < (1<<19) +} + // b2i translates a boolean value to 0 or 1 for assigning to auxInt. func b2i(b bool) int64 { if b { diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go new file mode 100644 index 0000000000000..2b03ce4b100b6 --- /dev/null +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -0,0 +1,15822 @@ +// autogenerated from gen/S390X.rules: do not edit! +// generated with: cd gen; go run *.go + +package ssa + +import "math" + +var _ = math.MinInt8 // in case not otherwise used +func rewriteValueS390X(v *Value, config *Config) bool { + switch v.Op { + case OpAdd16: + return rewriteValueS390X_OpAdd16(v, config) + case OpAdd32: + return rewriteValueS390X_OpAdd32(v, config) + case OpAdd32F: + return rewriteValueS390X_OpAdd32F(v, config) + case OpAdd64: + return rewriteValueS390X_OpAdd64(v, config) + case OpAdd64F: + return rewriteValueS390X_OpAdd64F(v, config) + case OpAdd8: + return rewriteValueS390X_OpAdd8(v, config) + case OpAddPtr: + return rewriteValueS390X_OpAddPtr(v, config) + case OpAddr: + return rewriteValueS390X_OpAddr(v, config) + case OpAnd16: + return rewriteValueS390X_OpAnd16(v, config) + case OpAnd32: + return rewriteValueS390X_OpAnd32(v, config) + case OpAnd64: + return rewriteValueS390X_OpAnd64(v, config) + case OpAnd8: + return rewriteValueS390X_OpAnd8(v, config) + case OpAndB: + return rewriteValueS390X_OpAndB(v, config) + case OpAvg64u: + return rewriteValueS390X_OpAvg64u(v, config) + case OpClosureCall: + return rewriteValueS390X_OpClosureCall(v, config) + case OpCom16: + return rewriteValueS390X_OpCom16(v, config) + case OpCom32: + return rewriteValueS390X_OpCom32(v, config) + case OpCom64: + return rewriteValueS390X_OpCom64(v, config) + case OpCom8: + return rewriteValueS390X_OpCom8(v, config) + case OpConst16: + return rewriteValueS390X_OpConst16(v, config) + case OpConst32: + return rewriteValueS390X_OpConst32(v, config) + case OpConst32F: + return rewriteValueS390X_OpConst32F(v, config) + case OpConst64: + return rewriteValueS390X_OpConst64(v, config) + case OpConst64F: + return rewriteValueS390X_OpConst64F(v, config) + case OpConst8: + return rewriteValueS390X_OpConst8(v, config) + case OpConstBool: + return rewriteValueS390X_OpConstBool(v, config) + case OpConstNil: + return rewriteValueS390X_OpConstNil(v, config) + case OpConvert: + return rewriteValueS390X_OpConvert(v, config) + case OpCvt32Fto32: + return rewriteValueS390X_OpCvt32Fto32(v, config) + case OpCvt32Fto64: + return rewriteValueS390X_OpCvt32Fto64(v, config) + case OpCvt32Fto64F: + return rewriteValueS390X_OpCvt32Fto64F(v, config) + case OpCvt32to32F: + return rewriteValueS390X_OpCvt32to32F(v, config) + case OpCvt32to64F: + return rewriteValueS390X_OpCvt32to64F(v, config) + case OpCvt64Fto32: + return rewriteValueS390X_OpCvt64Fto32(v, config) + case OpCvt64Fto32F: + return rewriteValueS390X_OpCvt64Fto32F(v, config) + case OpCvt64Fto64: + return 
rewriteValueS390X_OpCvt64Fto64(v, config) + case OpCvt64to32F: + return rewriteValueS390X_OpCvt64to32F(v, config) + case OpCvt64to64F: + return rewriteValueS390X_OpCvt64to64F(v, config) + case OpDeferCall: + return rewriteValueS390X_OpDeferCall(v, config) + case OpDiv16: + return rewriteValueS390X_OpDiv16(v, config) + case OpDiv16u: + return rewriteValueS390X_OpDiv16u(v, config) + case OpDiv32: + return rewriteValueS390X_OpDiv32(v, config) + case OpDiv32F: + return rewriteValueS390X_OpDiv32F(v, config) + case OpDiv32u: + return rewriteValueS390X_OpDiv32u(v, config) + case OpDiv64: + return rewriteValueS390X_OpDiv64(v, config) + case OpDiv64F: + return rewriteValueS390X_OpDiv64F(v, config) + case OpDiv64u: + return rewriteValueS390X_OpDiv64u(v, config) + case OpDiv8: + return rewriteValueS390X_OpDiv8(v, config) + case OpDiv8u: + return rewriteValueS390X_OpDiv8u(v, config) + case OpEq16: + return rewriteValueS390X_OpEq16(v, config) + case OpEq32: + return rewriteValueS390X_OpEq32(v, config) + case OpEq32F: + return rewriteValueS390X_OpEq32F(v, config) + case OpEq64: + return rewriteValueS390X_OpEq64(v, config) + case OpEq64F: + return rewriteValueS390X_OpEq64F(v, config) + case OpEq8: + return rewriteValueS390X_OpEq8(v, config) + case OpEqB: + return rewriteValueS390X_OpEqB(v, config) + case OpEqPtr: + return rewriteValueS390X_OpEqPtr(v, config) + case OpGeq16: + return rewriteValueS390X_OpGeq16(v, config) + case OpGeq16U: + return rewriteValueS390X_OpGeq16U(v, config) + case OpGeq32: + return rewriteValueS390X_OpGeq32(v, config) + case OpGeq32F: + return rewriteValueS390X_OpGeq32F(v, config) + case OpGeq32U: + return rewriteValueS390X_OpGeq32U(v, config) + case OpGeq64: + return rewriteValueS390X_OpGeq64(v, config) + case OpGeq64F: + return rewriteValueS390X_OpGeq64F(v, config) + case OpGeq64U: + return rewriteValueS390X_OpGeq64U(v, config) + case OpGeq8: + return rewriteValueS390X_OpGeq8(v, config) + case OpGeq8U: + return rewriteValueS390X_OpGeq8U(v, config) + case OpGetClosurePtr: + return rewriteValueS390X_OpGetClosurePtr(v, config) + case OpGetG: + return rewriteValueS390X_OpGetG(v, config) + case OpGoCall: + return rewriteValueS390X_OpGoCall(v, config) + case OpGreater16: + return rewriteValueS390X_OpGreater16(v, config) + case OpGreater16U: + return rewriteValueS390X_OpGreater16U(v, config) + case OpGreater32: + return rewriteValueS390X_OpGreater32(v, config) + case OpGreater32F: + return rewriteValueS390X_OpGreater32F(v, config) + case OpGreater32U: + return rewriteValueS390X_OpGreater32U(v, config) + case OpGreater64: + return rewriteValueS390X_OpGreater64(v, config) + case OpGreater64F: + return rewriteValueS390X_OpGreater64F(v, config) + case OpGreater64U: + return rewriteValueS390X_OpGreater64U(v, config) + case OpGreater8: + return rewriteValueS390X_OpGreater8(v, config) + case OpGreater8U: + return rewriteValueS390X_OpGreater8U(v, config) + case OpHmul16: + return rewriteValueS390X_OpHmul16(v, config) + case OpHmul16u: + return rewriteValueS390X_OpHmul16u(v, config) + case OpHmul32: + return rewriteValueS390X_OpHmul32(v, config) + case OpHmul32u: + return rewriteValueS390X_OpHmul32u(v, config) + case OpHmul64: + return rewriteValueS390X_OpHmul64(v, config) + case OpHmul64u: + return rewriteValueS390X_OpHmul64u(v, config) + case OpHmul8: + return rewriteValueS390X_OpHmul8(v, config) + case OpHmul8u: + return rewriteValueS390X_OpHmul8u(v, config) + case OpITab: + return rewriteValueS390X_OpITab(v, config) + case OpInterCall: + return rewriteValueS390X_OpInterCall(v, config) + 
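Each case in the dispatch switch above forwards to a per-op rewrite function generated from the matching S390X.rules lines, so this switch is the only place that enumerates the full op set. The is20Bit predicate added to rewrite.go earlier in the patch is presumably there for rule conditions that fold constants into address offsets: s390x long-displacement instruction formats carry a signed 20-bit displacement. A standalone sketch of its boundary behaviour (is20Bit is copied from the patch; the demo around it is illustrative):

```go
package main

import "fmt"

// is20Bit reports whether n can be represented as a signed 20-bit
// integer, i.e. whether n lies in the half-open range [-(1<<19), 1<<19).
func is20Bit(n int64) bool {
	return -(1 << 19) <= n && n < (1 << 19)
}

func main() {
	fmt.Println(is20Bit(-524288)) // true:  -(1<<19), smallest representable value
	fmt.Println(is20Bit(524287))  // true:  (1<<19)-1, largest representable value
	fmt.Println(is20Bit(524288))  // false: 1<<19 needs a 21st bit
	fmt.Println(is20Bit(-524289)) // false: below the range
}
```

As with any two's-complement width, the interval is asymmetric: there is one more representable negative value than positive.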
case OpIsInBounds: + return rewriteValueS390X_OpIsInBounds(v, config) + case OpIsNonNil: + return rewriteValueS390X_OpIsNonNil(v, config) + case OpIsSliceInBounds: + return rewriteValueS390X_OpIsSliceInBounds(v, config) + case OpLeq16: + return rewriteValueS390X_OpLeq16(v, config) + case OpLeq16U: + return rewriteValueS390X_OpLeq16U(v, config) + case OpLeq32: + return rewriteValueS390X_OpLeq32(v, config) + case OpLeq32F: + return rewriteValueS390X_OpLeq32F(v, config) + case OpLeq32U: + return rewriteValueS390X_OpLeq32U(v, config) + case OpLeq64: + return rewriteValueS390X_OpLeq64(v, config) + case OpLeq64F: + return rewriteValueS390X_OpLeq64F(v, config) + case OpLeq64U: + return rewriteValueS390X_OpLeq64U(v, config) + case OpLeq8: + return rewriteValueS390X_OpLeq8(v, config) + case OpLeq8U: + return rewriteValueS390X_OpLeq8U(v, config) + case OpLess16: + return rewriteValueS390X_OpLess16(v, config) + case OpLess16U: + return rewriteValueS390X_OpLess16U(v, config) + case OpLess32: + return rewriteValueS390X_OpLess32(v, config) + case OpLess32F: + return rewriteValueS390X_OpLess32F(v, config) + case OpLess32U: + return rewriteValueS390X_OpLess32U(v, config) + case OpLess64: + return rewriteValueS390X_OpLess64(v, config) + case OpLess64F: + return rewriteValueS390X_OpLess64F(v, config) + case OpLess64U: + return rewriteValueS390X_OpLess64U(v, config) + case OpLess8: + return rewriteValueS390X_OpLess8(v, config) + case OpLess8U: + return rewriteValueS390X_OpLess8U(v, config) + case OpLoad: + return rewriteValueS390X_OpLoad(v, config) + case OpLrot32: + return rewriteValueS390X_OpLrot32(v, config) + case OpLrot64: + return rewriteValueS390X_OpLrot64(v, config) + case OpLsh16x16: + return rewriteValueS390X_OpLsh16x16(v, config) + case OpLsh16x32: + return rewriteValueS390X_OpLsh16x32(v, config) + case OpLsh16x64: + return rewriteValueS390X_OpLsh16x64(v, config) + case OpLsh16x8: + return rewriteValueS390X_OpLsh16x8(v, config) + case OpLsh32x16: + return rewriteValueS390X_OpLsh32x16(v, config) + case OpLsh32x32: + return rewriteValueS390X_OpLsh32x32(v, config) + case OpLsh32x64: + return rewriteValueS390X_OpLsh32x64(v, config) + case OpLsh32x8: + return rewriteValueS390X_OpLsh32x8(v, config) + case OpLsh64x16: + return rewriteValueS390X_OpLsh64x16(v, config) + case OpLsh64x32: + return rewriteValueS390X_OpLsh64x32(v, config) + case OpLsh64x64: + return rewriteValueS390X_OpLsh64x64(v, config) + case OpLsh64x8: + return rewriteValueS390X_OpLsh64x8(v, config) + case OpLsh8x16: + return rewriteValueS390X_OpLsh8x16(v, config) + case OpLsh8x32: + return rewriteValueS390X_OpLsh8x32(v, config) + case OpLsh8x64: + return rewriteValueS390X_OpLsh8x64(v, config) + case OpLsh8x8: + return rewriteValueS390X_OpLsh8x8(v, config) + case OpMod16: + return rewriteValueS390X_OpMod16(v, config) + case OpMod16u: + return rewriteValueS390X_OpMod16u(v, config) + case OpMod32: + return rewriteValueS390X_OpMod32(v, config) + case OpMod32u: + return rewriteValueS390X_OpMod32u(v, config) + case OpMod64: + return rewriteValueS390X_OpMod64(v, config) + case OpMod64u: + return rewriteValueS390X_OpMod64u(v, config) + case OpMod8: + return rewriteValueS390X_OpMod8(v, config) + case OpMod8u: + return rewriteValueS390X_OpMod8u(v, config) + case OpMove: + return rewriteValueS390X_OpMove(v, config) + case OpMul16: + return rewriteValueS390X_OpMul16(v, config) + case OpMul32: + return rewriteValueS390X_OpMul32(v, config) + case OpMul32F: + return rewriteValueS390X_OpMul32F(v, config) + case OpMul64: + return 
rewriteValueS390X_OpMul64(v, config) + case OpMul64F: + return rewriteValueS390X_OpMul64F(v, config) + case OpMul8: + return rewriteValueS390X_OpMul8(v, config) + case OpNeg16: + return rewriteValueS390X_OpNeg16(v, config) + case OpNeg32: + return rewriteValueS390X_OpNeg32(v, config) + case OpNeg32F: + return rewriteValueS390X_OpNeg32F(v, config) + case OpNeg64: + return rewriteValueS390X_OpNeg64(v, config) + case OpNeg64F: + return rewriteValueS390X_OpNeg64F(v, config) + case OpNeg8: + return rewriteValueS390X_OpNeg8(v, config) + case OpNeq16: + return rewriteValueS390X_OpNeq16(v, config) + case OpNeq32: + return rewriteValueS390X_OpNeq32(v, config) + case OpNeq32F: + return rewriteValueS390X_OpNeq32F(v, config) + case OpNeq64: + return rewriteValueS390X_OpNeq64(v, config) + case OpNeq64F: + return rewriteValueS390X_OpNeq64F(v, config) + case OpNeq8: + return rewriteValueS390X_OpNeq8(v, config) + case OpNeqB: + return rewriteValueS390X_OpNeqB(v, config) + case OpNeqPtr: + return rewriteValueS390X_OpNeqPtr(v, config) + case OpNilCheck: + return rewriteValueS390X_OpNilCheck(v, config) + case OpNot: + return rewriteValueS390X_OpNot(v, config) + case OpOffPtr: + return rewriteValueS390X_OpOffPtr(v, config) + case OpOr16: + return rewriteValueS390X_OpOr16(v, config) + case OpOr32: + return rewriteValueS390X_OpOr32(v, config) + case OpOr64: + return rewriteValueS390X_OpOr64(v, config) + case OpOr8: + return rewriteValueS390X_OpOr8(v, config) + case OpOrB: + return rewriteValueS390X_OpOrB(v, config) + case OpRsh16Ux16: + return rewriteValueS390X_OpRsh16Ux16(v, config) + case OpRsh16Ux32: + return rewriteValueS390X_OpRsh16Ux32(v, config) + case OpRsh16Ux64: + return rewriteValueS390X_OpRsh16Ux64(v, config) + case OpRsh16Ux8: + return rewriteValueS390X_OpRsh16Ux8(v, config) + case OpRsh16x16: + return rewriteValueS390X_OpRsh16x16(v, config) + case OpRsh16x32: + return rewriteValueS390X_OpRsh16x32(v, config) + case OpRsh16x64: + return rewriteValueS390X_OpRsh16x64(v, config) + case OpRsh16x8: + return rewriteValueS390X_OpRsh16x8(v, config) + case OpRsh32Ux16: + return rewriteValueS390X_OpRsh32Ux16(v, config) + case OpRsh32Ux32: + return rewriteValueS390X_OpRsh32Ux32(v, config) + case OpRsh32Ux64: + return rewriteValueS390X_OpRsh32Ux64(v, config) + case OpRsh32Ux8: + return rewriteValueS390X_OpRsh32Ux8(v, config) + case OpRsh32x16: + return rewriteValueS390X_OpRsh32x16(v, config) + case OpRsh32x32: + return rewriteValueS390X_OpRsh32x32(v, config) + case OpRsh32x64: + return rewriteValueS390X_OpRsh32x64(v, config) + case OpRsh32x8: + return rewriteValueS390X_OpRsh32x8(v, config) + case OpRsh64Ux16: + return rewriteValueS390X_OpRsh64Ux16(v, config) + case OpRsh64Ux32: + return rewriteValueS390X_OpRsh64Ux32(v, config) + case OpRsh64Ux64: + return rewriteValueS390X_OpRsh64Ux64(v, config) + case OpRsh64Ux8: + return rewriteValueS390X_OpRsh64Ux8(v, config) + case OpRsh64x16: + return rewriteValueS390X_OpRsh64x16(v, config) + case OpRsh64x32: + return rewriteValueS390X_OpRsh64x32(v, config) + case OpRsh64x64: + return rewriteValueS390X_OpRsh64x64(v, config) + case OpRsh64x8: + return rewriteValueS390X_OpRsh64x8(v, config) + case OpRsh8Ux16: + return rewriteValueS390X_OpRsh8Ux16(v, config) + case OpRsh8Ux32: + return rewriteValueS390X_OpRsh8Ux32(v, config) + case OpRsh8Ux64: + return rewriteValueS390X_OpRsh8Ux64(v, config) + case OpRsh8Ux8: + return rewriteValueS390X_OpRsh8Ux8(v, config) + case OpRsh8x16: + return rewriteValueS390X_OpRsh8x16(v, config) + case OpRsh8x32: + return 
rewriteValueS390X_OpRsh8x32(v, config) + case OpRsh8x64: + return rewriteValueS390X_OpRsh8x64(v, config) + case OpRsh8x8: + return rewriteValueS390X_OpRsh8x8(v, config) + case OpS390XADD: + return rewriteValueS390X_OpS390XADD(v, config) + case OpS390XADDW: + return rewriteValueS390X_OpS390XADDW(v, config) + case OpS390XADDWconst: + return rewriteValueS390X_OpS390XADDWconst(v, config) + case OpS390XADDconst: + return rewriteValueS390X_OpS390XADDconst(v, config) + case OpS390XAND: + return rewriteValueS390X_OpS390XAND(v, config) + case OpS390XANDW: + return rewriteValueS390X_OpS390XANDW(v, config) + case OpS390XANDWconst: + return rewriteValueS390X_OpS390XANDWconst(v, config) + case OpS390XANDconst: + return rewriteValueS390X_OpS390XANDconst(v, config) + case OpS390XCMP: + return rewriteValueS390X_OpS390XCMP(v, config) + case OpS390XCMPU: + return rewriteValueS390X_OpS390XCMPU(v, config) + case OpS390XCMPUconst: + return rewriteValueS390X_OpS390XCMPUconst(v, config) + case OpS390XCMPW: + return rewriteValueS390X_OpS390XCMPW(v, config) + case OpS390XCMPWU: + return rewriteValueS390X_OpS390XCMPWU(v, config) + case OpS390XCMPWUconst: + return rewriteValueS390X_OpS390XCMPWUconst(v, config) + case OpS390XCMPWconst: + return rewriteValueS390X_OpS390XCMPWconst(v, config) + case OpS390XCMPconst: + return rewriteValueS390X_OpS390XCMPconst(v, config) + case OpS390XFMOVDload: + return rewriteValueS390X_OpS390XFMOVDload(v, config) + case OpS390XFMOVDloadidx: + return rewriteValueS390X_OpS390XFMOVDloadidx(v, config) + case OpS390XFMOVDstore: + return rewriteValueS390X_OpS390XFMOVDstore(v, config) + case OpS390XFMOVDstoreidx: + return rewriteValueS390X_OpS390XFMOVDstoreidx(v, config) + case OpS390XFMOVSload: + return rewriteValueS390X_OpS390XFMOVSload(v, config) + case OpS390XFMOVSloadidx: + return rewriteValueS390X_OpS390XFMOVSloadidx(v, config) + case OpS390XFMOVSstore: + return rewriteValueS390X_OpS390XFMOVSstore(v, config) + case OpS390XFMOVSstoreidx: + return rewriteValueS390X_OpS390XFMOVSstoreidx(v, config) + case OpS390XMOVBZload: + return rewriteValueS390X_OpS390XMOVBZload(v, config) + case OpS390XMOVBZloadidx: + return rewriteValueS390X_OpS390XMOVBZloadidx(v, config) + case OpS390XMOVBZreg: + return rewriteValueS390X_OpS390XMOVBZreg(v, config) + case OpS390XMOVBload: + return rewriteValueS390X_OpS390XMOVBload(v, config) + case OpS390XMOVBreg: + return rewriteValueS390X_OpS390XMOVBreg(v, config) + case OpS390XMOVBstore: + return rewriteValueS390X_OpS390XMOVBstore(v, config) + case OpS390XMOVBstoreconst: + return rewriteValueS390X_OpS390XMOVBstoreconst(v, config) + case OpS390XMOVBstoreidx: + return rewriteValueS390X_OpS390XMOVBstoreidx(v, config) + case OpS390XMOVDEQ: + return rewriteValueS390X_OpS390XMOVDEQ(v, config) + case OpS390XMOVDGE: + return rewriteValueS390X_OpS390XMOVDGE(v, config) + case OpS390XMOVDGT: + return rewriteValueS390X_OpS390XMOVDGT(v, config) + case OpS390XMOVDLE: + return rewriteValueS390X_OpS390XMOVDLE(v, config) + case OpS390XMOVDLT: + return rewriteValueS390X_OpS390XMOVDLT(v, config) + case OpS390XMOVDNE: + return rewriteValueS390X_OpS390XMOVDNE(v, config) + case OpS390XMOVDaddr: + return rewriteValueS390X_OpS390XMOVDaddr(v, config) + case OpS390XMOVDaddridx: + return rewriteValueS390X_OpS390XMOVDaddridx(v, config) + case OpS390XMOVDload: + return rewriteValueS390X_OpS390XMOVDload(v, config) + case OpS390XMOVDloadidx: + return rewriteValueS390X_OpS390XMOVDloadidx(v, config) + case OpS390XMOVDstore: + return rewriteValueS390X_OpS390XMOVDstore(v, config) + case 
OpS390XMOVDstoreconst: + return rewriteValueS390X_OpS390XMOVDstoreconst(v, config) + case OpS390XMOVDstoreidx: + return rewriteValueS390X_OpS390XMOVDstoreidx(v, config) + case OpS390XMOVHZload: + return rewriteValueS390X_OpS390XMOVHZload(v, config) + case OpS390XMOVHZloadidx: + return rewriteValueS390X_OpS390XMOVHZloadidx(v, config) + case OpS390XMOVHZreg: + return rewriteValueS390X_OpS390XMOVHZreg(v, config) + case OpS390XMOVHload: + return rewriteValueS390X_OpS390XMOVHload(v, config) + case OpS390XMOVHreg: + return rewriteValueS390X_OpS390XMOVHreg(v, config) + case OpS390XMOVHstore: + return rewriteValueS390X_OpS390XMOVHstore(v, config) + case OpS390XMOVHstoreconst: + return rewriteValueS390X_OpS390XMOVHstoreconst(v, config) + case OpS390XMOVHstoreidx: + return rewriteValueS390X_OpS390XMOVHstoreidx(v, config) + case OpS390XMOVWZload: + return rewriteValueS390X_OpS390XMOVWZload(v, config) + case OpS390XMOVWZloadidx: + return rewriteValueS390X_OpS390XMOVWZloadidx(v, config) + case OpS390XMOVWZreg: + return rewriteValueS390X_OpS390XMOVWZreg(v, config) + case OpS390XMOVWload: + return rewriteValueS390X_OpS390XMOVWload(v, config) + case OpS390XMOVWreg: + return rewriteValueS390X_OpS390XMOVWreg(v, config) + case OpS390XMOVWstore: + return rewriteValueS390X_OpS390XMOVWstore(v, config) + case OpS390XMOVWstoreconst: + return rewriteValueS390X_OpS390XMOVWstoreconst(v, config) + case OpS390XMOVWstoreidx: + return rewriteValueS390X_OpS390XMOVWstoreidx(v, config) + case OpS390XMULLD: + return rewriteValueS390X_OpS390XMULLD(v, config) + case OpS390XMULLDconst: + return rewriteValueS390X_OpS390XMULLDconst(v, config) + case OpS390XMULLW: + return rewriteValueS390X_OpS390XMULLW(v, config) + case OpS390XMULLWconst: + return rewriteValueS390X_OpS390XMULLWconst(v, config) + case OpS390XNEG: + return rewriteValueS390X_OpS390XNEG(v, config) + case OpS390XNEGW: + return rewriteValueS390X_OpS390XNEGW(v, config) + case OpS390XNOT: + return rewriteValueS390X_OpS390XNOT(v, config) + case OpS390XNOTW: + return rewriteValueS390X_OpS390XNOTW(v, config) + case OpS390XOR: + return rewriteValueS390X_OpS390XOR(v, config) + case OpS390XORW: + return rewriteValueS390X_OpS390XORW(v, config) + case OpS390XORWconst: + return rewriteValueS390X_OpS390XORWconst(v, config) + case OpS390XORconst: + return rewriteValueS390X_OpS390XORconst(v, config) + case OpS390XSLD: + return rewriteValueS390X_OpS390XSLD(v, config) + case OpS390XSLW: + return rewriteValueS390X_OpS390XSLW(v, config) + case OpS390XSRAD: + return rewriteValueS390X_OpS390XSRAD(v, config) + case OpS390XSRADconst: + return rewriteValueS390X_OpS390XSRADconst(v, config) + case OpS390XSRAW: + return rewriteValueS390X_OpS390XSRAW(v, config) + case OpS390XSRAWconst: + return rewriteValueS390X_OpS390XSRAWconst(v, config) + case OpS390XSRD: + return rewriteValueS390X_OpS390XSRD(v, config) + case OpS390XSRW: + return rewriteValueS390X_OpS390XSRW(v, config) + case OpS390XSUB: + return rewriteValueS390X_OpS390XSUB(v, config) + case OpS390XSUBEWcarrymask: + return rewriteValueS390X_OpS390XSUBEWcarrymask(v, config) + case OpS390XSUBEcarrymask: + return rewriteValueS390X_OpS390XSUBEcarrymask(v, config) + case OpS390XSUBW: + return rewriteValueS390X_OpS390XSUBW(v, config) + case OpS390XSUBWconst: + return rewriteValueS390X_OpS390XSUBWconst(v, config) + case OpS390XSUBconst: + return rewriteValueS390X_OpS390XSUBconst(v, config) + case OpS390XXOR: + return rewriteValueS390X_OpS390XXOR(v, config) + case OpS390XXORW: + return rewriteValueS390X_OpS390XXORW(v, config) + case 
OpS390XXORWconst: + return rewriteValueS390X_OpS390XXORWconst(v, config) + case OpS390XXORconst: + return rewriteValueS390X_OpS390XXORconst(v, config) + case OpSignExt16to32: + return rewriteValueS390X_OpSignExt16to32(v, config) + case OpSignExt16to64: + return rewriteValueS390X_OpSignExt16to64(v, config) + case OpSignExt32to64: + return rewriteValueS390X_OpSignExt32to64(v, config) + case OpSignExt8to16: + return rewriteValueS390X_OpSignExt8to16(v, config) + case OpSignExt8to32: + return rewriteValueS390X_OpSignExt8to32(v, config) + case OpSignExt8to64: + return rewriteValueS390X_OpSignExt8to64(v, config) + case OpSqrt: + return rewriteValueS390X_OpSqrt(v, config) + case OpStaticCall: + return rewriteValueS390X_OpStaticCall(v, config) + case OpStore: + return rewriteValueS390X_OpStore(v, config) + case OpSub16: + return rewriteValueS390X_OpSub16(v, config) + case OpSub32: + return rewriteValueS390X_OpSub32(v, config) + case OpSub32F: + return rewriteValueS390X_OpSub32F(v, config) + case OpSub64: + return rewriteValueS390X_OpSub64(v, config) + case OpSub64F: + return rewriteValueS390X_OpSub64F(v, config) + case OpSub8: + return rewriteValueS390X_OpSub8(v, config) + case OpSubPtr: + return rewriteValueS390X_OpSubPtr(v, config) + case OpTrunc16to8: + return rewriteValueS390X_OpTrunc16to8(v, config) + case OpTrunc32to16: + return rewriteValueS390X_OpTrunc32to16(v, config) + case OpTrunc32to8: + return rewriteValueS390X_OpTrunc32to8(v, config) + case OpTrunc64to16: + return rewriteValueS390X_OpTrunc64to16(v, config) + case OpTrunc64to32: + return rewriteValueS390X_OpTrunc64to32(v, config) + case OpTrunc64to8: + return rewriteValueS390X_OpTrunc64to8(v, config) + case OpXor16: + return rewriteValueS390X_OpXor16(v, config) + case OpXor32: + return rewriteValueS390X_OpXor32(v, config) + case OpXor64: + return rewriteValueS390X_OpXor64(v, config) + case OpXor8: + return rewriteValueS390X_OpXor8(v, config) + case OpZero: + return rewriteValueS390X_OpZero(v, config) + case OpZeroExt16to32: + return rewriteValueS390X_OpZeroExt16to32(v, config) + case OpZeroExt16to64: + return rewriteValueS390X_OpZeroExt16to64(v, config) + case OpZeroExt32to64: + return rewriteValueS390X_OpZeroExt32to64(v, config) + case OpZeroExt8to16: + return rewriteValueS390X_OpZeroExt8to16(v, config) + case OpZeroExt8to32: + return rewriteValueS390X_OpZeroExt8to32(v, config) + case OpZeroExt8to64: + return rewriteValueS390X_OpZeroExt8to64(v, config) + } + return false +} +func rewriteValueS390X_OpAdd16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Add16 x y) + // cond: + // result: (ADDW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XADDW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAdd32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Add32 x y) + // cond: + // result: (ADDW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XADDW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAdd32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Add32F x y) + // cond: + // result: (FADDS x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XFADDS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAdd64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Add64 x y) + // cond: + // result: (ADD x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XADD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAdd64F(v 
*Value, config *Config) bool { + b := v.Block + _ = b + // match: (Add64F x y) + // cond: + // result: (FADD x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XFADD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAdd8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Add8 x y) + // cond: + // result: (ADDW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XADDW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAddPtr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (AddPtr x y) + // cond: + // result: (ADD x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XADD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAddr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Addr {sym} base) + // cond: + // result: (MOVDaddr {sym} base) + for { + sym := v.Aux + base := v.Args[0] + v.reset(OpS390XMOVDaddr) + v.Aux = sym + v.AddArg(base) + return true + } +} +func rewriteValueS390X_OpAnd16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (And16 x y) + // cond: + // result: (ANDW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAnd32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (And32 x y) + // cond: + // result: (ANDW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAnd64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (And64 x y) + // cond: + // result: (AND x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XAND) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAnd8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (And8 x y) + // cond: + // result: (ANDW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAndB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (AndB x y) + // cond: + // result: (ANDW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpAvg64u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Avg64u x y) + // cond: + // result: (ADD (ADD (SRDconst x [1]) (SRDconst y [1])) (ANDconst (AND x y) [1])) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XADD) + v0 := b.NewValue0(v.Line, OpS390XADD, t) + v1 := b.NewValue0(v.Line, OpS390XSRDconst, t) + v1.AuxInt = 1 + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XSRDconst, t) + v2.AuxInt = 1 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Line, OpS390XANDconst, t) + v3.AuxInt = 1 + v4 := b.NewValue0(v.Line, OpS390XAND, t) + v4.AddArg(x) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueS390X_OpClosureCall(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ClosureCall [argwid] entry closure mem) + // cond: + // result: (CALLclosure [argwid] entry closure mem) + for { + argwid := v.AuxInt + entry := v.Args[0] + closure := v.Args[1] + mem := v.Args[2] + v.reset(OpS390XCALLclosure) + v.AuxInt = argwid + v.AddArg(entry) + v.AddArg(closure) + v.AddArg(mem) + return true + } +} +func rewriteValueS390X_OpCom16(v *Value, config *Config) bool { + b 
:= v.Block + _ = b + // match: (Com16 x) + // cond: + // result: (NOTW x) + for { + x := v.Args[0] + v.reset(OpS390XNOTW) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCom32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Com32 x) + // cond: + // result: (NOTW x) + for { + x := v.Args[0] + v.reset(OpS390XNOTW) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCom64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Com64 x) + // cond: + // result: (NOT x) + for { + x := v.Args[0] + v.reset(OpS390XNOT) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCom8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Com8 x) + // cond: + // result: (NOTW x) + for { + x := v.Args[0] + v.reset(OpS390XNOTW) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpConst16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Const16 [val]) + // cond: + // result: (MOVDconst [val]) + for { + val := v.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = val + return true + } +} +func rewriteValueS390X_OpConst32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Const32 [val]) + // cond: + // result: (MOVDconst [val]) + for { + val := v.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = val + return true + } +} +func rewriteValueS390X_OpConst32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Const32F [val]) + // cond: + // result: (FMOVSconst [val]) + for { + val := v.AuxInt + v.reset(OpS390XFMOVSconst) + v.AuxInt = val + return true + } +} +func rewriteValueS390X_OpConst64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Const64 [val]) + // cond: + // result: (MOVDconst [val]) + for { + val := v.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = val + return true + } +} +func rewriteValueS390X_OpConst64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Const64F [val]) + // cond: + // result: (FMOVDconst [val]) + for { + val := v.AuxInt + v.reset(OpS390XFMOVDconst) + v.AuxInt = val + return true + } +} +func rewriteValueS390X_OpConst8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Const8 [val]) + // cond: + // result: (MOVDconst [val]) + for { + val := v.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = val + return true + } +} +func rewriteValueS390X_OpConstBool(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ConstBool [b]) + // cond: + // result: (MOVDconst [b]) + for { + b := v.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = b + return true + } +} +func rewriteValueS390X_OpConstNil(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ConstNil) + // cond: + // result: (MOVDconst [0]) + for { + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } +} +func rewriteValueS390X_OpConvert(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Convert x mem) + // cond: + // result: (MOVDconvert x mem) + for { + t := v.Type + x := v.Args[0] + mem := v.Args[1] + v.reset(OpS390XMOVDconvert) + v.Type = t + v.AddArg(x) + v.AddArg(mem) + return true + } +} +func rewriteValueS390X_OpCvt32Fto32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt32Fto32 x) + // cond: + // result: (CFEBRA x) + for { + x := v.Args[0] + v.reset(OpS390XCFEBRA) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt32Fto64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt32Fto64 x) + // cond: + // result: (CGEBRA x) + for { + x := 
v.Args[0] + v.reset(OpS390XCGEBRA) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt32Fto64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt32Fto64F x) + // cond: + // result: (LDEBR x) + for { + x := v.Args[0] + v.reset(OpS390XLDEBR) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt32to32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt32to32F x) + // cond: + // result: (CEFBRA x) + for { + x := v.Args[0] + v.reset(OpS390XCEFBRA) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt32to64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt32to64F x) + // cond: + // result: (CDFBRA x) + for { + x := v.Args[0] + v.reset(OpS390XCDFBRA) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt64Fto32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt64Fto32 x) + // cond: + // result: (CFDBRA x) + for { + x := v.Args[0] + v.reset(OpS390XCFDBRA) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt64Fto32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt64Fto32F x) + // cond: + // result: (LEDBR x) + for { + x := v.Args[0] + v.reset(OpS390XLEDBR) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt64Fto64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt64Fto64 x) + // cond: + // result: (CGDBRA x) + for { + x := v.Args[0] + v.reset(OpS390XCGDBRA) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt64to32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt64to32F x) + // cond: + // result: (CEGBRA x) + for { + x := v.Args[0] + v.reset(OpS390XCEGBRA) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpCvt64to64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Cvt64to64F x) + // cond: + // result: (CDGBRA x) + for { + x := v.Args[0] + v.reset(OpS390XCDGBRA) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpDeferCall(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (DeferCall [argwid] mem) + // cond: + // result: (CALLdefer [argwid] mem) + for { + argwid := v.AuxInt + mem := v.Args[0] + v.reset(OpS390XCALLdefer) + v.AuxInt = argwid + v.AddArg(mem) + return true + } +} +func rewriteValueS390X_OpDiv16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div16 x y) + // cond: + // result: (DIVW (MOVHreg x) (MOVHreg y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XDIVW) + v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpDiv16u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div16u x y) + // cond: + // result: (DIVWU (MOVHZreg x) (MOVHZreg y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XDIVWU) + v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpDiv32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div32 x y) + // cond: + // result: (DIVW (MOVWreg x) y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XDIVW) + v0 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + 
} +} +func rewriteValueS390X_OpDiv32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div32F x y) + // cond: + // result: (FDIVS x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XFDIVS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpDiv32u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div32u x y) + // cond: + // result: (DIVWU (MOVWZreg x) y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XDIVWU) + v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64()) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpDiv64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div64 x y) + // cond: + // result: (DIVD x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XDIVD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpDiv64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div64F x y) + // cond: + // result: (FDIV x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XFDIV) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpDiv64u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div64u x y) + // cond: + // result: (DIVDU x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XDIVDU) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpDiv8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div8 x y) + // cond: + // result: (DIVW (MOVBreg x) (MOVBreg y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XDIVW) + v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpDiv8u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Div8u x y) + // cond: + // result: (DIVWU (MOVBZreg x) (MOVBZreg y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XDIVWU) + v0 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpEq16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Eq16 x y) + // cond: + // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDEQ) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags) + v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v3.AddArg(x) + v2.AddArg(v3) + v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v4.AddArg(y) + v2.AddArg(v4) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpEq32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Eq32 x y) + // cond: + // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDEQ) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := 
b.NewValue0(v.Line, OpS390XCMPW, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpEq32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Eq32F x y) + // cond: + // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDEQ) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpEq64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Eq64 x y) + // cond: + // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDEQ) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpEq64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Eq64F x y) + // cond: + // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDEQ) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpEq8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Eq8 x y) + // cond: + // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDEQ) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags) + v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v3.AddArg(x) + v2.AddArg(v3) + v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v4.AddArg(y) + v2.AddArg(v4) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpEqB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (EqB x y) + // cond: + // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDEQ) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags) + v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v3.AddArg(x) + v2.AddArg(v3) + v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v4.AddArg(y) + v2.AddArg(v4) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpEqPtr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (EqPtr x y) + // cond: + // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) + for { + x := v.Args[0] 
+ y := v.Args[1] + v.reset(OpS390XMOVDEQ) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq16 x y) + // cond: + // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGE) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags) + v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v3.AddArg(x) + v2.AddArg(v3) + v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v4.AddArg(y) + v2.AddArg(v4) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq16U(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq16U x y) + // cond: + // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGE) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags) + v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v3.AddArg(x) + v2.AddArg(v3) + v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v4.AddArg(y) + v2.AddArg(v4) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq32 x y) + // cond: + // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGE) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq32F x y) + // cond: + // result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGEnoinv) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq32U(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq32U x y) + // cond: + // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGE) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMPWU, 
TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq64 x y) + // cond: + // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGE) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq64F x y) + // cond: + // result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGEnoinv) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq64U(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq64U x y) + // cond: + // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGE) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags) + v2.AddArg(x) + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq8 x y) + // cond: + // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGE) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags) + v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v3.AddArg(x) + v2.AddArg(v3) + v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v4.AddArg(y) + v2.AddArg(v4) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGeq8U(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Geq8U x y) + // cond: + // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XMOVDGE) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v1.AuxInt = 1 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags) + v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v3.AddArg(x) + v2.AddArg(v3) + v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v4.AddArg(y) + v2.AddArg(v4) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpGetClosurePtr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (GetClosurePtr) + // cond: + // result: (LoweredGetClosurePtr) + for { + 
+		v.reset(OpS390XLoweredGetClosurePtr)
+		return true
+	}
+}
+func rewriteValueS390X_OpGetG(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (GetG mem)
+	// cond:
+	// result: (LoweredGetG mem)
+	for {
+		mem := v.Args[0]
+		v.reset(OpS390XLoweredGetG)
+		v.AddArg(mem)
+		return true
+	}
+}
+func rewriteValueS390X_OpGoCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (GoCall [argwid] mem)
+	// cond:
+	// result: (CALLgo [argwid] mem)
+	for {
+		argwid := v.AuxInt
+		mem := v.Args[0]
+		v.reset(OpS390XCALLgo)
+		v.AuxInt = argwid
+		v.AddArg(mem)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater16 x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater16U x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32 x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32F x y)
+	// cond:
+	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGTnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32U x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64 x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64F x y)
+	// cond:
+	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGTnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64U x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater8 x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpGreater8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater8U x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
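A note on the pattern above: every sub-word signed comparison sign-extends both operands to 64 bits (MOVHreg/MOVBreg) before the signed CMP, the unsigned variants zero-extend (MOVHZreg/MOVBZreg) before CMPU, and the boolean result is materialized by a conditional move between the constants 0 and 1. A minimal Go sketch of the semantics these generated rules encode (illustrative only, not compiler code):

package main

import "fmt"

// greater16 mirrors the (Greater16 x y) rule: both halves are
// sign-extended to 64 bits, compared signed, and the flags pick
// between the constants 0 and 1 (MOVDGT).
func greater16(x, y int16) int64 {
	if int64(x) > int64(y) { // CMP (MOVHreg x) (MOVHreg y)
		return 1 // MOVDconst [1]
	}
	return 0 // MOVDconst [0]
}

// greater16u is the unsigned variant: zero-extension plus CMPU.
func greater16u(x, y uint16) int64 {
	if uint64(x) > uint64(y) { // CMPU (MOVHZreg x) (MOVHZreg y)
		return 1
	}
	return 0
}

func main() {
	fmt.Println(greater16(-1, 1), greater16u(0xffff, 1)) // 0 1
}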
+func rewriteValueS390X_OpHmul16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul16 x y)
+	// cond:
+	// result: (SRDconst [16] (MULLW (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 16
+		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
+		v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueS390X_OpHmul16u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul16u x y)
+	// cond:
+	// result: (SRDconst [16] (MULLW (MOVHZreg x) (MOVHZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 16
+		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueS390X_OpHmul32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul32 x y)
+	// cond:
+	// result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 32
+		v0 := b.NewValue0(v.Line, OpS390XMULLD, config.fe.TypeInt64())
+		v1 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueS390X_OpHmul32u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul32u x y)
+	// cond:
+	// result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 32
+		v0 := b.NewValue0(v.Line, OpS390XMULLD, config.fe.TypeInt64())
+		v1 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueS390X_OpHmul64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul64 x y)
+	// cond:
+	// result: (MULHD x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULHD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpHmul64u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul64u x y)
+	// cond:
+	// result: (MULHDU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULHDU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpHmul8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul8 x y)
+	// cond:
+	// result: (SRDconst [8] (MULLW (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 8
+		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
+		v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueS390X_OpHmul8u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul8u x y)
+	// cond:
+	// result: (SRDconst [8] (MULLW (MOVBZreg x) (MOVBZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 8
+		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
+		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueS390X_OpITab(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ITab (Load ptr mem))
+	// cond:
+	// result: (MOVDload ptr mem)
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpLoad {
+			break
+		}
+		ptr := v_0.Args[0]
+		mem := v_0.Args[1]
+		v.reset(OpS390XMOVDload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpInterCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (InterCall [argwid] entry mem)
+	// cond:
+	// result: (CALLinter [argwid] entry mem)
+	for {
+		argwid := v.AuxInt
+		entry := v.Args[0]
+		mem := v.Args[1]
+		v.reset(OpS390XCALLinter)
+		v.AuxInt = argwid
+		v.AddArg(entry)
+		v.AddArg(mem)
+		return true
+	}
+}
+func rewriteValueS390X_OpIsInBounds(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsInBounds idx len)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+	for {
+		idx := v.Args[0]
+		len := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(idx)
+		v2.AddArg(len)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpIsNonNil(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsNonNil p)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+	for {
+		p := v.Args[0]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPconst, TypeFlags)
+		v2.AuxInt = 0
+		v2.AddArg(p)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpIsSliceInBounds(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsSliceInBounds idx len)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+	for {
+		idx := v.Args[0]
+		len := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(idx)
+		v2.AddArg(len)
+		v.AddArg(v2)
+		return true
+	}
+}
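The Hmul rules above avoid a dedicated high-multiply instruction for sub-word widths: the operands are extended to a wider register, multiplied in full, and the high half is extracted with a right shift; only the 64-bit case needs MULHD/MULHDU, since the 128-bit product cannot fit in one register. The IsInBounds rule uses the classic trick of folding "0 <= idx && idx < len" into a single unsigned compare. A minimal Go sketch of the Hmul16 arithmetic (illustrative, not compiler code):

package main

import "fmt"

// hmul16 mirrors (Hmul16 x y): sign-extend (MOVHreg), multiply in
// full width (MULLW), then take the high half with a right shift
// (SRDconst [16]).
func hmul16(x, y int16) int16 {
	return int16((int32(x) * int32(y)) >> 16)
}

func main() {
	fmt.Println(hmul16(0x4000, 0x4000)) // 4096 (0x1000)
}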
+func rewriteValueS390X_OpLeq16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq16 x y)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq16U x y)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq32 x y)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq32F x y)
+	// cond:
+	// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGEnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+		v2.AddArg(y)
+		v2.AddArg(x)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq32U x y)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq64 x y)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq64F x y)
+	// cond:
+	// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGEnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+		v2.AddArg(y)
+		v2.AddArg(x)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq64U x y)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq8 x y)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq8U x y)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
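Note the floating-point cases: Leq32F/Leq64F swap the operands and use a "greater or equal" conditional move (x <= y is computed as y >= x), and the "noinv" conditional-move variants do not treat the unordered result as a match, so any comparison involving NaN yields 0, as Go requires. A minimal Go sketch of that semantics (illustrative only):

package main

import (
	"fmt"
	"math"
)

// leq64f mirrors (Leq64F x y) -> (MOVDGEnoinv ... (FCMP y x)): operands
// swapped, condition "greater or equal", unordered (NaN) excluded.
func leq64f(x, y float64) int64 {
	if y >= x { // FCMP y x; NaN compares unordered, so this is false
		return 1
	}
	return 0
}

func main() {
	fmt.Println(leq64f(1, 2), leq64f(math.NaN(), 1)) // 1 0
}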
+func rewriteValueS390X_OpLess16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less16 x y)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less16U x y)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less32 x y)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less32F x y)
+	// cond:
+	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGTnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+		v2.AddArg(y)
+		v2.AddArg(x)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less32U x y)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less64 x y)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less64F x y)
+	// cond:
+	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGTnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+		v2.AddArg(y)
+		v2.AddArg(x)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less64U x y)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less8 x y)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLess8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less8U x y)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLoad(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Load ptr mem)
+	// cond: (is64BitInt(t) || isPtr(t))
+	// result: (MOVDload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is64BitInt(t) || isPtr(t)) {
+			break
+		}
+		v.reset(OpS390XMOVDload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load ptr mem)
+	// cond: is32BitInt(t)
+	// result: (MOVWZload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is32BitInt(t)) {
+			break
+		}
+		v.reset(OpS390XMOVWZload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load ptr mem)
+	// cond: is16BitInt(t)
+	// result: (MOVHZload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is16BitInt(t)) {
+			break
+		}
+		v.reset(OpS390XMOVHZload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load ptr mem)
+	// cond: (t.IsBoolean() || is8BitInt(t))
+	// result: (MOVBZload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(t.IsBoolean() || is8BitInt(t)) {
+			break
+		}
+		v.reset(OpS390XMOVBZload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load ptr mem)
+	// cond: is32BitFloat(t)
+	// result: (FMOVSload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is32BitFloat(t)) {
+			break
+		}
+		v.reset(OpS390XFMOVSload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load ptr mem)
+	// cond: is64BitFloat(t)
+	// result: (FMOVDload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is64BitFloat(t)) {
+			break
+		}
+		v.reset(OpS390XFMOVDload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
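The Load rules above dispatch purely on the value's type: width (and float-ness) selects the machine op, and sub-word integer loads default to the zero-extending forms. A minimal Go sketch of that dispatch (the helper name loadOp is invented for illustration; this is not the compiler's code):

package main

import "fmt"

// loadOp mirrors the type dispatch in the Load rules: pointers and
// 8-byte ints take MOVDload, floats take the FMOV loads, and narrower
// integers take the zero-extending loads (MOVBZload/MOVHZload/MOVWZload).
func loadOp(sizeBytes int, isFloat bool) string {
	switch {
	case isFloat && sizeBytes == 4:
		return "FMOVSload"
	case isFloat && sizeBytes == 8:
		return "FMOVDload"
	case sizeBytes == 1:
		return "MOVBZload"
	case sizeBytes == 2:
		return "MOVHZload"
	case sizeBytes == 4:
		return "MOVWZload"
	default:
		return "MOVDload" // 8-byte ints and pointers
	}
}

func main() {
	fmt.Println(loadOp(4, false), loadOp(8, true)) // MOVWZload FMOVDload
}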
+func rewriteValueS390X_OpLrot32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lrot32 x [c])
+	// cond:
+	// result: (RLLconst [c&31] x)
+	for {
+		t := v.Type
+		c := v.AuxInt
+		x := v.Args[0]
+		v.reset(OpS390XRLLconst)
+		v.Type = t
+		v.AuxInt = c & 31
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpLrot64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lrot64 x [c])
+	// cond:
+	// result: (RLLGconst [c&63] x)
+	for {
+		t := v.Type
+		c := v.AuxInt
+		x := v.Args[0]
+		v.reset(OpS390XRLLGconst)
+		v.Type = t
+		v.AuxInt = c & 63
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh16x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh16x16 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh16x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh16x32 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst y [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh16x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh16x64 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPUconst y [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v2.AuxInt = 31
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh16x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh16x8 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh32x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh32x16 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh32x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh32x32 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst y [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh32x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh32x64 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPUconst y [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v2.AuxInt = 31
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh32x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh32x8 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh64x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh64x16 x y)
+	// cond:
+	// result: (AND (SLD x y) (SUBEcarrymask (CMPWUconst (MOVHZreg y) [63])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XAND)
+		v0 := b.NewValue0(v.Line, OpS390XSLD, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 63
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh64x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh64x32 x y)
+	// cond:
+	// result: (AND (SLD x y) (SUBEcarrymask (CMPWUconst y [63])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XAND)
+		v0 := b.NewValue0(v.Line, OpS390XSLD, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 63
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
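All of these shift lowerings share one idea: the hardware shift only looks at the low bits of the shift amount, but Go specifies that shifting by the width or more yields 0. So the shifted value is ANDed with a carry-generated mask (SUBEWcarrymask/SUBEcarrymask of an unsigned compare) that is all ones when the amount is in range and all zeros otherwise. A minimal Go sketch of the semantics (illustrative only, not compiler code):

package main

import "fmt"

// lsh32x32 mirrors (Lsh32x32 x y): SLW uses only the low bits of y,
// so the result is ANDed with a mask that is all ones when y <= 31
// (SUBEWcarrymask of CMPWUconst y [31]) and all zeros otherwise,
// giving Go's "shift too far yields 0" semantics.
func lsh32x32(x, y uint32) uint32 {
	shifted := x << (y & 31) // what SLW computes
	var mask uint32
	if y <= 31 { // CMPWUconst y [31]
		mask = ^uint32(0) // carry mask: all ones
	}
	return shifted & mask
}

func main() {
	fmt.Println(lsh32x32(1, 4), lsh32x32(1, 40)) // 16 0
}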
+func rewriteValueS390X_OpLsh64x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh64x64 x y)
+	// cond:
+	// result: (AND (SLD x y) (SUBEcarrymask (CMPUconst y [63])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XAND)
+		v0 := b.NewValue0(v.Line, OpS390XSLD, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v2.AuxInt = 63
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh64x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh64x8 x y)
+	// cond:
+	// result: (AND (SLD x y) (SUBEcarrymask (CMPWUconst (MOVBZreg y) [63])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XAND)
+		v0 := b.NewValue0(v.Line, OpS390XSLD, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 63
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh8x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh8x16 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh8x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh8x32 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst y [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh8x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh8x64 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPUconst y [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v2.AuxInt = 31
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpLsh8x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh8x8 x y)
+	// cond:
+	// result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpMod16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mod16 x y)
+	// cond:
+	// result: (MODW (MOVHreg x) (MOVHreg y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMODW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpMod16u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mod16u x y)
+	// cond:
+	// result: (MODWU (MOVHZreg x) (MOVHZreg y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMODWU)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpMod32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mod32 x y)
+	// cond:
+	// result: (MODW (MOVWreg x) y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMODW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMod32u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mod32u x y)
+	// cond:
+	// result: (MODWU (MOVWZreg x) y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMODWU)
+		v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMod64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mod64 x y)
+	// cond:
+	// result: (MODD x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMODD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMod64u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mod64u x y)
+	// cond:
+	// result: (MODDU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMODDU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMod8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mod8 x y)
+	// cond:
+	// result: (MODW (MOVBreg x) (MOVBreg y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMODW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpMod8u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mod8u x y)
+	// cond:
+	// result: (MODWU (MOVBZreg x) (MOVBZreg y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMODWU)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+}
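The Mod rules widen sub-word operands before using the 32-bit remainder instructions (MODW/MODWU), with signed variants sign-extending and unsigned variants zero-extending; 64-bit values go straight to MODD/MODDU. A minimal Go sketch of the 8-bit signed case (illustrative only):

package main

import "fmt"

// mod8 mirrors (Mod8 x y) -> (MODW (MOVBreg x) (MOVBreg y)): both
// operands are sign-extended so the 32-bit remainder instruction sees
// the intended values; the low byte of the result is the answer.
func mod8(x, y int8) int8 {
	return int8(int32(x) % int32(y))
}

func main() {
	fmt.Println(mod8(-7, 3)) // -1 (Go's remainder truncates toward zero)
}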
+func rewriteValueS390X_OpMove(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Move [s] _ _ mem)
+	// cond: SizeAndAlign(s).Size() == 0
+	// result: mem
+	for {
+		s := v.AuxInt
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 0) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = mem.Type
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 1
+	// result: (MOVBstore dst (MOVBZload src mem) mem)
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 1) {
+			break
+		}
+		v.reset(OpS390XMOVBstore)
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 2
+	// result: (MOVHstore dst (MOVHZload src mem) mem)
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 2) {
+			break
+		}
+		v.reset(OpS390XMOVHstore)
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 4
+	// result: (MOVWstore dst (MOVWZload src mem) mem)
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 4) {
+			break
+		}
+		v.reset(OpS390XMOVWstore)
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 8
+	// result: (MOVDstore dst (MOVDload src mem) mem)
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 8) {
+			break
+		}
+		v.reset(OpS390XMOVDstore)
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 16
+	// result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 16) {
+			break
+		}
+		v.reset(OpS390XMOVDstore)
+		v.AuxInt = 8
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+		v0.AuxInt = 8
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
+		v1.AddArg(dst)
+		v2 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+		v2.AddArg(src)
+		v2.AddArg(mem)
+		v1.AddArg(v2)
+		v1.AddArg(mem)
+		v.AddArg(v1)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 24
+	// result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 24) {
+			break
+		}
+		v.reset(OpS390XMOVDstore)
+		v.AuxInt = 16
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+		v0.AuxInt = 16
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
+		v1.AuxInt = 8
+		v1.AddArg(dst)
+		v2 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+		v2.AuxInt = 8
+		v2.AddArg(src)
+		v2.AddArg(mem)
+		v1.AddArg(v2)
+		v3 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
+		v3.AddArg(dst)
+		v4 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+		v4.AddArg(src)
+		v4.AddArg(mem)
+		v3.AddArg(v4)
+		v3.AddArg(mem)
+		v1.AddArg(v3)
+		v.AddArg(v1)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 3
+	// result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 3) {
+			break
+		}
+		v.reset(OpS390XMOVBstore)
+		v.AuxInt = 2
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
+		v0.AuxInt = 2
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHstore, TypeMem)
+		v1.AddArg(dst)
+		v2 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+		v2.AddArg(src)
+		v2.AddArg(mem)
+		v1.AddArg(v2)
+		v1.AddArg(mem)
+		v.AddArg(v1)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 5
+	// result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 5) {
+			break
+		}
+		v.reset(OpS390XMOVBstore)
+		v.AuxInt = 4
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
+		v0.AuxInt = 4
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
+		v1.AddArg(dst)
+		v2 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+		v2.AddArg(src)
+		v2.AddArg(mem)
+		v1.AddArg(v2)
+		v1.AddArg(mem)
+		v.AddArg(v1)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 6
+	// result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 6) {
+			break
+		}
+		v.reset(OpS390XMOVHstore)
+		v.AuxInt = 4
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+		v0.AuxInt = 4
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
+		v1.AddArg(dst)
+		v2 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+		v2.AddArg(src)
+		v2.AddArg(mem)
+		v1.AddArg(v2)
+		v1.AddArg(mem)
+		v.AddArg(v1)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() == 7
+	// result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() == 7) {
+			break
+		}
+		v.reset(OpS390XMOVBstore)
+		v.AuxInt = 6
+		v.AddArg(dst)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
+		v0.AuxInt = 6
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHstore, TypeMem)
+		v1.AuxInt = 4
+		v1.AddArg(dst)
+		v2 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+		v2.AuxInt = 4
+		v2.AddArg(src)
+		v2.AddArg(mem)
+		v1.AddArg(v2)
+		v3 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
+		v3.AddArg(dst)
+		v4 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+		v4.AddArg(src)
+		v4.AddArg(mem)
+		v3.AddArg(v4)
+		v3.AddArg(mem)
+		v1.AddArg(v3)
+		v.AddArg(v1)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256
+	// result: (MVC [makeValAndOff(SizeAndAlign(s).Size(), 0)] dst src mem)
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256) {
+			break
+		}
+		v.reset(OpS390XMVC)
+		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size(), 0)
+		v.AddArg(dst)
+		v.AddArg(src)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512
+	// result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512) {
+			break
+		}
+		v.reset(OpS390XMVC)
+		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-256, 256)
+		v.AddArg(dst)
+		v.AddArg(src)
+		v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+		v0.AuxInt = makeValAndOff(256, 0)
+		v0.AddArg(dst)
+		v0.AddArg(src)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768
+	// result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768) {
+			break
+		}
+		v.reset(OpS390XMVC)
+		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-512, 512)
+		v.AddArg(dst)
+		v.AddArg(src)
+		v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+		v0.AuxInt = makeValAndOff(256, 256)
+		v0.AddArg(dst)
+		v0.AddArg(src)
+		v1 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+		v1.AuxInt = makeValAndOff(256, 0)
+		v1.AddArg(dst)
+		v1.AddArg(src)
+		v1.AddArg(mem)
+		v0.AddArg(v1)
+		v.AddArg(v0)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024
+	// result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024) {
+			break
+		}
+		v.reset(OpS390XMVC)
+		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-768, 768)
+		v.AddArg(dst)
+		v.AddArg(src)
+		v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+		v0.AuxInt = makeValAndOff(256, 512)
+		v0.AddArg(dst)
+		v0.AddArg(src)
+		v1 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+		v1.AuxInt = makeValAndOff(256, 256)
+		v1.AddArg(dst)
+		v1.AddArg(src)
+		v2 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+		v2.AuxInt = makeValAndOff(256, 0)
+		v2.AddArg(dst)
+		v2.AddArg(src)
+		v2.AddArg(mem)
+		v1.AddArg(v2)
+		v0.AddArg(v1)
+		v.AddArg(v0)
+		return true
+	}
+	// match: (Move [s] dst src mem)
+	// cond: SizeAndAlign(s).Size() > 1024
+	// result: (LoweredMove [SizeAndAlign(s).Size()%256] dst src (ADDconst src [(SizeAndAlign(s).Size()/256)*256]) mem)
+	for {
+		s := v.AuxInt
+		dst := v.Args[0]
+		src := v.Args[1]
+		mem := v.Args[2]
+		if !(SizeAndAlign(s).Size() > 1024) {
+			break
+		}
+		v.reset(OpS390XLoweredMove)
+		v.AuxInt = SizeAndAlign(s).Size() % 256
+		v.AddArg(dst)
+		v.AddArg(src)
+		v0 := b.NewValue0(v.Line, OpS390XADDconst, src.Type)
+		v0.AuxInt = (SizeAndAlign(s).Size() / 256) * 256
+		v0.AddArg(src)
+		v.AddArg(v0)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
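The Move rules above form a size ladder: tiny and power-of-two sizes become individual load/store pairs, anything up to 256 bytes becomes a single MVC (whose length is encoded in the ValAndOff aux), up to 1024 bytes a chain of at most four MVCs at increasing offsets, and larger copies fall back to the LoweredMove loop with the residue recorded in AuxInt. A minimal Go sketch of that selection (the helper name movePlan is invented; illustrative only):

package main

import "fmt"

// movePlan mirrors the size ladder in the Move rules.
func movePlan(size int64) string {
	switch {
	case size == 0:
		return "no-op"
	case size <= 8 || size == 16 || size == 24:
		return "load/store pairs"
	case size <= 256:
		return "1 MVC"
	case size <= 1024:
		return fmt.Sprintf("%d MVCs", (size+255)/256)
	default:
		return "LoweredMove loop"
	}
}

func main() {
	fmt.Println(movePlan(7), movePlan(300), movePlan(4096))
	// load/store pairs 2 MVCs LoweredMove loop
}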
+func rewriteValueS390X_OpMul16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul16 x y)
+	// cond:
+	// result: (MULLW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULLW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMul32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul32 x y)
+	// cond:
+	// result: (MULLW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULLW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMul32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul32F x y)
+	// cond:
+	// result: (FMULS x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFMULS)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMul64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul64 x y)
+	// cond:
+	// result: (MULLD x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULLD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMul64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul64F x y)
+	// cond:
+	// result: (FMUL x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFMUL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpMul8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul8 x y)
+	// cond:
+	// result: (MULLW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULLW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeg16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg16 x)
+	// cond:
+	// result: (NEGW (MOVHreg x))
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XNEGW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeg32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg32 x)
+	// cond:
+	// result: (NEGW x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XNEGW)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeg32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg32F x)
+	// cond:
+	// result: (FNEGS x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XFNEGS)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeg64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg64 x)
+	// cond:
+	// result: (NEG x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XNEG)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeg64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg64F x)
+	// cond:
+	// result: (FNEG x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XFNEG)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeg8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg8 x)
+	// cond:
+	// result: (NEGW (MOVBreg x))
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XNEGW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeq16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq16 x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeq32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq32 x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeq32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq32F x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeq64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq64 x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeq64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq64F x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeq8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq8 x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
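One detail worth noting in the Neg rules: 32- and 64-bit negation maps directly to NEGW/NEG, but the 16- and 8-bit forms sign-extend first so the 32-bit negate operates on the intended value. A minimal Go sketch (illustrative only):

package main

import "fmt"

// neg16 mirrors (Neg16 x) -> (NEGW (MOVHreg x)): the operand is
// sign-extended, negated at 32 bits, and the low 16 bits are the result.
func neg16(x int16) int16 {
	return int16(-int32(x)) // NEGW (MOVHreg x)
}

func main() {
	fmt.Println(neg16(-32768)) // -32768: negation wraps at 16 bits
}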
+func rewriteValueS390X_OpNeqB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (NeqB x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpNeqPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (NeqPtr x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpNilCheck(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (NilCheck ptr mem)
+	// cond:
+	// result: (LoweredNilCheck ptr mem)
+	for {
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		v.reset(OpS390XLoweredNilCheck)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+}
+func rewriteValueS390X_OpNot(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Not x)
+	// cond:
+	// result: (XORWconst [1] x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XXORWconst)
+		v.AuxInt = 1
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpOffPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (OffPtr [off] ptr:(SP))
+	// cond:
+	// result: (MOVDaddr [off] ptr)
+	for {
+		off := v.AuxInt
+		ptr := v.Args[0]
+		if ptr.Op != OpSP {
+			break
+		}
+		v.reset(OpS390XMOVDaddr)
+		v.AuxInt = off
+		v.AddArg(ptr)
+		return true
+	}
+	// match: (OffPtr [off] ptr)
+	// cond: is32Bit(off)
+	// result: (ADDconst [off] ptr)
+	for {
+		off := v.AuxInt
+		ptr := v.Args[0]
+		if !(is32Bit(off)) {
+			break
+		}
+		v.reset(OpS390XADDconst)
+		v.AuxInt = off
+		v.AddArg(ptr)
+		return true
+	}
+	// match: (OffPtr [off] ptr)
+	// cond:
+	// result: (ADD (MOVDconst [off]) ptr)
+	for {
+		off := v.AuxInt
+		ptr := v.Args[0]
+		v.reset(OpS390XADD)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = off
+		v.AddArg(v0)
+		v.AddArg(ptr)
+		return true
+	}
+}
+func rewriteValueS390X_OpOr16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Or16 x y)
+	// cond:
+	// result: (ORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpOr32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Or32 x y)
+	// cond:
+	// result: (ORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpOr64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Or64 x y)
+	// cond:
+	// result: (OR x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XOR)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpOr8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Or8 x y)
+	// cond:
+	// result: (ORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpOrB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (OrB x y)
+	// cond:
+	// result: (ORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpRsh16Ux16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16Ux16 x y)
+	// cond:
+	// result: (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [15])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 15
+		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpRsh16Ux32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16Ux32 x y)
+	// cond:
+	// result: (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPWUconst y [15])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 15
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpRsh16Ux64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16Ux64 x y)
+	// cond:
+	// result: (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPUconst y [15])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v3.AuxInt = 15
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpRsh16Ux8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16Ux8 x y)
+	// cond:
+	// result: (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [15])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 15
+		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpRsh16x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16x16 x y)
+	// cond:
+	// result: (SRAW (MOVHreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [15])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+		v1.AddArg(y)
+		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v4.AuxInt = 15
+		v5 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v5.AddArg(y)
+		v4.AddArg(v5)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+func rewriteValueS390X_OpRsh16x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16x32 x y)
+	// cond:
+	// result:
(SRAW (MOVHreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst y [15]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v4.AuxInt = 15 + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh16x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x64 x y) + // cond: + // result: (SRAW (MOVHreg x) (OR y (NOT (SUBEcarrymask (CMPUconst y [15]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XOR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Line, OpS390XNOT, y.Type) + v3 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type) + v4 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags) + v4.AuxInt = 15 + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh16x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x8 x y) + // cond: + // result: (SRAW (MOVHreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [15]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v4.AuxInt = 15 + v5 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v5.AddArg(y) + v4.AddArg(v5) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh32Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux16 x y) + // cond: + // result: (ANDW (SRW x y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v0 := b.NewValue0(v.Line, OpS390XSRW, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t) + v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v2.AuxInt = 31 + v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh32Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux32 x y) + // cond: + // result: (ANDW (SRW x y) (SUBEWcarrymask (CMPWUconst y [31]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v0 := b.NewValue0(v.Line, OpS390XSRW, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t) + v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v2.AuxInt = 31 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh32Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux64 x y) + // cond: + // result: (ANDW 
(SRW x y) (SUBEWcarrymask (CMPUconst y [31]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v0 := b.NewValue0(v.Line, OpS390XSRW, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t) + v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags) + v2.AuxInt = 31 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh32Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux8 x y) + // cond: + // result: (ANDW (SRW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v0 := b.NewValue0(v.Line, OpS390XSRW, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t) + v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v2.AuxInt = 31 + v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh32x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x16 x y) + // cond: + // result: (SRAW x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [31]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 31 + v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRsh32x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x32 x y) + // cond: + // result: (SRAW x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst y [31]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 31 + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRsh32x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x64 x y) + // cond: + // result: (SRAW x (OR y (NOT (SUBEcarrymask (CMPUconst y [31]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpS390XOR, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpS390XNOT, y.Type) + v2 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags) + v3.AuxInt = 31 + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRsh32x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x8 x y) + // cond: + // result: (SRAW x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v2 := 
b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 31 + v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRsh64Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64Ux16 x y) + // cond: + // result: (AND (SRD x y) (SUBEcarrymask (CMPWUconst (MOVHZreg y) [63]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XAND) + v0 := b.NewValue0(v.Line, OpS390XSRD, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t) + v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v2.AuxInt = 63 + v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh64Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64Ux32 x y) + // cond: + // result: (AND (SRD x y) (SUBEcarrymask (CMPWUconst y [63]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XAND) + v0 := b.NewValue0(v.Line, OpS390XSRD, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t) + v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v2.AuxInt = 63 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh64Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64Ux64 x y) + // cond: + // result: (AND (SRD x y) (SUBEcarrymask (CMPUconst y [63]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XAND) + v0 := b.NewValue0(v.Line, OpS390XSRD, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t) + v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags) + v2.AuxInt = 63 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh64Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64Ux8 x y) + // cond: + // result: (AND (SRD x y) (SUBEcarrymask (CMPWUconst (MOVBZreg y) [63]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XAND) + v0 := b.NewValue0(v.Line, OpS390XSRD, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t) + v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v2.AuxInt = 63 + v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh64x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64x16 x y) + // cond: + // result: (SRAD x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [63]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAD) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 63 + v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRsh64x32(v *Value, config *Config) bool { + b := 
v.Block + _ = b + // match: (Rsh64x32 x y) + // cond: + // result: (SRAD x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst y [63]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAD) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 63 + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRsh64x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64x64 x y) + // cond: + // result: (SRAD x (OR y (NOT (SUBEcarrymask (CMPUconst y [63]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAD) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpS390XOR, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpS390XNOT, y.Type) + v2 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags) + v3.AuxInt = 63 + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRsh64x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64x8 x y) + // cond: + // result: (SRAD x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [63]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAD) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 63 + v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRsh8Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux16 x y) + // cond: + // result: (ANDW (SRW (MOVBZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [7]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v0 := b.NewValue0(v.Line, OpS390XSRW, t) + v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 7 + v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpRsh8Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux32 x y) + // cond: + // result: (ANDW (SRW (MOVBZreg x) y) (SUBEWcarrymask (CMPWUconst y [7]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v0 := b.NewValue0(v.Line, OpS390XSRW, t) + v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 7 + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpRsh8Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux64 x y) + // cond: + // result: (ANDW (SRW (MOVBZreg x) y) 
(SUBEWcarrymask (CMPUconst y [7]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v0 := b.NewValue0(v.Line, OpS390XSRW, t) + v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t) + v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags) + v3.AuxInt = 7 + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpRsh8Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux8 x y) + // cond: + // result: (ANDW (SRW (MOVBZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [7]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XANDW) + v0 := b.NewValue0(v.Line, OpS390XSRW, t) + v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t) + v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v3.AuxInt = 7 + v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueS390X_OpRsh8x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x16 x y) + // cond: + // result: (SRAW (MOVBreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [7]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v4.AuxInt = 7 + v5 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v5.AddArg(y) + v4.AddArg(v5) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh8x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x32 x y) + // cond: + // result: (SRAW (MOVBreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst y [7]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v4.AuxInt = 7 + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpRsh8x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x64 x y) + // cond: + // result: (SRAW (MOVBreg x) (OR y (NOT (SUBEcarrymask (CMPUconst y [7]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XOR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Line, OpS390XNOT, y.Type) + v3 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type) + v4 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags) + v4.AuxInt = 7 + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func 
rewriteValueS390X_OpRsh8x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x8 x y) + // cond: + // result: (SRAW (MOVBreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [7]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XSRAW) + v.Type = t + v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XORW, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type) + v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type) + v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v4.AuxInt = 7 + v5 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64()) + v5.AddArg(y) + v4.AddArg(v5) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueS390X_OpS390XADD(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ADD x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (ADDconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ADD (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (ADDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ADD x (MOVDaddr [c] {s} y)) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (MOVDaddridx [c] {s} x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + c := v_1.AuxInt + s := v_1.Aux + y := v_1.Args[0] + if !(x.Op != OpSB && y.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD (MOVDaddr [c] {s} x) y) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (MOVDaddridx [c] {s} x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + c := v_0.AuxInt + s := v_0.Aux + x := v_0.Args[0] + y := v.Args[1] + if !(x.Op != OpSB && y.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD x (NEG y)) + // cond: + // result: (SUB x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XNEG { + break + } + y := v_1.Args[0] + v.reset(OpS390XSUB) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XADDW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ADDW x (MOVDconst [c])) + // cond: + // result: (ADDWconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XADDWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ADDW (MOVDconst [c]) x) + // cond: + // result: (ADDWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XADDWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ADDW x (NEGW y)) + // cond: + // result: (SUBW x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XNEGW { + break + } + y := v_1.Args[0] + v.reset(OpS390XSUBW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XADDWconst(v *Value, config *Config) bool { 
+ b := v.Block + _ = b + // match: (ADDWconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ADDWconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [int64(int32(c+d))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(int32(c + d)) + return true + } + // match: (ADDWconst [c] (ADDWconst [d] x)) + // cond: + // result: (ADDWconst [int64(int32(c+d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XADDWconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XADDWconst) + v.AuxInt = int64(int32(c + d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XADDconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ADDconst [c] (MOVDaddr [d] {s} x)) + // cond: ((c+d)&1 == 0) && is32Bit(c+d) + // result: (MOVDaddr [c+d] {s} x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + d := v_0.AuxInt + s := v_0.Aux + x := v_0.Args[0] + if !(((c+d)&1 == 0) && is32Bit(c+d)) { + break + } + v.reset(OpS390XMOVDaddr) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + return true + } + // match: (ADDconst [c] (MOVDaddr [d] {s} x)) + // cond: x.Op != OpSB && is32Bit(c+d) + // result: (MOVDaddr [c+d] {s} x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + d := v_0.AuxInt + s := v_0.Aux + x := v_0.Args[0] + if !(x.Op != OpSB && is32Bit(c+d)) { + break + } + v.reset(OpS390XMOVDaddr) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + return true + } + // match: (ADDconst [c] (MOVDaddridx [d] {s} x y)) + // cond: is32Bit(c+d) + // result: (MOVDaddridx [c+d] {s} x y) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + d := v_0.AuxInt + s := v_0.Aux + x := v_0.Args[0] + y := v_0.Args[1] + if !(is32Bit(c + d)) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADDconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ADDconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c+d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c + d + return true + } + // match: (ADDconst [c] (ADDconst [d] x)) + // cond: is32Bit(c+d) + // result: (ADDconst [c+d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + if !(is32Bit(c + d)) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = c + d + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XAND(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (AND x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (ANDconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpS390XANDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (AND (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (ANDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := 
v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpS390XANDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (AND x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XANDW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ANDW x (MOVDconst [c])) + // cond: + // result: (ANDWconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XANDWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ANDW (MOVDconst [c]) x) + // cond: + // result: (ANDWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XANDWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ANDW x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XANDWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ANDWconst [c] (ANDWconst [d] x)) + // cond: + // result: (ANDWconst [c & d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XANDWconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XANDWconst) + v.AuxInt = c & d + v.AddArg(x) + return true + } + // match: (ANDWconst [c] _) + // cond: int32(c)==0 + // result: (MOVDconst [0]) + for { + c := v.AuxInt + if !(int32(c) == 0) { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + // match: (ANDWconst [c] x) + // cond: int32(c)==-1 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == -1) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ANDWconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c&d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c & d + return true + } + return false +} +func rewriteValueS390X_OpS390XANDconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ANDconst [c] (ANDconst [d] x)) + // cond: + // result: (ANDconst [c & d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XANDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XANDconst) + v.AuxInt = c & d + v.AddArg(x) + return true + } + // match: (ANDconst [0xFF] x) + // cond: + // result: (MOVBZreg x) + for { + if v.AuxInt != 0xFF { + break + } + x := v.Args[0] + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (ANDconst [0xFFFF] x) + // cond: + // result: (MOVHZreg x) + for { + if v.AuxInt != 0xFFFF { + break + } + x := v.Args[0] + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (ANDconst [0xFFFFFFFF] x) + // cond: + // result: (MOVWZreg x) + for { + if v.AuxInt != 0xFFFFFFFF { + break + } + x := v.Args[0] + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true + } + // match: (ANDconst [0] _) + // cond: + // result: (MOVDconst [0]) + for { + if v.AuxInt != 0 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + // match: (ANDconst [-1] x) + // cond: + // result: x + for { + if v.AuxInt != -1 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + 
// match: (ANDconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c&d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c & d + return true + } + return false +} +func rewriteValueS390X_OpS390XCMP(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMP x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (CMPconst x [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpS390XCMPconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (CMP (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (InvertFlags (CMPconst x [c])) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Line, OpS390XCMPconst, TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPU(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMPU x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (CMPUconst x [int64(uint32(c))]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpS390XCMPUconst) + v.AuxInt = int64(uint32(c)) + v.AddArg(x) + return true + } + // match: (CMPU (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (InvertFlags (CMPUconst x [int64(uint32(c))])) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags) + v0.AuxInt = int64(uint32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPUconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMPUconst (MOVDconst [x]) [y]) + // cond: uint64(x)==uint64(y) + // result: (FlagEQ) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + x := v_0.AuxInt + if !(uint64(x) == uint64(y)) { + break + } + v.reset(OpS390XFlagEQ) + return true + } + // match: (CMPUconst (MOVDconst [x]) [y]) + // cond: uint64(x)uint64(y) + // result: (FlagGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + x := v_0.AuxInt + if !(uint64(x) > uint64(y)) { + break + } + v.reset(OpS390XFlagGT) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMPW x (MOVDconst [c])) + // cond: + // result: (CMPWconst x [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XCMPWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (CMPW (MOVDconst [c]) x) + // cond: + // result: (InvertFlags (CMPWconst x [c])) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Line, OpS390XCMPWconst, TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPWU(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMPWU x (MOVDconst [c])) + // cond: + // result: (CMPWUconst x 
[int64(uint32(c))]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XCMPWUconst) + v.AuxInt = int64(uint32(c)) + v.AddArg(x) + return true + } + // match: (CMPWU (MOVDconst [c]) x) + // cond: + // result: (InvertFlags (CMPWUconst x [int64(uint32(c))])) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags) + v0.AuxInt = int64(uint32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPWUconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMPWUconst (MOVDconst [x]) [y]) + // cond: uint32(x)==uint32(y) + // result: (FlagEQ) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + x := v_0.AuxInt + if !(uint32(x) == uint32(y)) { + break + } + v.reset(OpS390XFlagEQ) + return true + } + // match: (CMPWUconst (MOVDconst [x]) [y]) + // cond: uint32(x)uint32(y) + // result: (FlagGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + x := v_0.AuxInt + if !(uint32(x) > uint32(y)) { + break + } + v.reset(OpS390XFlagGT) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMPWconst (MOVDconst [x]) [y]) + // cond: int32(x)==int32(y) + // result: (FlagEQ) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + x := v_0.AuxInt + if !(int32(x) == int32(y)) { + break + } + v.reset(OpS390XFlagEQ) + return true + } + // match: (CMPWconst (MOVDconst [x]) [y]) + // cond: int32(x)int32(y) + // result: (FlagGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + x := v_0.AuxInt + if !(int32(x) > int32(y)) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (CMPWconst (SRWconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 32 && (1<y + // result: (FlagGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + x := v_0.AuxInt + if !(x > y) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (CMPconst (MOVBZreg _) [c]) + // cond: 0xFF < c + // result: (FlagLT) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVBZreg { + break + } + if !(0xFF < c) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (CMPconst (MOVHZreg _) [c]) + // cond: 0xFFFF < c + // result: (FlagLT) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVHZreg { + break + } + if !(0xFFFF < c) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (CMPconst (MOVWZreg _) [c]) + // cond: 0xFFFFFFFF < c + // result: (FlagLT) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVWZreg { + break + } + if !(0xFFFFFFFF < c) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (CMPconst (SRDconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 64 && (1< [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVBZload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVBZreg 
x:(MOVBZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZloadidx { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVBZloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVBZreg (ANDWconst [c] x)) + // cond: + // result: (ANDconst [c & 0xff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XANDconst) + v.AuxInt = c & 0xff + v.AddArg(x) + return true + } + // match: (MOVBZreg (ANDconst [c] x)) + // cond: + // result: (ANDconst [c & 0xff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XANDconst) + v.AuxInt = c & 0xff + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVBload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBreg(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBreg x:(MOVBload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVBload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVBreg (ANDWconst [c] x)) + // cond: c & 0x80 == 0 + // result: (ANDconst [c & 0x7f] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(OpS390XANDconst) + v.AuxInt = c & 0x7f + v.AddArg(x) + return true + } + // match: (MOVBreg (ANDconst [c] x)) + // cond: c & 0x80 == 0 + // result: (ANDconst [c & 0x7f] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(OpS390XANDconst) + v.AuxInt = c & 0x7f + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBstore(v 
*Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVBreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVBZreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpS390XMOVBstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: validOff(off) && ptr.Op != OpSB + // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off) && ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = makeValAndOff(int64(int8(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVBstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVBstoreidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVBstoreidx [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if 
!(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVHstore [i-1] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XMOVBstore { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpS390XSRDconst { + break + } + if x_1.AuxInt != 8 { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVHstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVHstore [i-1] {s} p w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w0 := v.Args[1] + if w0.Op != OpS390XSRDconst { + break + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVBstore { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpS390XSRDconst { + break + } + if x_1.AuxInt != j+8 { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVHstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) + // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) + // result: (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpS390XMOVBstoreconst { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + mem := x.Args[1] 
+ if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xff|ValAndOff(a).Val()<<8, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) + // cond: + // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVBstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) + // cond: + // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVBstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVHstoreidx [i-1] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { + break + } + if x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { + break + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { + break + } + if x_2.AuxInt != j+8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDEQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDEQ x y (InvertFlags cmp)) + // cond: + // result: (MOVDEQ x y cmp) + for { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpS390XMOVDEQ) + v.AddArg(x) + v.AddArg(y) + 
v.AddArg(cmp) + return true + } + // match: (MOVDEQ _ x (FlagEQ)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDEQ y _ (FlagLT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDEQ y _ (FlagGT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDGE(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDGE x y (InvertFlags cmp)) + // cond: + // result: (MOVDLE x y cmp) + for { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpS390XMOVDLE) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDGE _ x (FlagEQ)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDGE y _ (FlagLT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDGE _ x (FlagGT)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDGT(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDGT x y (InvertFlags cmp)) + // cond: + // result: (MOVDLT x y cmp) + for { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpS390XMOVDLT) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDGT y _ (FlagEQ)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDGT y _ (FlagLT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDGT _ x (FlagGT)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDLE(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDLE x y (InvertFlags cmp)) + // cond: + // result: (MOVDGE x y cmp) + for { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpS390XMOVDGE) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDLE _ x (FlagEQ)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDLE _ x (FlagLT)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT 
{ + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDLE y _ (FlagGT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDLT(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDLT x y (InvertFlags cmp)) + // cond: + // result: (MOVDGT x y cmp) + for { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpS390XMOVDGT) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDLT y _ (FlagEQ)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDLT _ x (FlagLT)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDLT y _ (FlagGT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDNE(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDNE x y (InvertFlags cmp)) + // cond: + // result: (MOVDNE x y cmp) + for { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpS390XMOVDNE) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDNE _ y (FlagEQ)) + // cond: + // result: y + for { + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDNE x _ (FlagLT)) + // cond: + // result: x + for { + x := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDNE x _ (FlagGT)) + // cond: + // result: x + for { + x := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDaddr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDaddr [c] {s} (ADDconst [d] x)) + // cond: ((c+d)&1 == 0) && is32Bit(c+d) + // result: (MOVDaddr [c+d] {s} x) + for { + c := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + if !(((c+d)&1 == 0) && is32Bit(c+d)) { + break + } + v.reset(OpS390XMOVDaddr) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + return true + } + // match: (MOVDaddr [c] {s} (ADDconst [d] x)) + // cond: x.Op != OpSB && is32Bit(c+d) + // result: (MOVDaddr [c+d] {s} x) + for { + c := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + if !(x.Op != OpSB && is32Bit(c+d)) { + break + } + v.reset(OpS390XMOVDaddr) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + return true + } + // match: (MOVDaddr [c] {s} (ADD x y)) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (MOVDaddridx [c] {s} x y) + for { + c := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + 
x := v_0.Args[0] + y := v_0.Args[1] + if !(x.Op != OpSB && y.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MOVDaddr [off1] {sym1} (MOVDaddr [off2] {sym2} x)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDaddr [off1+off2] {mergeSym(sym1,sym2)} x) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + x := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVDaddr) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + return true + } + // match: (MOVDaddr [off1] {sym1} (MOVDaddridx [off2] {sym2} x y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + x := v_0.Args[0] + y := v_0.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDaddridx(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y) + // cond: is32Bit(c+d) && x.Op != OpSB + // result: (MOVDaddridx [c+d] {s} x y) + for { + c := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(c+d) && x.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MOVDaddridx [c] {s} x (ADDconst [d] y)) + // cond: is32Bit(c+d) && y.Op != OpSB + // result: (MOVDaddridx [c+d] {s} x y) + for { + c := v.AuxInt + s := v.Aux + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + y := v_1.Args[0] + if !(is32Bit(c+d) && y.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB + // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := v.AuxInt + sym1 := v.Aux + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + y := v_1.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func 
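
All of the MOVDaddr/MOVDaddridx folding above is gated the same way: constant offsets (ADDconst) and symbols (mergeSym) may migrate into the addressing mode only while the combined displacement still fits a signed 32-bit field, and SB-relative addresses must keep the static base in the base slot (the x.Op != OpSB / y.Op != OpSB guards). The is32Bit guard is a tiny helper in cmd/compile/internal/ssa/rewrite.go; essentially:

    package main

    import "fmt"

    // is32Bit reports whether n survives truncation to int32, i.e.
    // whether two folded displacements are still encodable in one
    // addressing mode.
    func is32Bit(n int64) bool {
        return n == int64(int32(n))
    }

    func main() {
        fmt.Println(is32Bit(1<<31 - 1)) // true: fold is legal
        fmt.Println(is32Bit(1 << 31))   // false: rule declines, the ADD stays
    }
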
rewriteValueS390X_OpS390XMOVDload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVDload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpS390XMOVDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVDload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVDloadidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVDload [off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVDloadidx [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVDloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDloadidx(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: + // result: (MOVDloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpS390XMOVDloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: + // result: (MOVDloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx 
:= v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVDloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVDstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpS390XMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB + // result: (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validValAndOff(c, off) && int64(int16(c)) == c && ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVDstoreidx [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [i] {s} p w3 x2:(MOVDstore [i-8] {s} p w2 x1:(MOVDstore [i-16] {s} p w1 x0:(MOVDstore [i-24] {s} p w0 mem)))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && is20Bit(i-24) && clobber(x0) && clobber(x1) && clobber(x2) + // result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem) + for { + i := v.AuxInt + s := v.Aux + p := 
v.Args[0] + w3 := v.Args[1] + x2 := v.Args[2] + if x2.Op != OpS390XMOVDstore { + break + } + if x2.AuxInt != i-8 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + w2 := x2.Args[1] + x1 := x2.Args[2] + if x1.Op != OpS390XMOVDstore { + break + } + if x1.AuxInt != i-16 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + w1 := x1.Args[1] + x0 := x1.Args[2] + if x0.Op != OpS390XMOVDstore { + break + } + if x0.AuxInt != i-24 { + break + } + if x0.Aux != s { + break + } + if p != x0.Args[0] { + break + } + w0 := x0.Args[1] + mem := x0.Args[2] + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && is20Bit(i-24) && clobber(x0) && clobber(x1) && clobber(x2)) { + break + } + v.reset(OpS390XSTMG4) + v.AuxInt = i - 24 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(w2) + v.AddArg(w3) + v.AddArg(mem) + return true + } + // match: (MOVDstore [i] {s} p w2 x1:(MOVDstore [i-8] {s} p w1 x0:(MOVDstore [i-16] {s} p w0 mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && is20Bit(i-16) && clobber(x0) && clobber(x1) + // result: (STMG3 [i-16] {s} p w0 w1 w2 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w2 := v.Args[1] + x1 := v.Args[2] + if x1.Op != OpS390XMOVDstore { + break + } + if x1.AuxInt != i-8 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + w1 := x1.Args[1] + x0 := x1.Args[2] + if x0.Op != OpS390XMOVDstore { + break + } + if x0.AuxInt != i-16 { + break + } + if x0.Aux != s { + break + } + if p != x0.Args[0] { + break + } + w0 := x0.Args[1] + mem := x0.Args[2] + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && is20Bit(i-16) && clobber(x0) && clobber(x1)) { + break + } + v.reset(OpS390XSTMG3) + v.AuxInt = i - 16 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(w2) + v.AddArg(mem) + return true + } + // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) + // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x) + // result: (STMG2 [i-8] {s} p w0 w1 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w1 := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XMOVDstore { + break + } + if x.AuxInt != i-8 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + w0 := x.Args[1] + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x)) { + break + } + v.reset(OpS390XSTMG2) + v.AuxInt = i - 8 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off := v_0.AuxInt + sym2 := v_0.Aux + ptr 
:= v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) + // cond: + // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) + // cond: + // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHZload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVHstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVHZload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpS390XMOVHZload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVHZload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = off1 + off2 + 
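
The storeconst rules just above keep two quantities in the single AuxInt field: the immediate to store and the displacement to store it at, packed as a ValAndOff; canAdd and add then adjust the offset half without disturbing the value half. A sketch of the packing, following the layout the ssa package uses (value in the high 32 bits, offset in the low 32):

    package main

    import "fmt"

    // ValAndOff packs a 32-bit value and a 32-bit offset into one int64,
    // the form the MOVDstoreconst/MOVHstoreconst rules carry in AuxInt.
    type ValAndOff int64

    func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
    func (x ValAndOff) Off() int64 { return int64(int32(x)) }

    func makeValAndOff(val, off int64) ValAndOff {
        return ValAndOff(val<<32 + int64(uint32(off)))
    }

    func main() {
        vo := makeValAndOff(42, -8)
        fmt.Println(vo.Val(), vo.Off()) // 42 -8
    }
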
v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVHZload [off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVHZloadidx [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: + // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: + // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHZreg(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVHZreg x:(MOVBZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZreg x:(MOVHZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZreg x:(MOVBZreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZreg x:(MOVHZreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVHZload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZloadidx { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVHZloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + 
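
The two @x.Block rules above perform load narrowing in reverse: rather than zero-extending after a load, they re-emit the load itself, in its original block (hence @x.Block), with the extension's already-zero-extended result type. That is only sound when the extension is the load's sole consumer, which is what x.Uses == 1 checks; clobber(x) then marks the old value dead for the deadcode pass. At the source level this is why a widening load needs no separate extension instruction; for example:

    package main

    import "fmt"

    // uint64(*p) can compile to a single zero-extending halfword load
    // (MOVHZload); the MOVHZreg the front end inserts around it is folded
    // away by the rules above once it is the load's only use.
    func load16(p *uint16) uint64 {
        return uint64(*p)
    }

    func main() {
        x := uint16(0xbeef)
        fmt.Printf("%#x\n", load16(&x)) // 0xbeef
    }
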
return true + } + // match: (MOVHZreg (ANDWconst [c] x)) + // cond: + // result: (ANDconst [c & 0xffff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XANDconst) + v.AuxInt = c & 0xffff + v.AddArg(x) + return true + } + // match: (MOVHZreg (ANDconst [c] x)) + // cond: + // result: (ANDconst [c & 0xffff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XANDconst) + v.AuxInt = c & 0xffff + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVHload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHreg(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVHreg x:(MOVBload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBZreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVHload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVHreg (ANDWconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDconst [c & 0x7fff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x8000 == 0) { + break + } + v.reset(OpS390XANDconst) + v.AuxInt = c & 0x7fff + v.AddArg(x) + return true + } + // match: (MOVHreg (ANDconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDconst [c & 0x7fff] x) + 
for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x8000 == 0) { + break + } + v.reset(OpS390XANDconst) + v.AuxInt = c & 0x7fff + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVHreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVHZreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVHstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpS390XMOVHstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: validOff(off) && ptr.Op != OpSB + // result: (MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off) && ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = makeValAndOff(int64(int16(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVHstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} (ADD 
ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVHstoreidx [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVWstore [i-2] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XMOVHstore { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpS390XSRDconst { + break + } + if x_1.AuxInt != 16 { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVWstore [i-2] {s} p w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w0 := v.Args[1] + if w0.Op != OpS390XSRDconst { + break + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVHstore { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpS390XSRDconst { + break + } + if x_1.AuxInt != j+16 { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem)) + // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVWstoreconst 
[makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpS390XMOVHstoreconst { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xffff|ValAndOff(a).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) + // cond: + // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) + // cond: + // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx [i-2] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { + break + } + if x_2.AuxInt != 16 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { + break + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { + break + } + if x_2.AuxInt != j+16 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func 
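
The halfword pair-merging rules above read naturally once you recall that s390x is big-endian: the halfword stored at the lower address i-2 carries the more significant bits, so writing SRDconst [16] w at i-2 and w at i lays down one contiguous 32-bit value, which a single MOVWstore of w at i-2 reproduces exactly (the j/j+16 variant merges two shifted slices the same way, and the word-sized rules later feed MOVDstore). A byte-level check of that layout:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        const w = uint32(0x11223344)
        var buf [4]byte

        // (MOVHstore [i-2] p (SRDconst [16] w)) then (MOVHstore [i] p w):
        binary.BigEndian.PutUint16(buf[0:2], uint16(w>>16)) // bytes 11 22
        binary.BigEndian.PutUint16(buf[2:4], uint16(w))     // bytes 33 44

        // ...is byte-for-byte a single word store of w at i-2:
        fmt.Println(binary.BigEndian.Uint32(buf[:]) == w) // true
    }
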
rewriteValueS390X_OpS390XMOVWZload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVWstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVWZload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpS390XMOVWZload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVWZload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWZload [off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVWZloadidx [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: + // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: + // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d 
:= v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWZreg(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWZreg x:(MOVBZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVHZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVWZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVBZreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVHZreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVWZreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZload { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVWZload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZloadidx { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVWZloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVWZreg (ANDWconst [c] x)) + // cond: + // result: (ANDconst [c & 0xffffffff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XANDconst) + v.AuxInt = c & 0xffffffff + v.AddArg(x) + return true + } + // match: (MOVWZreg (ANDconst [c] x)) + // cond: + // result: (ANDconst [c & 0xffffffff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpS390XANDconst) + v.AuxInt = c & 0xffffffff + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := 
v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVWload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWreg(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWreg x:(MOVBload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVWload { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBZreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHZreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWreg _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpS390XMOVWreg { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZload { + break + } + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Line, OpS390XMOVWload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVWreg (ANDWconst [c] x)) + // cond: c & 0x80000000 == 0 + // result: (ANDconst [c & 0x7fffffff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x80000000 == 0) { + break + } + v.reset(OpS390XANDconst) + v.AuxInt = c & 0x7fffffff + v.AddArg(x) + return true + } + // match: (MOVWreg (ANDconst [c] x)) + // cond: c & 0x80000000 == 0 + // result: (ANDconst [c & 0x7fffffff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if
!(c&0x80000000 == 0) { + break + } + v.reset(OpS390XANDconst) + v.AuxInt = c & 0x7fffffff + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVWreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVWZreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpS390XMOVWstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB + // result: (MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = makeValAndOff(int64(int32(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVWstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: 
(MOVWstoreidx [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVDstore [i-4] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRDconst { + break + } + if v_1.AuxInt != 32 { + break + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVWstore { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if w != x.Args[1] { + break + } + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVDstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVDstore [i-4] {s} p w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w0 := v.Args[1] + if w0.Op != OpS390XSRDconst { + break + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVWstore { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpS390XSRDconst { + break + } + if x_1.AuxInt != j+32 { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVDstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p w3 x2:(MOVWstore [i-4] {s} p w2 x1:(MOVWstore [i-8] {s} p w1 x0:(MOVWstore [i-12] {s} p w0 mem)))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && is20Bit(i-12) && clobber(x0) && clobber(x1) && clobber(x2) + // result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w3 := v.Args[1] + x2 := v.Args[2] + if x2.Op != OpS390XMOVWstore { + break + } + if x2.AuxInt != i-4 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + w2 := x2.Args[1] + x1 := x2.Args[2] + if x1.Op != OpS390XMOVWstore { + break + } + if x1.AuxInt != i-8 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + w1 := x1.Args[1] + x0 := x1.Args[2] + if x0.Op != OpS390XMOVWstore { + break + } + if x0.AuxInt != i-12 { + break + } + if x0.Aux != s { + break + } + if p != x0.Args[0] { + break + } + w0 := x0.Args[1] + mem := x0.Args[2] + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && is20Bit(i-12) && clobber(x0) && clobber(x1) && clobber(x2)) { + break + } + v.reset(OpS390XSTM4) + v.AuxInt = i - 12 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(w2) + v.AddArg(w3) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p w2 x1:(MOVWstore [i-4] {s} p w1 x0:(MOVWstore [i-8] {s} p w0 mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && is20Bit(i-8) && clobber(x0) && clobber(x1) + // result: (STM3 [i-8] {s} p w0 w1 w2 mem) + for { + i := v.AuxInt + 
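
The STM2/STM3/STM4 rules here, like the STMG2/STMG3/STMG4 rules for 64-bit stores earlier, collapse a chain of stores of consecutive words through the same pointer into one STORE MULTIPLE instruction. The x.Uses == 1 conditions ensure no other value observes the intermediate memory states, clobber retires the absorbed stores, and is20Bit keeps the starting displacement inside the signed 20-bit field the instruction encodes. Source code that produces such a chain looks like the following (whether it actually matches depends on how the stores are scheduled):

    package main

    import "fmt"

    type quad struct{ a, b, c, d uint32 }

    // Four adjacent 32-bit stores: the shape the STM4 rule looks for,
    // which on s390x may become a single STM instruction.
    func fill(q *quad) {
        q.a, q.b, q.c, q.d = 1, 2, 3, 4
    }

    func main() {
        var q quad
        fill(&q)
        fmt.Println(q) // {1 2 3 4}
    }
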
s := v.Aux + p := v.Args[0] + w2 := v.Args[1] + x1 := v.Args[2] + if x1.Op != OpS390XMOVWstore { + break + } + if x1.AuxInt != i-4 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + w1 := x1.Args[1] + x0 := x1.Args[2] + if x0.Op != OpS390XMOVWstore { + break + } + if x0.AuxInt != i-8 { + break + } + if x0.Aux != s { + break + } + if p != x0.Args[0] { + break + } + w0 := x0.Args[1] + mem := x0.Args[2] + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && is20Bit(i-8) && clobber(x0) && clobber(x1)) { + break + } + v.reset(OpS390XSTM3) + v.AuxInt = i - 8 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(w2) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) + // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x) + // result: (STM2 [i-4] {s} p w0 w1 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w1 := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XMOVWstore { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + w0 := x.Args[1] + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x)) { + break + } + v.reset(OpS390XSTM2) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { + break + } + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) + // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) + // result: (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpS390XMOVWstoreconst { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpS390XMOVDstore) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64()) + v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func 
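
The final MOVWstoreconst rule merges two 4-byte constant stores at adjacent offsets into one 8-byte store. The combine expression again encodes big-endianness: a, the constant headed for the lower offset, supplies the high word of the merged 64-bit immediate, and c supplies the low word. A worked check of ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        aVal := int64(0x11223344) // word stored at the lower offset (a)
        cVal := int64(0x55667788) // word stored 4 bytes above it (c)

        merged := uint64(cVal&0xffffffff | aVal<<32)

        var buf [8]byte
        binary.BigEndian.PutUint64(buf[:], merged)
        fmt.Printf("% x\n", buf) // 11 22 33 44 55 66 77 88: both words land in place
    }
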
rewriteValueS390X_OpS390XMOVWstoreidx(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) + // cond: + // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) + // cond: + // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVDstoreidx [i-4] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVWstoreidx { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { + break + } + if x_2.AuxInt != 32 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { + break + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVWstoreidx { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { + break + } + if x_2.AuxInt != j+32 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLD(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULLD x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (MULLDconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpS390XMULLDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULLD (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (MULLDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpS390XMULLDconst) + v.AuxInt = c + 
v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLDconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULLDconst [-1] x) + // cond: + // result: (NEG x) + for { + if v.AuxInt != -1 { + break + } + x := v.Args[0] + v.reset(OpS390XNEG) + v.AddArg(x) + return true + } + // match: (MULLDconst [0] _) + // cond: + // result: (MOVDconst [0]) + for { + if v.AuxInt != 0 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + // match: (MULLDconst [1] x) + // cond: + // result: x + for { + if v.AuxInt != 1 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MULLDconst [c] x) + // cond: isPowerOfTwo(c) + // result: (SLDconst [log2(c)] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpS390XSLDconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULLDconst [c] x) + // cond: isPowerOfTwo(c+1) && c >= 15 + // result: (SUB (SLDconst [log2(c+1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c+1) && c >= 15) { + break + } + v.reset(OpS390XSUB) + v0 := b.NewValue0(v.Line, OpS390XSLDconst, v.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLDconst [c] x) + // cond: isPowerOfTwo(c-1) && c >= 17 + // result: (ADD (SLDconst [log2(c-1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-1) && c >= 17) { + break + } + v.reset(OpS390XADD) + v0 := b.NewValue0(v.Line, OpS390XSLDconst, v.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLDconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c*d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c * d + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULLW x (MOVDconst [c])) + // cond: + // result: (MULLWconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XMULLWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULLW (MOVDconst [c]) x) + // cond: + // result: (MULLWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XMULLWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULLWconst [-1] x) + // cond: + // result: (NEGW x) + for { + if v.AuxInt != -1 { + break + } + x := v.Args[0] + v.reset(OpS390XNEGW) + v.AddArg(x) + return true + } + // match: (MULLWconst [0] _) + // cond: + // result: (MOVDconst [0]) + for { + if v.AuxInt != 0 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + // match: (MULLWconst [1] x) + // cond: + // result: x + for { + if v.AuxInt != 1 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MULLWconst [c] x) + // cond: isPowerOfTwo(c) + // result: (SLWconst [log2(c)] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpS390XSLWconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULLWconst [c] x) + // cond: isPowerOfTwo(c+1) && c >= 15 + 
// result: (SUBW (SLWconst [log2(c+1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c+1) && c >= 15) { + break + } + v.reset(OpS390XSUBW) + v0 := b.NewValue0(v.Line, OpS390XSLWconst, v.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLWconst [c] x) + // cond: isPowerOfTwo(c-1) && c >= 17 + // result: (ADDW (SLWconst [log2(c-1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-1) && c >= 17) { + break + } + v.reset(OpS390XADDW) + v0 := b.NewValue0(v.Line, OpS390XSLWconst, v.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLWconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [int64(int32(c*d))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(int32(c * d)) + return true + } + return false +} +func rewriteValueS390X_OpS390XNEG(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NEG (MOVDconst [c])) + // cond: + // result: (MOVDconst [-c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = -c + return true + } + return false +} +func rewriteValueS390X_OpS390XNEGW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NEGW (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int32(-c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(int32(-c)) + return true + } + return false +} +func rewriteValueS390X_OpS390XNOT(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NOT x) + // cond: true + // result: (XORconst [-1] x) + for { + x := v.Args[0] + if !(true) { + break + } + v.reset(OpS390XXORconst) + v.AuxInt = -1 + v.AddArg(x) + return true + } + // match: (NOT (MOVDconst [c])) + // cond: + // result: (MOVDconst [^c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValueS390X_OpS390XNOTW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NOTW x) + // cond: true + // result: (XORWconst [-1] x) + for { + x := v.Args[0] + if !(true) { + break + } + v.reset(OpS390XXORWconst) + v.AuxInt = -1 + v.AddArg(x) + return true + } + // match: (NOTW (MOVDconst [c])) + // cond: + // result: (MOVDconst [^c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValueS390X_OpS390XOR(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (OR x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (ORconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpS390XORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (ORconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpS390XORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + 
v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZload [i] {s} p mem) s0:(SLDconst [8] x1:(MOVBZload [i+1] {s} p mem))) s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem))) s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem))) s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem))) s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem))) s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem))) s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem) + for { + o0 := v.Args[0] + if o0.Op != OpS390XOR { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XOR { + break + } + o2 := o1.Args[0] + if o2.Op != OpS390XOR { + break + } + o3 := o2.Args[0] + if o3.Op != OpS390XOR { + break + } + o4 := o3.Args[0] + if o4.Op != OpS390XOR { + break + } + o5 := o4.Args[0] + if o5.Op != OpS390XOR { + break + } + x0 := o5.Args[0] + if x0.Op != OpS390XMOVBZload { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o5.Args[1] + if s0.Op != OpS390XSLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o4.Args[1] + if s1.Op != OpS390XSLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZload { + break + } + if x2.AuxInt != i+2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o3.Args[1] + if s2.Op != OpS390XSLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZload { + break + } + if x3.AuxInt != i+3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o2.Args[1] + if s3.Op != OpS390XSLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpS390XMOVBZload { + break + } + if x4.AuxInt != i+4 { + break + } + if x4.Aux != s { + break + } + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o1.Args[1] + if s4.Op != OpS390XSLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpS390XMOVBZload { + break + } + if x5.AuxInt != i+5 { + break + } + if x5.Aux != s { + break + } + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o0.Args[1] + if s5.Op != OpS390XSLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpS390XMOVBZload { + break + } + if x6.AuxInt != i+6 { + break + } + if x6.Aux != s { + break + } + if p != x6.Args[0] { + 
break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpS390XSLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpS390XMOVBZload { + break + } + if x7.AuxInt != i+7 { + break + } + if x7.Aux != s { + break + } + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Line, OpS390XMOVDBRload, config.fe.TypeUInt64()) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLDconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem))) s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem))) s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem))) s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem))) s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem))) s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx [i] {s} p idx mem) + for { + o0 := v.Args[0] + if o0.Op != OpS390XOR { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XOR { + break + } + o2 := o1.Args[0] + if o2.Op != OpS390XOR { + break + } + o3 := o2.Args[0] + if o3.Op != OpS390XOR { + break + } + o4 := o3.Args[0] + if o4.Op != OpS390XOR { + break + } + o5 := o4.Args[0] + if o5.Op != OpS390XOR { + break + } + x0 := o5.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o5.Args[1] + if s0.Op != OpS390XSLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + s1 := o4.Args[1] + if s1.Op != OpS390XSLDconst { + break + } + if s1.AuxInt 
!= 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZloadidx { + break + } + if x2.AuxInt != i+2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if idx != x2.Args[1] { + break + } + if mem != x2.Args[2] { + break + } + s2 := o3.Args[1] + if s2.Op != OpS390XSLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZloadidx { + break + } + if x3.AuxInt != i+3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if idx != x3.Args[1] { + break + } + if mem != x3.Args[2] { + break + } + s3 := o2.Args[1] + if s3.Op != OpS390XSLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpS390XMOVBZloadidx { + break + } + if x4.AuxInt != i+4 { + break + } + if x4.Aux != s { + break + } + if p != x4.Args[0] { + break + } + if idx != x4.Args[1] { + break + } + if mem != x4.Args[2] { + break + } + s4 := o1.Args[1] + if s4.Op != OpS390XSLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpS390XMOVBZloadidx { + break + } + if x5.AuxInt != i+5 { + break + } + if x5.Aux != s { + break + } + if p != x5.Args[0] { + break + } + if idx != x5.Args[1] { + break + } + if mem != x5.Args[2] { + break + } + s5 := o0.Args[1] + if s5.Op != OpS390XSLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpS390XMOVBZloadidx { + break + } + if x6.AuxInt != i+6 { + break + } + if x6.Aux != s { + break + } + if p != x6.Args[0] { + break + } + if idx != x6.Args[1] { + break + } + if mem != x6.Args[2] { + break + } + s6 := v.Args[1] + if s6.Op != OpS390XSLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpS390XMOVBZloadidx { + break + } + if x7.AuxInt != i+7 { + break + } + if x7.Aux != s { + break + } + if p != x7.Args[0] { + break + } + if idx != x7.Args[1] { + break + } + if mem != x7.Args[2] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Line, OpS390XMOVDBRloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZload [i] {s} p mem) s0:(SLDconst [8] x1:(MOVBZload [i-1] {s} p mem))) s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem))) s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem))) s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem))) s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem))) s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem))) s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses 
== 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem) + for { + o0 := v.Args[0] + if o0.Op != OpS390XOR { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XOR { + break + } + o2 := o1.Args[0] + if o2.Op != OpS390XOR { + break + } + o3 := o2.Args[0] + if o3.Op != OpS390XOR { + break + } + o4 := o3.Args[0] + if o4.Op != OpS390XOR { + break + } + o5 := o4.Args[0] + if o5.Op != OpS390XOR { + break + } + x0 := o5.Args[0] + if x0.Op != OpS390XMOVBZload { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o5.Args[1] + if s0.Op != OpS390XSLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { + break + } + if x1.AuxInt != i-1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o4.Args[1] + if s1.Op != OpS390XSLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZload { + break + } + if x2.AuxInt != i-2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o3.Args[1] + if s2.Op != OpS390XSLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZload { + break + } + if x3.AuxInt != i-3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o2.Args[1] + if s3.Op != OpS390XSLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpS390XMOVBZload { + break + } + if x4.AuxInt != i-4 { + break + } + if x4.Aux != s { + break + } + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o1.Args[1] + if s4.Op != OpS390XSLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpS390XMOVBZload { + break + } + if x5.AuxInt != i-5 { + break + } + if x5.Aux != s { + break + } + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o0.Args[1] + if s5.Op != OpS390XSLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpS390XMOVBZload { + break + } + if x6.AuxInt != i-6 { + break + } + if x6.Aux != s { + break + } + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpS390XSLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpS390XMOVBZload { + break + } + if x7.AuxInt != i-7 { + break + } + if x7.Aux != s { + break + } + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && 
o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64()) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i - 7 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLDconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem))) s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem))) s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem))) s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem))) s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem))) s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx [i-7] {s} p idx mem) + for { + o0 := v.Args[0] + if o0.Op != OpS390XOR { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XOR { + break + } + o2 := o1.Args[0] + if o2.Op != OpS390XOR { + break + } + o3 := o2.Args[0] + if o3.Op != OpS390XOR { + break + } + o4 := o3.Args[0] + if o4.Op != OpS390XOR { + break + } + o5 := o4.Args[0] + if o5.Op != OpS390XOR { + break + } + x0 := o5.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o5.Args[1] + if s0.Op != OpS390XSLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + break + } + if x1.AuxInt != i-1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + s1 := o4.Args[1] + if s1.Op != OpS390XSLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZloadidx { + break + } + if x2.AuxInt != i-2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if idx != x2.Args[1] { + break + } + if mem != x2.Args[2] { + break + } + s2 := o3.Args[1] + if s2.Op != OpS390XSLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZloadidx { + break + } + if x3.AuxInt != i-3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if idx != x3.Args[1] { + break + } + if mem != x3.Args[2] { + break + } + s3 := o2.Args[1] + if s3.Op != OpS390XSLDconst { + break + } + if 
s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpS390XMOVBZloadidx { + break + } + if x4.AuxInt != i-4 { + break + } + if x4.Aux != s { + break + } + if p != x4.Args[0] { + break + } + if idx != x4.Args[1] { + break + } + if mem != x4.Args[2] { + break + } + s4 := o1.Args[1] + if s4.Op != OpS390XSLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpS390XMOVBZloadidx { + break + } + if x5.AuxInt != i-5 { + break + } + if x5.Aux != s { + break + } + if p != x5.Args[0] { + break + } + if idx != x5.Args[1] { + break + } + if mem != x5.Args[2] { + break + } + s5 := o0.Args[1] + if s5.Op != OpS390XSLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpS390XMOVBZloadidx { + break + } + if x6.AuxInt != i-6 { + break + } + if x6.Aux != s { + break + } + if p != x6.Args[0] { + break + } + if idx != x6.Args[1] { + break + } + if mem != x6.Args[2] { + break + } + s6 := v.Args[1] + if s6.Op != OpS390XSLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpS390XMOVBZloadidx { + break + } + if x7.AuxInt != i-7 { + break + } + if x7.Aux != s { + break + } + if p != x7.Args[0] { + break + } + if idx != x7.Args[1] { + break + } + if mem != x7.Args[2] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Line, OpS390XMOVDloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i - 7 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XORW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORW x (MOVDconst [c])) + // cond: + // result: (ORWconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORW (MOVDconst [c]) x) + // cond: + // result: (ORWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORW x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem)) + for { + x0 := v.Args[0] + if x0.Op != OpS390XMOVBZload { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] + 
if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVHBRload, config.fe.TypeUInt16()) + v1.AuxInt = i + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + // match: (ORW o0:(ORW o1:(ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem))) s1:(SLWconst [16] x2:(MOVBZload [i+2] {s} p mem))) s2:(SLWconst [24] x3:(MOVBZload [i+3] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRload [i] {s} p mem)) + for { + o0 := v.Args[0] + if o0.Op != OpS390XORW { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XORW { + break + } + x0 := o1.Args[0] + if x0.Op != OpS390XMOVBZload { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o1.Args[1] + if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o0.Args[1] + if s1.Op != OpS390XSLWconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZload { + break + } + if x2.AuxInt != i+2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := v.Args[1] + if s2.Op != OpS390XSLWconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZload { + break + } + if x3.AuxInt != i+3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { + break + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64()) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVWBRload, config.fe.TypeUInt32()) + v1.AuxInt = i + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + // match: (ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i] {s} p idx mem)) + for { + x0 := v.Args[0] + if x0.Op 
!= OpS390XMOVBZloadidx { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := v.Args[1] + if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64()) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVHBRloadidx, v.Type) + v1.AuxInt = i + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + // match: (ORW o0:(ORW o1:(ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) s1:(SLWconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem))) s2:(SLWconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRloadidx [i] {s} p idx mem)) + for { + o0 := v.Args[0] + if o0.Op != OpS390XORW { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XORW { + break + } + x0 := o1.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o1.Args[1] + if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + s1 := o0.Args[1] + if s1.Op != OpS390XSLWconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZloadidx { + break + } + if x2.AuxInt != i+2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if idx != x2.Args[1] { + break + } + if mem != x2.Args[2] { + break + } + s2 := v.Args[1] + if s2.Op != OpS390XSLWconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZloadidx { + break + } + if x3.AuxInt != i+3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if idx != x3.Args[1] { + break + } + if mem != x3.Args[2] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { + break + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64()) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpS390XMOVWBRloadidx, v.Type) + v1.AuxInt = i + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + 
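// Editorial note (illustrative; not generator output): the OR/ORW rules above and
+	// below all collapse a chain of single-byte loads, shifts and ORs into one wider
+	// load. A hypothetical Go fragment matched by the ascending-offset forms is
+	//
+	//	v := uint32(b[i]) | uint32(b[i+1])<<8 | uint32(b[i+2])<<16 | uint32(b[i+3])<<24
+	//
+	// i.e. a little-endian read of the kind encoding/binary produces. Because s390x
+	// is big-endian, ascending offsets lower to byte-reversed loads (MOVHBRload,
+	// MOVWBRload, MOVDBRload and their idx variants), while the mirror-image
+	// descending-offset rules that follow lower to plain loads (MOVHZload,
+	// MOVWZload, MOVDload).
+	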
// match: (ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem) + for { + x0 := v.Args[0] + if x0.Op != OpS390XMOVBZload { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] + if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { + break + } + if x1.AuxInt != i-1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16()) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i - 1 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (ORW o0:(ORW o1:(ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem))) s1:(SLWconst [16] x2:(MOVBZload [i-2] {s} p mem))) s2:(SLWconst [24] x3:(MOVBZload [i-3] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWZload [i-3] {s} p mem) + for { + o0 := v.Args[0] + if o0.Op != OpS390XORW { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XORW { + break + } + x0 := o1.Args[0] + if x0.Op != OpS390XMOVBZload { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o1.Args[1] + if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { + break + } + if x1.AuxInt != i-1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o0.Args[1] + if s1.Op != OpS390XSLWconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZload { + break + } + if x2.AuxInt != i-2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := v.Args[1] + if s2.Op != OpS390XSLWconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZload { + break + } + if x3.AuxInt != i-3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { + break + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32()) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i - 3 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] 
x1:(MOVBZloadidx [i-1] {s} p idx mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i-1] {s} p idx mem) + for { + x0 := v.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := v.Args[1] + if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + break + } + if x1.AuxInt != i-1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Line, OpS390XMOVHZloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i - 1 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (ORW o0:(ORW o1:(ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) s1:(SLWconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem))) s2:(SLWconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWZloadidx [i-3] {s} p idx mem) + for { + o0 := v.Args[0] + if o0.Op != OpS390XORW { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XORW { + break + } + x0 := o1.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o1.Args[1] + if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + break + } + if x1.AuxInt != i-1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + s1 := o0.Args[1] + if s1.Op != OpS390XSLWconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZloadidx { + break + } + if x2.AuxInt != i-2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if idx != x2.Args[1] { + break + } + if mem != x2.Args[2] { + break + } + s2 := v.Args[1] + if s2.Op != OpS390XSLWconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZloadidx { + break + } + if x3.AuxInt != i-3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if idx != x3.Args[1] { + break + } + if mem != x3.Args[2] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { + break + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(v.Line, OpS390XMOVWZloadidx, v.Type) + 
v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i - 3 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XORWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORWconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORWconst [c] _) + // cond: int32(c)==-1 + // result: (MOVDconst [-1]) + for { + c := v.AuxInt + if !(int32(c) == -1) { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = -1 + return true + } + // match: (ORWconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c|d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c | d + return true + } + return false +} +func rewriteValueS390X_OpS390XORconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORconst [-1] _) + // cond: + // result: (MOVDconst [-1]) + for { + if v.AuxInt != -1 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = -1 + return true + } + // match: (ORconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c|d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c | d + return true + } + return false +} +func rewriteValueS390X_OpS390XSLD(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SLD x (MOVDconst [c])) + // cond: + // result: (SLDconst [c&63] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XSLDconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SLD x (ANDconst [63] y)) + // cond: + // result: (SLD x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XANDconst { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSLW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SLW x (MOVDconst [c])) + // cond: + // result: (SLWconst [c&63] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XSLWconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SLW x (ANDWconst [63] y)) + // cond: + // result: (SLW x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XANDWconst { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRAD(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRAD x (MOVDconst [c])) + // cond: + // result: (SRADconst [c&63] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XSRADconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SRAD x (ANDconst [63] y)) + // cond: + // result: (SRAD x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XANDconst { + break + } + if v_1.AuxInt != 63 { + break + } + y := 
v_1.Args[0] + v.reset(OpS390XSRAD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRADconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRADconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [d>>uint64(c)]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = d >> uint64(c) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRAW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRAW x (MOVDconst [c])) + // cond: + // result: (SRAWconst [c&63] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XSRAWconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SRAW x (ANDWconst [63] y)) + // cond: + // result: (SRAW x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XANDWconst { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRAWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRAWconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [d>>uint64(c)]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = d >> uint64(c) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRD(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRD x (MOVDconst [c])) + // cond: + // result: (SRDconst [c&63] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XSRDconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SRD x (ANDconst [63] y)) + // cond: + // result: (SRD x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XANDconst { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRW x (MOVDconst [c])) + // cond: + // result: (SRWconst [c&63] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XSRWconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SRW x (ANDWconst [63] y)) + // cond: + // result: (SRW x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XANDWconst { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSUB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUB x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (SUBconst x [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpS390XSUBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (SUB (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (NEG (SUBconst x [c])) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpS390XNEG) + v0 := b.NewValue0(v.Line, 
OpS390XSUBconst, v.Type) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUB x x) + // cond: + // result: (MOVDconst [0]) + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBEWcarrymask(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBEWcarrymask (FlagEQ)) + // cond: + // result: (MOVDconst [-1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XFlagEQ { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = -1 + return true + } + // match: (SUBEWcarrymask (FlagLT)) + // cond: + // result: (MOVDconst [-1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XFlagLT { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = -1 + return true + } + // match: (SUBEWcarrymask (FlagGT)) + // cond: + // result: (MOVDconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XFlagGT { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBEcarrymask(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBEcarrymask (FlagEQ)) + // cond: + // result: (MOVDconst [-1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XFlagEQ { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = -1 + return true + } + // match: (SUBEcarrymask (FlagLT)) + // cond: + // result: (MOVDconst [-1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XFlagLT { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = -1 + return true + } + // match: (SUBEcarrymask (FlagGT)) + // cond: + // result: (MOVDconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XFlagGT { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBW x (MOVDconst [c])) + // cond: + // result: (SUBWconst x [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XSUBWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (SUBW (MOVDconst [c]) x) + // cond: + // result: (NEGW (SUBWconst x [c])) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XNEGW) + v0 := b.NewValue0(v.Line, OpS390XSUBWconst, v.Type) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBW x x) + // cond: + // result: (MOVDconst [0]) + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBWconst [c] x) + // cond: int32(c) == 0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (SUBWconst [c] x) + // cond: + // result: (ADDWconst [int64(int32(-c))] x) + for { + c := v.AuxInt + x := v.Args[0] + v.reset(OpS390XADDWconst) + v.AuxInt = int64(int32(-c)) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpS390XSUBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (SUBconst [c] x) + // cond: c != -(1<<31) + // result: 
(ADDconst [-c] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(c != -(1 << 31)) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = -c + v.AddArg(x) + return true + } + // match: (SUBconst (MOVDconst [d]) [c]) + // cond: + // result: (MOVDconst [d-c]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = d - c + return true + } + // match: (SUBconst (SUBconst x [d]) [c]) + // cond: is32Bit(-c-d) + // result: (ADDconst [-c-d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XSUBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + if !(is32Bit(-c - d)) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = -c - d + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XXOR(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XOR x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (XORconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpS390XXORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XOR (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (XORconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpS390XXORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XOR x x) + // cond: + // result: (MOVDconst [0]) + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueS390X_OpS390XXORW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORW x (MOVDconst [c])) + // cond: + // result: (XORWconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XXORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORW (MOVDconst [c]) x) + // cond: + // result: (XORWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XXORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORW x x) + // cond: + // result: (MOVDconst [0]) + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueS390X_OpS390XXORWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORWconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (XORWconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c^d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c ^ d + return true + } + return false +} +func rewriteValueS390X_OpS390XXORconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (XORconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c^d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + d := 
v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = c ^ d
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpSignExt16to32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt16to32 x)
+	// cond:
+	// result: (MOVHreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVHreg)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpSignExt16to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt16to64 x)
+	// cond:
+	// result: (MOVHreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVHreg)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpSignExt32to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt32to64 x)
+	// cond:
+	// result: (MOVWreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVWreg)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpSignExt8to16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt8to16 x)
+	// cond:
+	// result: (MOVBreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVBreg)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpSignExt8to32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt8to32 x)
+	// cond:
+	// result: (MOVBreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVBreg)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpSignExt8to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt8to64 x)
+	// cond:
+	// result: (MOVBreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVBreg)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpSqrt(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sqrt x)
+	// cond:
+	// result: (FSQRT x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XFSQRT)
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpStaticCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (StaticCall [argwid] {target} mem)
+	// cond:
+	// result: (CALLstatic [argwid] {target} mem)
+	for {
+		argwid := v.AuxInt
+		target := v.Aux
+		mem := v.Args[0]
+		v.reset(OpS390XCALLstatic)
+		v.AuxInt = argwid
+		v.Aux = target
+		v.AddArg(mem)
+		return true
+	}
+}
+func rewriteValueS390X_OpStore(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Store [8] ptr val mem)
+	// cond: is64BitFloat(val.Type)
+	// result: (FMOVDstore ptr val mem)
+	for {
+		if v.AuxInt != 8 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is64BitFloat(val.Type)) {
+			break
+		}
+		v.reset(OpS390XFMOVDstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [4] ptr val mem)
+	// cond: is32BitFloat(val.Type)
+	// result: (FMOVSstore ptr val mem)
+	for {
+		if v.AuxInt != 4 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32BitFloat(val.Type)) {
+			break
+		}
+		v.reset(OpS390XFMOVSstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [8] ptr val mem)
+	// cond:
+	// result: (MOVDstore ptr val mem)
+	for {
+		if v.AuxInt != 8 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVDstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [4] ptr val mem)
+	// cond:
+	// result: (MOVWstore ptr val mem)
+	for {
+		if v.AuxInt != 4 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVWstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [2] ptr val mem)
+	// cond:
+	// result: (MOVHstore ptr val mem)
+	for {
+		if v.AuxInt != 2 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVHstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [1] ptr val mem)
+	// cond:
+	// result: (MOVBstore ptr val mem)
+	for {
+		if v.AuxInt != 1 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVBstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpSub16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub16 x y)
+	// cond:
+	// result: (SUBW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUBW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpSub32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub32 x y)
+	// cond:
+	// result: (SUBW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUBW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpSub32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub32F x y)
+	// cond:
+	// result: (FSUBS x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFSUBS)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpSub64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub64 x y)
+	// cond:
+	// result: (SUB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpSub64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub64F x y)
+	// cond:
+	// result: (FSUB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFSUB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpSub8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub8 x y)
+	// cond:
+	// result: (SUBW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUBW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpSubPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SubPtr x y)
+	// cond:
+	// result: (SUB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpTrunc16to8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc16to8 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpTrunc32to16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc32to16 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpTrunc32to8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc32to8 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpTrunc64to16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc64to16 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpTrunc64to32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc64to32 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpTrunc64to8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc64to8 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+func rewriteValueS390X_OpXor16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Xor16 x y)
+	// cond:
+	// result: (XORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XXORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpXor32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Xor32 x y)
+	// cond:
+	// result: (XORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XXORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpXor64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Xor64 x y)
+	// cond:
+	// result: (XOR x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XXOR)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpXor8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Xor8 x y)
+	// cond:
+	// result: (XORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XXORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+func rewriteValueS390X_OpZero(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Zero [s] _ mem)
+	// cond: SizeAndAlign(s).Size() == 0
+	// result: mem
+	for {
+		s := v.AuxInt
+		mem := v.Args[1]
+		if !(SizeAndAlign(s).Size() == 0) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = mem.Type
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Zero [s] destptr mem)
+	// cond: SizeAndAlign(s).Size() == 1
+	// result: (MOVBstoreconst [0] destptr mem)
+	for {
+		s := v.AuxInt
+		destptr := v.Args[0]
+		mem := v.Args[1]
+		if !(SizeAndAlign(s).Size() == 1) {
+			break
+		}
+		v.reset(OpS390XMOVBstoreconst)
+		v.AuxInt = 0
+		v.AddArg(destptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Zero [s] destptr mem)
+	// cond: SizeAndAlign(s).Size() == 2
+	// result: (MOVHstoreconst [0] destptr mem)
+	for {
+		s := v.AuxInt
+		destptr := v.Args[0]
+		mem := v.Args[1]
+		if !(SizeAndAlign(s).Size() == 2) {
+			break
+		}
+		v.reset(OpS390XMOVHstoreconst)
+		v.AuxInt = 0
+		v.AddArg(destptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Zero [s] destptr mem)
+	// cond: SizeAndAlign(s).Size() == 4
+	// result: (MOVWstoreconst [0] destptr mem)
+	for {
+		s := v.AuxInt
+		destptr := v.Args[0]
+		mem := v.Args[1]
+		if !(SizeAndAlign(s).Size() == 4) {
+			break
+		}
+		v.reset(OpS390XMOVWstoreconst)
+		v.AuxInt = 0
+		v.AddArg(destptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Zero [s] destptr mem)
+	// cond: SizeAndAlign(s).Size() == 8
+	// result: (MOVDstoreconst [0] destptr mem)
+	for {
+		s := v.AuxInt
+		destptr := v.Args[0]
+		mem := v.Args[1]
+		if !(SizeAndAlign(s).Size() == 8) {
+			break
+		}
+		v.reset(OpS390XMOVDstoreconst)
+		v.AuxInt = 0
+		v.AddArg(destptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Zero [s] destptr mem)
+	// cond: SizeAndAlign(s).Size() == 3
+	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem))
+	for {
+		s := v.AuxInt
+		destptr := v.Args[0]
+		mem := v.Args[1]
+		if !(SizeAndAlign(s).Size() == 3) {
+			break
+		}
+		v.reset(OpS390XMOVBstoreconst)
+		v.AuxInt = makeValAndOff(0, 2)
+		v.AddArg(destptr)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHstoreconst, TypeMem)
+		v0.AuxInt = 0
+		v0.AddArg(destptr)
+		v0.AddArg(mem)
+		v.AddArg(v0)
+		return true
+	}
+	// match: (Zero [s] destptr mem)
+	// cond: 
SizeAndAlign(s).Size() == 5 + // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) + for { + s := v.AuxInt + destptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 5) { + break + } + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = makeValAndOff(0, 4) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: SizeAndAlign(s).Size() == 6 + // result: (MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) + for { + s := v.AuxInt + destptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 6) { + break + } + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = makeValAndOff(0, 4) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: SizeAndAlign(s).Size() == 7 + // result: (MOVWstoreconst [makeValAndOff(0,3)] destptr (MOVWstoreconst [0] destptr mem)) + for { + s := v.AuxInt + destptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 7) { + break + } + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = makeValAndOff(0, 3) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024 + // result: (CLEAR [makeValAndOff(SizeAndAlign(s).Size(), 0)] destptr mem) + for { + s := v.AuxInt + destptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024) { + break + } + v.reset(OpS390XCLEAR) + v.AuxInt = makeValAndOff(SizeAndAlign(s).Size(), 0) + v.AddArg(destptr) + v.AddArg(mem) + return true + } + // match: (Zero [s] destptr mem) + // cond: SizeAndAlign(s).Size() > 1024 + // result: (LoweredZero [SizeAndAlign(s).Size()%256] destptr (ADDconst destptr [(SizeAndAlign(s).Size()/256)*256]) mem) + for { + s := v.AuxInt + destptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() > 1024) { + break + } + v.reset(OpS390XLoweredZero) + v.AuxInt = SizeAndAlign(s).Size() % 256 + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpS390XADDconst, destptr.Type) + v0.AuxInt = (SizeAndAlign(s).Size() / 256) * 256 + v0.AddArg(destptr) + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpZeroExt16to32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt16to32 x) + // cond: + // result: (MOVHZreg x) + for { + x := v.Args[0] + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpZeroExt16to64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt16to64 x) + // cond: + // result: (MOVHZreg x) + for { + x := v.Args[0] + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpZeroExt32to64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt32to64 x) + // cond: + // result: (MOVWZreg x) + for { + x := v.Args[0] + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpZeroExt8to16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt8to16 x) + // cond: + // result: (MOVBZreg x) + for { + x := v.Args[0] + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return 
true + } +} +func rewriteValueS390X_OpZeroExt8to32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt8to32 x) + // cond: + // result: (MOVBZreg x) + for { + x := v.Args[0] + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpZeroExt8to64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt8to64 x) + // cond: + // result: (MOVBZreg x) + for { + x := v.Args[0] + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } +} +func rewriteBlockS390X(b *Block, config *Config) bool { + switch b.Kind { + case BlockS390XEQ: + // match: (EQ (InvertFlags cmp) yes no) + // cond: + // result: (EQ cmp yes no) + for { + v := b.Control + if v.Op != OpS390XInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XEQ + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (EQ (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpS390XFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (EQ (FlagLT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpS390XFlagLT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (EQ (FlagGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpS390XFlagGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + case BlockS390XGE: + // match: (GE (InvertFlags cmp) yes no) + // cond: + // result: (LE cmp yes no) + for { + v := b.Control + if v.Op != OpS390XInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XLE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (GE (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpS390XFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (GE (FlagLT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpS390XFlagLT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (GE (FlagGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpS390XFlagGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + case BlockS390XGT: + // match: (GT (InvertFlags cmp) yes no) + // cond: + // result: (LT cmp yes no) + for { + v := b.Control + if v.Op != OpS390XInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XLT + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (GT (FlagEQ) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpS390XFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (GT (FlagLT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpS390XFlagLT 
{ + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (GT (FlagGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpS390XFlagGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + case BlockIf: + // match: (If (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) + // cond: + // result: (LT cmp yes no) + for { + v := b.Control + if v.Op != OpS390XMOVDLT { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != 0 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + cmp := v.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XLT + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (If (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) + // cond: + // result: (LE cmp yes no) + for { + v := b.Control + if v.Op != OpS390XMOVDLE { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != 0 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + cmp := v.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XLE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (If (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) + // cond: + // result: (GT cmp yes no) + for { + v := b.Control + if v.Op != OpS390XMOVDGT { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != 0 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + cmp := v.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XGT + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (If (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) + // cond: + // result: (GE cmp yes no) + for { + v := b.Control + if v.Op != OpS390XMOVDGE { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != 0 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + cmp := v.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XGE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (If (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) + // cond: + // result: (EQ cmp yes no) + for { + v := b.Control + if v.Op != OpS390XMOVDEQ { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != 0 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + cmp := v.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XEQ + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (If (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) + // cond: + // result: (NE cmp yes no) + for { + v := b.Control + if v.Op != OpS390XMOVDNE { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != 0 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + cmp := v.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XNE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (If (MOVDGTnoinv (MOVDconst 
[0]) (MOVDconst [1]) cmp) yes no) + // cond: + // result: (GTF cmp yes no) + for { + v := b.Control + if v.Op != OpS390XMOVDGTnoinv { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != 0 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + cmp := v.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XGTF + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) + // cond: + // result: (GEF cmp yes no) + for { + v := b.Control + if v.Op != OpS390XMOVDGEnoinv { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != 0 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + cmp := v.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XGEF + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (If cond yes no) + // cond: + // result: (NE (TESTB cond) yes no) + for { + v := b.Control + _ = v + cond := b.Control + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XNE + v0 := b.NewValue0(v.Line, OpS390XTESTB, TypeFlags) + v0.AddArg(cond) + b.SetControl(v0) + _ = yes + _ = no + return true + } + case BlockS390XLE: + // match: (LE (InvertFlags cmp) yes no) + // cond: + // result: (GE cmp yes no) + for { + v := b.Control + if v.Op != OpS390XInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XGE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (LE (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpS390XFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (LE (FlagLT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpS390XFlagLT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (LE (FlagGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpS390XFlagGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + case BlockS390XLT: + // match: (LT (InvertFlags cmp) yes no) + // cond: + // result: (GT cmp yes no) + for { + v := b.Control + if v.Op != OpS390XInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XGT + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (LT (FlagEQ) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpS390XFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (LT (FlagLT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpS390XFlagLT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (LT (FlagGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpS390XFlagGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no 
+ _ = yes + return true + } + case BlockS390XNE: + // match: (NE (TESTB (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) + // cond: + // result: (LT cmp yes no) + for { + v := b.Control + if v.Op != OpS390XTESTB { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDLT { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + if v_0_0.AuxInt != 0 { + break + } + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + if v_0_1.AuxInt != 1 { + break + } + cmp := v_0.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XLT + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (NE (TESTB (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) + // cond: + // result: (LE cmp yes no) + for { + v := b.Control + if v.Op != OpS390XTESTB { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDLE { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + if v_0_0.AuxInt != 0 { + break + } + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + if v_0_1.AuxInt != 1 { + break + } + cmp := v_0.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XLE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (NE (TESTB (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) + // cond: + // result: (GT cmp yes no) + for { + v := b.Control + if v.Op != OpS390XTESTB { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDGT { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + if v_0_0.AuxInt != 0 { + break + } + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + if v_0_1.AuxInt != 1 { + break + } + cmp := v_0.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XGT + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (NE (TESTB (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) + // cond: + // result: (GE cmp yes no) + for { + v := b.Control + if v.Op != OpS390XTESTB { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDGE { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + if v_0_0.AuxInt != 0 { + break + } + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + if v_0_1.AuxInt != 1 { + break + } + cmp := v_0.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XGE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (NE (TESTB (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) + // cond: + // result: (EQ cmp yes no) + for { + v := b.Control + if v.Op != OpS390XTESTB { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDEQ { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + if v_0_0.AuxInt != 0 { + break + } + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + if v_0_1.AuxInt != 1 { + break + } + cmp := v_0.Args[2] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockS390XEQ + b.SetControl(cmp) + _ = yes + _ = no + return true + } + // match: (NE (TESTB (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) + // cond: + // result: (NE cmp yes no) + for { + v := b.Control + if v.Op != OpS390XTESTB { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDNE { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + if v_0_0.AuxInt != 0 { + break + } + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + if v_0_1.AuxInt != 1 { + break + } + cmp := v_0.Args[2] + yes := b.Succs[0] 
+		no := b.Succs[1]
+		b.Kind = BlockS390XNE
+		b.SetControl(cmp)
+		_ = yes
+		_ = no
+		return true
+	}
+	// match: (NE (TESTB (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+	// cond:
+	// result: (GTF cmp yes no)
+	for {
+		v := b.Control
+		if v.Op != OpS390XTESTB {
+			break
+		}
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDGTnoinv {
+			break
+		}
+		v_0_0 := v_0.Args[0]
+		if v_0_0.Op != OpS390XMOVDconst {
+			break
+		}
+		if v_0_0.AuxInt != 0 {
+			break
+		}
+		v_0_1 := v_0.Args[1]
+		if v_0_1.Op != OpS390XMOVDconst {
+			break
+		}
+		if v_0_1.AuxInt != 1 {
+			break
+		}
+		cmp := v_0.Args[2]
+		yes := b.Succs[0]
+		no := b.Succs[1]
+		b.Kind = BlockS390XGTF
+		b.SetControl(cmp)
+		_ = yes
+		_ = no
+		return true
+	}
+	// match: (NE (TESTB (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+	// cond:
+	// result: (GEF cmp yes no)
+	for {
+		v := b.Control
+		if v.Op != OpS390XTESTB {
+			break
+		}
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDGEnoinv {
+			break
+		}
+		v_0_0 := v_0.Args[0]
+		if v_0_0.Op != OpS390XMOVDconst {
+			break
+		}
+		if v_0_0.AuxInt != 0 {
+			break
+		}
+		v_0_1 := v_0.Args[1]
+		if v_0_1.Op != OpS390XMOVDconst {
+			break
+		}
+		if v_0_1.AuxInt != 1 {
+			break
+		}
+		cmp := v_0.Args[2]
+		yes := b.Succs[0]
+		no := b.Succs[1]
+		b.Kind = BlockS390XGEF
+		b.SetControl(cmp)
+		_ = yes
+		_ = no
+		return true
+	}
+	// match: (NE (InvertFlags cmp) yes no)
+	// cond:
+	// result: (NE cmp yes no)
+	for {
+		v := b.Control
+		if v.Op != OpS390XInvertFlags {
+			break
+		}
+		cmp := v.Args[0]
+		yes := b.Succs[0]
+		no := b.Succs[1]
+		b.Kind = BlockS390XNE
+		b.SetControl(cmp)
+		_ = yes
+		_ = no
+		return true
+	}
+	// match: (NE (FlagEQ) yes no)
+	// cond:
+	// result: (First nil no yes)
+	for {
+		v := b.Control
+		if v.Op != OpS390XFlagEQ {
+			break
+		}
+		yes := b.Succs[0]
+		no := b.Succs[1]
+		b.Kind = BlockFirst
+		b.SetControl(nil)
+		b.swapSuccessors()
+		_ = no
+		_ = yes
+		return true
+	}
+	// match: (NE (FlagLT) yes no)
+	// cond:
+	// result: (First nil yes no)
+	for {
+		v := b.Control
+		if v.Op != OpS390XFlagLT {
+			break
+		}
+		yes := b.Succs[0]
+		no := b.Succs[1]
+		b.Kind = BlockFirst
+		b.SetControl(nil)
+		_ = yes
+		_ = no
+		return true
+	}
+	// match: (NE (FlagGT) yes no)
+	// cond:
+	// result: (First nil yes no)
+	for {
+		v := b.Control
+		if v.Op != OpS390XFlagGT {
+			break
+		}
+		yes := b.Succs[0]
+		no := b.Succs[1]
+		b.Kind = BlockFirst
+		b.SetControl(nil)
+		_ = yes
+		_ = no
+		return true
+	}
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index ffc816b00ff9a..0cfd32744680a 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -84,7 +84,10 @@ func schedule(f *Func) {
 		// Compute score. Larger numbers are scheduled closer to the end of the block.
 		for _, v := range b.Values {
 			switch {
-			case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr || v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr || v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr:
+			case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr ||
+				v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr ||
+				v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr ||
+				v.Op == OpS390XLoweredGetClosurePtr:
 				// We also score GetLoweredClosurePtr as early as possible to ensure that the
 				// context register is not stomped. GetLoweredClosurePtr should only appear
 				// in the entry block where there are no phi functions, so there is no
diff --git a/test/live.go b/test/live.go
index c3985a37a2b78..6742e589e1392 100644
--- a/test/live.go
+++ b/test/live.go
@@ -1,4 +1,4 @@
-// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le
+// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le,!s390x
 // errorcheck -0 -l -live -wb=0
 
 // Copyright 2014 The Go Authors. All rights reserved.
diff --git a/test/live_ssa.go b/test/live_ssa.go
index 41ac407db362d..b74ae1539141a 100644
--- a/test/live_ssa.go
+++ b/test/live_ssa.go
@@ -1,4 +1,4 @@
-// +build amd64 arm amd64p32 386 arm64 mips64 mips64le
+// +build amd64 arm amd64p32 386 arm64 mips64 mips64le s390x
 // errorcheck -0 -l -live -wb=0
 
 // Copyright 2014 The Go Authors. All rights reserved.
diff --git a/test/nilptr3_ssa.go b/test/nilptr3_ssa.go
index 9fb533d0f9f9b..bc98a94be91e0 100644
--- a/test/nilptr3_ssa.go
+++ b/test/nilptr3_ssa.go
@@ -1,5 +1,5 @@
 // errorcheck -0 -d=nil
-// +build amd64 arm amd64p32 386 arm64 mips64 mips64le ppc64le
+// +build amd64 arm amd64p32 386 arm64 mips64 mips64le ppc64le s390x
 
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/phiopt.go b/test/phiopt.go
index 21dd13155c3c3..e57ea94868b08 100644
--- a/test/phiopt.go
+++ b/test/phiopt.go
@@ -1,4 +1,4 @@
-// +build amd64
+// +build amd64 s390x
 // errorcheck -0 -d=ssa/phiopt/debug=3
 
 // Copyright 2016 The Go Authors. All rights reserved.
diff --git a/test/sliceopt.go b/test/sliceopt.go
index 4c38541b03989..2fb1b3118ca4d 100644
--- a/test/sliceopt.go
+++ b/test/sliceopt.go
@@ -1,4 +1,4 @@
-// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le
+// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le,!s390x
 // errorcheck -0 -d=append,slice
 
 // Copyright 2015 The Go Authors. All rights reserved.
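
A note on the build constraints above: in a "// +build" line, space-separated terms are OR'ed while comma-separated terms are AND'ed. "// +build amd64 s390x" therefore enables phiopt.go on either architecture, while the comma-joined negations in live.go and sliceopt.go exclude every architecture that has an SSA backend.

rewriteS390X.go is autogenerated from gen/S390X.rules: each "// match:" / "// cond:" / "// result:" triple corresponds to one rule, and the rules are compiled into matchers that are tried in source order, returning true as soon as one fires. Below is a minimal sketch of that shape, using simplified stand-in types; the real Value, Op, and makeValAndOff in cmd/compile/internal/ssa carry types, line numbers, and range checks that are omitted here. It implements two of the (Zero [s] destptr mem) rules shown above.

package main

import "fmt"

// Op and Value are toy stand-ins for the ssa package's types.
type Op string

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

func (v *Value) reset(op Op) {
	v.Op = op
	v.AuxInt = 0
	v.Args = nil
}

func (v *Value) AddArg(w *Value) {
	v.Args = append(v.Args, w)
}

// makeValAndOff packs a constant value and an offset into one AuxInt,
// as in the MOVBstoreconst [makeValAndOff(0,2)] rule above (simplified:
// the real version also checks that both halves fit in 32 bits).
func makeValAndOff(val, off int64) int64 {
	return val<<32 | off&0xffffffff
}

// rewriteZero applies two of the (Zero [s] destptr mem) rules: a 3-byte
// zero becomes a halfword store of 0 at offset 0 wrapped by a byte store
// of 0 at offset 2, and any other size in (0, 1024] becomes one CLEAR.
// Like the generated matchers, it returns true as soon as a rule fires.
func rewriteZero(v *Value) bool {
	s := v.AuxInt
	destptr, mem := v.Args[0], v.Args[1]
	switch {
	case s == 3:
		inner := &Value{Op: "MOVHstoreconst", AuxInt: 0, Args: []*Value{destptr, mem}}
		v.reset("MOVBstoreconst")
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v.AddArg(inner)
		return true
	case s > 0 && s <= 1024:
		v.reset("CLEAR")
		v.AuxInt = makeValAndOff(s, 0)
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	return false // no rule matched; the value is left for other passes
}

func main() {
	destptr := &Value{Op: "Addr"}
	mem := &Value{Op: "InitMem"}
	v := &Value{Op: "Zero", AuxInt: 3, Args: []*Value{destptr, mem}}
	fmt.Println(rewriteZero(v))                          // true
	fmt.Println(v.Op, v.AuxInt>>32, v.AuxInt&0xffffffff) // MOVBstoreconst 0 2
}

Because the matchers run in source order, rule order encodes priority: the size-3 decomposition must precede the general 0 < size <= 1024 CLEAR rule, which would otherwise absorb it, just as the float Store rules above are tried before the integer MOVDstore/MOVWstore rules for the same AuxInt.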