diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 1f0975a814428..a38bb0c14891c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -884,23 +884,6 @@ class VPseudoMaskUnarySOutMask:
   let BaseInstr = !cast(PseudoToVInst.VInst);
 }
 
-// Masked mask operation have no $rd=$merge constraints
-class VPseudoUnaryMOutMask:
-  Pseudo<(outs VR:$rd),
-         (ins VR:$merge, VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
-  RISCVVPseudo {
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let usesCustomInserter = 1;
-  let Constraints = "$rd = $merge";
-  let Uses = [VL, VTYPE];
-  let HasVLOp = 1;
-  let HasSEWOp = 1;
-  let HasMergeOp = 1;
-  let BaseInstr = !cast(PseudoToVInst.VInst);
-}
-
 // Mask can be V0~V31
 class VPseudoUnaryAnyMask :
@@ -995,6 +978,28 @@ class VPseudoBinaryMask(PseudoToVInst.VInst);
 }
+// Like VPseudoBinaryMask, but output can be V0.
+class VPseudoBinaryMOutMask :
+  Pseudo<(outs RetClass:$rd),
+         (ins RetClass:$merge,
+              Op1Class:$rs2, Op2Class:$rs1,
+              VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+  RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let BaseInstr = !cast(PseudoToVInst.VInst);
+}
+
 class VPseudoBinaryCarryIn {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoBinaryNoMask;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask;
+  }
+}
+
 multiclass VPseudoBinaryEmul;
+    defm _VV : VPseudoBinaryM;
 }
 
 multiclass VPseudoBinaryM_VX {
   foreach m = MxList.m in
     defm "_VX" :
-      VPseudoBinary;
+      VPseudoBinaryM;
 }
 
 multiclass VPseudoBinaryM_VF {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
       defm "_V" # f.FX :
-        VPseudoBinary;
+        VPseudoBinaryM;
 }
 
 multiclass VPseudoBinaryM_VI {
   foreach m = MxList.m in
-    defm _VI : VPseudoBinary;
+    defm _VI : VPseudoBinaryM;
 }
 
 multiclass VPseudoBinaryV_VV_VX_VI {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
index cf9d68ef2a563..5e1f4d1e8b141 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
@@ -1683,12 +1683,12 @@ define @intrinsic_vmseq_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vv v26, v8, v25, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmseq.vv v25, v8, v26, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
index a5e6e5c23c387..be2e83d4a4d3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -1713,12 +1713,12 @@ define @intrinsic_vmsge_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vv v26, v25, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
@@ -2761,11 +2761,11 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vv v26, v25, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
@@ -2761,11 +2761,11 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64( @intrinsic_vmsgt_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vv v26, v25, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmslt.vv v25, v26, v8, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
index 839722ce6c24e..71e499b3ca399 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
@@ -1683,12 +1683,12 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vv v26, v25, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmsltu.vv v25, v26, v8, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
index 6a33e23b4ecce..74662ab9f89a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
@@ -1683,12 +1683,12 @@ define @intrinsic_vmsle_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vv v26, v8, v25, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmsle.vv v25, v8, v26, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
index 8b4879b8f515a..5bf609ef07f16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
@@ -1683,12 +1683,12 @@ define @intrinsic_vmsleu_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vv v26, v8, v25, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmsleu.vv v25, v8, v26, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
index 482fe2ce8db6e..8ca44d66ebebc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
@@ -1683,12 +1683,12 @@ define @intrinsic_vmslt_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vv v26, v8, v25, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmslt.vv v25, v8, v26, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
index d8140beab031c..3ccd474a441b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -1683,12 +1683,12 @@ define @intrinsic_vmsltu_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vv v26, v8, v25, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmsltu.vv v25, v8, v26, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
index 5389e8b11e02e..b8feac3c8ce64 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
@@ -1683,12 +1683,12 @@ define @intrinsic_vmsne_mask_vx_nxv1i64_i64(
 ; CHECK-NEXT: sw a0, 8(sp)
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vmv1r.v v26, v0
+; CHECK-NEXT: vlse64.v v26, (a0), zero
+; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vv v26, v8, v25, v0.t
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmsne.vv v25, v8, v26, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: jalr zero, 0(ra)
 entry: