[RISCV] Fix alias printing for vmnot.m
By clearing the HasDummyMask flag from mask-register binary operations
and mask loads/stores.

HasDummyMask was causing an extra operand to be appended when
converting from MachineInstr to MCInst. This extra operand doesn't
appear in the assembly string, so it was mostly ignored, but it prevented
alias instruction printing from working correctly.
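
The alias in question is matched against the final MCInst: the instruction
printer emits the short form only when the MCInst's operands line up exactly
with an InstAlias pattern. A minimal sketch of that pattern, assuming the
spelling used for the vmnot.m alias in RISCVInstrInfoV.td (shown for
illustration only; it is not part of this change):

// "vmnot.m vd, vs" is the preferred way to print "vmnand.mm vd, vs, vs".
// The pattern has exactly three register operands, so a lowered MCInst that
// carries an appended dummy-mask operand can never match it and gets printed
// as plain vmnand.mm instead.
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

With HasDummyMask cleared for the mask pseudos, the lowered vmnand.mm carries
only its three register operands, the alias matches again, and the test
updates below flip from vmnand.mm to vmnot.m.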

Reviewed By: arcbbb

Differential Revision: https://reviews.llvm.org/D124424
topperc committed Apr 28, 2022
1 parent 2883de0 commit 8631a5e
Showing 14 changed files with 226 additions and 224 deletions.
20 changes: 11 additions & 9 deletions llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -641,7 +641,7 @@ class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
let VLMul = m.value;
}

class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF, bit DummyMask = 1> :
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
@@ -651,7 +651,7 @@ class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

@@ -794,7 +794,7 @@ class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
class VPseudoUSStoreNoMask<VReg StClass, int EEW, bit DummyMask = 1>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
@@ -804,7 +804,7 @@ class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

@@ -1035,7 +1035,8 @@ class VPseudoUnaryAnyMask<VReg RetClass,
class VPseudoBinaryNoMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
string Constraint> :
string Constraint,
int DummyMask = 1> :
Pseudo<(outs RetClass:$rd),
(ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
@@ -1045,7 +1046,7 @@ class VPseudoBinaryNoMask<VReg RetClass,
let Constraints = Constraint;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

@@ -1544,7 +1545,8 @@ multiclass VPseudoFFLoad {
multiclass VPseudoLoadMask {
foreach mti = AllMasks in {
let VLMul = mti.LMul.value in {
def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0>;
def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0,
/*DummyMask*/0>;
}
}
}
@@ -1616,7 +1618,7 @@ multiclass VPseudoUSStore {
multiclass VPseudoStoreMask {
foreach mti = AllMasks in {
let VLMul = mti.LMul.value in {
def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>;
def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
}
}
}
@@ -1866,7 +1868,7 @@ multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
multiclass VPseudoVALU_MM {
foreach m = MxList in
let VLMul = m.value in {
def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">,
def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "", /*DummyMask*/0>,
Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
}
}
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -256,7 +256,7 @@ define void @fcmp_ule_vv_v32f16(<32 x half>* %x, <32 x half>* %y, <32 x i1>* %z)
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v12, (a1)
; CHECK-NEXT: vmflt.vv v16, v12, v8
; CHECK-NEXT: vmnand.mm v8, v16, v16
; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x
@@ -290,7 +290,7 @@ define void @fcmp_uge_vv_v16f32(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v12, (a1)
; CHECK-NEXT: vmflt.vv v16, v8, v12
; CHECK-NEXT: vmnand.mm v8, v16, v16
; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
@@ -323,7 +323,7 @@ define void @fcmp_ult_vv_v8f64(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z)
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vle64.v v12, (a1)
; CHECK-NEXT: vmfle.vv v16, v12, v8
; CHECK-NEXT: vmnand.mm v8, v16, v16
; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
@@ -357,7 +357,7 @@ define void @fcmp_ugt_vv_v64f16(<64 x half>* %x, <64 x half>* %y, <64 x i1>* %z)
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v16, (a1)
; CHECK-NEXT: vmfle.vv v24, v8, v16
; CHECK-NEXT: vmnand.mm v8, v24, v24
; CHECK-NEXT: vmnot.m v8, v24
; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x
@@ -761,7 +761,7 @@ define void @fcmp_ule_vf_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfgt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12
; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x
@@ -795,7 +795,7 @@ define void @fcmp_uge_vf_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmflt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12
; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
@@ -828,7 +828,7 @@ define void @fcmp_ult_vf_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfge.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12
; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
@@ -862,7 +862,7 @@ define void @fcmp_ugt_vf_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfle.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v16, v16
; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x
@@ -1269,7 +1269,7 @@ define void @fcmp_ule_fv_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmflt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12
; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x
@@ -1303,7 +1303,7 @@ define void @fcmp_uge_fv_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmfgt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12
; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
@@ -1336,7 +1336,7 @@ define void @fcmp_ult_fv_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfle.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12
; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
@@ -1370,7 +1370,7 @@ define void @fcmp_ugt_fv_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfge.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v16, v16
; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
@@ -59,7 +59,7 @@ define void @not_v64i1(<64 x i1>* %x, <64 x i1>* %y) {
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vmnand.mm v8, v8, v8
; CHECK-NEXT: vmnot.m v8, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <64 x i1>, <64 x i1>* %x
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -10,7 +10,7 @@ define signext i1 @vpreduce_and_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i3
; CHECK-LABEL: vpreduce_and_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
@@ -62,7 +62,7 @@ define signext i1 @vpreduce_and_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i3
; CHECK-LABEL: vpreduce_and_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
@@ -114,7 +114,7 @@ define signext i1 @vpreduce_and_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i3
; CHECK-LABEL: vpreduce_and_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
@@ -166,7 +166,7 @@ define signext i1 @vpreduce_and_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i3
; CHECK-LABEL: vpreduce_and_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
@@ -218,7 +218,7 @@ define signext i1 @vpreduce_and_v10i1(i1 signext %s, <10 x i1> %v, <10 x i1> %m,
; CHECK-LABEL: vpreduce_and_v10i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
@@ -235,7 +235,7 @@ define signext i1 @vpreduce_and_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-LABEL: vpreduce_and_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
@@ -259,7 +259,7 @@ define signext i1 @vpreduce_and_v256i1(i1 signext %s, <256 x i1> %v, <256 x i1>
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: .LBB14_2:
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v8, v8
; CHECK-NEXT: vmnot.m v8, v8
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vcpop.m a2, v8, v0.t
; CHECK-NEXT: li a3, 128
@@ -269,7 +269,7 @@ define signext i1 @vpreduce_and_v256i1(i1 signext %s, <256 x i1> %v, <256 x i1>
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB14_4:
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v11, v11
; CHECK-NEXT: vmnot.m v8, v11
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vcpop.m a1, v8, v0.t
; CHECK-NEXT: seqz a1, a1
