diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index de2f51d3b4911..ef7375195bf88 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -188,6 +188,11 @@ static inline unsigned getSEWOpNum(const MCInstrDesc &Desc) {
   return Desc.getNumOperands() - Offset;
 }
 
+static inline unsigned getVecPolicyOpNum(const MCInstrDesc &Desc) {
+  assert(hasVecPolicyOp(Desc.TSFlags));
+  return Desc.getNumOperands() - 1;
+}
+
 // RISC-V Specific Machine Operand Flags
 enum {
   MO_None = 0,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index cf9925135bfe5..1addb0a613030 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1510,6 +1510,13 @@ std::string RISCVInstrInfo::createMIROperandComment(
     unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
     assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
     OS << "e" << SEW;
+  } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
+             OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
+    unsigned Policy = MI.getOperand(OpIdx).getImm();
+    assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+           "Invalid Policy Value");
+    OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
+       << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
   }
 
   OS.flush();
diff --git a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
index b15beb22433b4..214eb2f7c250e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
@@ -30,7 +30,7 @@ body: |
   ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v0
   ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrnov0 = COPY $v1
   ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v2
-  ; CHECK-NEXT: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6 /* e64 */, 1, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVNMSUB_VV_M1_]]
   ; CHECK-NEXT: dead [[COPY2]]:vr = PseudoVSLL_VI_M1 [[COPY2]], 11, $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: $v0 = COPY [[PseudoVNMSUB_VV_M1_]]
diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
index 23c7c8a54aa60..c6ab95d9d703d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
@@ -141,7 +141,7 @@ body: |
   ; CHECK-NEXT: $x5 = LD $x2, 0 :: (load (s64) from %stack.16)
   ; CHECK-NEXT: renamable $v0 = PseudoVRELOAD_M1 killed $x1 :: (load unknown-size from %stack.1, align 8)
   ; CHECK-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.15)
-  ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, 1, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3 /* e8 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: BLT killed renamable $x16, renamable $x27, %bb.2
   ; CHECK-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
index 2aa01273d8831..4c7c4e43c0f61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
@@ -13,7 +13,7 @@ define @vpload_nxv8i64(* %ptr, @llvm.vp.load.nxv8i64.p0nxv8i64(* %ptr, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
index 0ccbf2d826672..f145f67e9dca0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
@@ -16,7 +16,7 @@ define @foo( %x, @llvm.vp.fmul.nxv1f64( %x, %y, %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
index 0916d264874bb..86a60e425f819 100644
--- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
@@ -53,7 +53,7 @@ body: |
   ; CHECK-NEXT: $v0 = COPY [[COPY]]
   ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
   ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
-  ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 /* e64 */, 1 :: (load (s512) from %ir.a, align 8)
+  ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 /* e64 */, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8)
   ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
   ; CHECK-NEXT: PseudoRET implicit $v8m8
 %1:vr = COPY $v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
index 
1cb7fc0e6c31e..48c3fbb8e9bde 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll @@ -50,7 +50,7 @@ define i64 @test_vleff_nxv8i8_mask( %maskedoff, %val, i8* %base,