Skip to content

Commit

Permalink
[RISCV] Support mask policy for RVV IR intrinsics.
Browse files Browse the repository at this point in the history
Add the UsesMaskPolicy flag to indicate that an operation's result
would be affected by the mask policy (e.g., masked operations).

It means RISCVInsertVSETVLI should decide the mask policy according
to the mask policy operand or the passthru operand.
If UsesMaskPolicy is false (e.g., unmasked, store, and reduction operations),
the mask policy could be either mask undisturbed or mask agnostic.
Currently, RISCVInsertVSETVLI defaults operations that use the mask policy
to MA, and all others to MU, so that the existing mask policy is not
changed for unmasked operations.

Add masked-tama, masked-tamu, masked-tuma and masked-tumu test cases.
I didn't add tests for all operations because most of the implementations
use the same pseudo multiclass. Some tests may be duplicated across
different files (e.g., masked vmacc with tumu appears in both vmacc-rv32.ll
and masked-tumu). I think having separate tests dedicated to policy makes
the testing clearer.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D120226
  • Loading branch information
zakk0610 committed Mar 22, 2022
1 parent 0ff19b1 commit abb5a98
Show file tree
Hide file tree
Showing 10 changed files with 4,955 additions and 107 deletions.
55 changes: 31 additions & 24 deletions llvm/include/llvm/IR/IntrinsicsRISCV.td
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For unit stride load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSLoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
Expand All @@ -231,7 +231,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For unit stride fault-only-first load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Input: (maskedoff, pointer, mask, vl, policy)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
Expand All @@ -255,7 +255,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For strided load with mask
// Input: (maskedoff, pointer, stride, mask, vl, ta)
// Input: (maskedoff, pointer, stride, mask, vl, policy)
class RISCVSLoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
Expand All @@ -277,7 +277,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For indexed load with mask
// Input: (maskedoff, pointer, index, mask, vl, ta)
// Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVILoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
Expand Down Expand Up @@ -358,7 +358,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For destination vector type is the same as first source vector (with mask).
// Input: (vector_in, mask, vl, ta)
// Input: (vector_in, vector_in, mask, vl, policy)
class RISCVUnaryAAMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
Expand All @@ -367,7 +367,8 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
class RISCVUnaryAAMaskTU
// Input: (passthru, vector_in, vector_in, mask, vl)
class RISCVCompress
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
Expand All @@ -392,7 +393,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int_vector_in, vl, ta)
// Input: (vector_in, vector_in, int_vector_in, vl, policy)
class RISCVRGatherVVMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
Expand All @@ -411,7 +412,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int16_vector_in, vl, ta)
// Input: (vector_in, vector_in, int16_vector_in, vl, policy)
class RISCVRGatherEI16VVMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
Expand All @@ -426,14 +427,14 @@ let TargetPrefix = "riscv" in {
// Input: (passthru, vector_in, xlen_in, vl)
class RISCVGatherVXNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
LLVMMatchType<1>],
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
// For destination vector type is the same as first source vector (with mask).
// Second operand is XLen.
// Input: (maskedoff, vector_in, xlen_in, mask, vl, ta)
// Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
class RISCVGatherVXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
Expand All @@ -453,7 +454,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
Expand All @@ -475,7 +476,7 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryAAShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
Expand All @@ -495,7 +496,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is NOT the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryABXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
Expand All @@ -517,7 +518,7 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is NOT the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryABShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
Expand Down Expand Up @@ -615,7 +616,7 @@ let TargetPrefix = "riscv" in {
}
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
Expand All @@ -639,7 +640,7 @@ let TargetPrefix = "riscv" in {
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
// The second source operand matches the destination type or is an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryAAShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
Expand All @@ -662,7 +663,7 @@ let TargetPrefix = "riscv" in {
// For Saturating binary operations with mask.
// The destination vector type is NOT the same as first source vector (with mask).
// The second source operand matches the destination type or is an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryABShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
Expand All @@ -671,13 +672,15 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
let VLOperand = 4;
}
// Input: (vector_in, vector_in, vector_in/scalar_in, vl)
class RISCVTernaryAAAXNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
LLVMMatchType<1>],
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
// Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVTernaryAAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
Expand All @@ -686,6 +689,7 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 4;
}
// NoMask Vector Multiply-Add operations, its first operand can not be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryAAXANoMask
: Intrinsic<[llvm_anyvector_ty],
Expand All @@ -695,6 +699,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 3;
}
// Mask Vector Multiply-Add operations, its first operand can not be undef.
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryAAXAMask
: Intrinsic<[llvm_anyvector_ty],
Expand All @@ -705,6 +710,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 4;
}
// NoMask Widening Vector Multiply-Add operations, its first operand can not be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryWideNoMask
: Intrinsic< [llvm_anyvector_ty],
Expand All @@ -714,6 +720,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 3;
}
// Mask Widening Vector Multiply-Add operations, its first operand can not be undef.
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryWideMask
: Intrinsic< [llvm_anyvector_ty],
Expand Down Expand Up @@ -772,7 +779,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For destination vector type is NOT the same as source vector (with mask).
// Input: (maskedoff, vector_in, mask, vl, ta)
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVUnaryABMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
Expand Down Expand Up @@ -824,7 +831,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For Conversion unary operations with mask.
// Input: (maskedoff, vector_in, mask, vl, ta)
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVConversionMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
Expand All @@ -844,7 +851,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 1;
}
// For unit stride segment load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSSegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
Expand All @@ -870,7 +877,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 1;
}
// For unit stride fault-only-first segment load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Input: (maskedoff, pointer, mask, vl, policy)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
Expand All @@ -896,7 +903,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For stride segment load with mask
// Input: (maskedoff, pointer, offset, mask, vl, ta)
// Input: (maskedoff, pointer, offset, mask, vl, policy)
class RISCVSSegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
Expand All @@ -920,7 +927,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For indexed segment load with mask
// Input: (maskedoff, pointer, index, mask, vl, ta)
// Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVISegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
Expand Down Expand Up @@ -1360,7 +1367,7 @@ let TargetPrefix = "riscv" in {
defm vrgather_vx : RISCVRGatherVX;
defm vrgatherei16_vv : RISCVRGatherEI16VV;

def "int_riscv_vcompress" : RISCVUnaryAAMaskTU;
def "int_riscv_vcompress" : RISCVCompress;

defm vaaddu : RISCVSaturatingBinaryAAX;
defm vaadd : RISCVSaturatingBinaryAAX;
Expand Down
13 changes: 12 additions & 1 deletion llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,13 @@ enum {
// Pseudos.
IsRVVWideningReductionShift = HasVecPolicyOpShift + 1,
IsRVVWideningReductionMask = 1 << IsRVVWideningReductionShift,

// Does this instruction care about the mask policy? If it does not, the
// mask policy could be either agnostic or undisturbed. For example, the
// results of unmasked, store, and reduction operations would not be
// affected by the mask policy, so the compiler is free to select either one.
UsesMaskPolicyShift = IsRVVWideningReductionShift + 1,
UsesMaskPolicyMask = 1 << UsesMaskPolicyShift,
};

// Match with the definitions in RISCVInstrFormatsV.td
Expand All @@ -110,8 +117,8 @@ enum VLMUL : uint8_t {
};

enum {
TAIL_UNDISTURBED = 0,
TAIL_AGNOSTIC = 1,
MASK_AGNOSTIC = 2,
};

// Helper functions to read TSFlags.
Expand Down Expand Up @@ -156,6 +163,10 @@ static inline bool hasVecPolicyOp(uint64_t TSFlags) {
static inline bool isRVVWideningReduction(uint64_t TSFlags) {
return TSFlags & IsRVVWideningReductionMask;
}
/// \returns true if mask policy is valid for the instruction.
static inline bool UsesMaskPolicy(uint64_t TSFlags) {
return TSFlags & UsesMaskPolicyMask;
}

// RISC-V Specific Machine Operand Flags
enum {
Expand Down
45 changes: 31 additions & 14 deletions llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -502,29 +502,46 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
unsigned NumOperands = MI.getNumExplicitOperands();
bool HasPolicy = RISCVII::hasVecPolicyOp(TSFlags);

// Default to tail agnostic unless the destination is tied to a source.
// Unless the source is undef. In that case the user would have some control
// over the tail values. Some pseudo instructions force a tail agnostic policy
// despite having a tied def.
// If the instruction has policy argument, use the argument.
// If there is no policy argument, default to tail agnostic unless the
// destination is tied to a source. Unless the source is undef. In that case
// the user would have some control over the policy values. Some pseudo
// instructions force a tail agnostic policy despite having a tied def.
bool ForceTailAgnostic = RISCVII::doesForceTailAgnostic(TSFlags);
bool TailAgnostic = true;
// If the instruction has policy argument, use the argument.
bool UsesMaskPolicy = RISCVII::UsesMaskPolicy(TSFlags);
// FIXME: Could we look at the instructions above or below to choose a
// matching mask policy and reduce the number of vsetvli instructions? The
// default mask policy is agnostic if the instruction uses the mask policy,
// otherwise undisturbed. Because most masked operations use mask
// undisturbed, we could possibly reduce the vsetvli instructions between
// masked and unmasked instruction sequences.
bool MaskAgnostic = UsesMaskPolicy;
unsigned UseOpIdx;
if (HasPolicy) {
const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
TailAgnostic = Op.getImm() & 0x1;
}

unsigned UseOpIdx;
if (!(ForceTailAgnostic || (HasPolicy && TailAgnostic)) &&
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
uint64_t Policy = Op.getImm();
assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
"Invalid Policy Value");
// Although in some cases, mismatched passthru/maskedoff with policy value
// does not make sense (ex. tied operand is IMPLICIT_DEF with non-TAMA
// policy, or tied operand is not IMPLICIT_DEF with TAMA policy), but users
// have set the policy value explicitly, so compiler would not fix it.
TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
} else if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
TailAgnostic = false;
if (UsesMaskPolicy)
MaskAgnostic = false;
// If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
MachineInstr *UseMI = MRI->getVRegDef(UseMO.getReg());
if (UseMI) {
UseMI = elideCopies(UseMI, MRI);
if (UseMI && UseMI->isImplicitDef())
if (UseMI && UseMI->isImplicitDef()) {
TailAgnostic = true;
if (UsesMaskPolicy)
MaskAgnostic = true;
}
}
}

Expand Down Expand Up @@ -559,8 +576,8 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
}
} else
InstrInfo.setAVLReg(RISCV::NoRegister);
InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
/*MaskAgnostic*/ false, MaskRegOp, StoreOp, ScalarMovOp);
InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, MaskRegOp, StoreOp,
ScalarMovOp);

return InstrInfo;
}
Expand Down
5 changes: 4 additions & 1 deletion llvm/lib/Target/RISCV/RISCVInstrFormats.td
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,10 @@ class RVInst<dag outs, dag ins, string opcodestr, string argstr,
let TSFlags{16} = HasVecPolicyOp;

bit IsRVVWideningReduction = 0;
let TSFlags{17} = IsRVVWideningReduction;
let TSFlags{17} = IsRVVWideningReduction;

bit UsesMaskPolicy = 0;
let TSFlags{18} = UsesMaskPolicy;
}

// Pseudo instructions
Expand Down
Loading

0 comments on commit abb5a98

Please sign in to comment.