diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 8cf76d2de3fda..a718dc487386f 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -781,6 +781,15 @@ class IRBuilderBase {
   CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
                                 Value *Mask = nullptr);
 
+  /// Create a call to Masked Expand Load intrinsic
+  CallInst *CreateMaskedExpandLoad(Type *Ty, Value *Ptr, Value *Mask = nullptr,
+                                   Value *PassThru = nullptr,
+                                   const Twine &Name = "");
+
+  /// Create a call to Masked Compress Store intrinsic
+  CallInst *CreateMaskedCompressStore(Value *Val, Value *Ptr,
+                                      Value *Mask = nullptr);
+
   /// Create an assume intrinsic call that allows the optimizer to
   /// assume that the provided condition will be true.
   ///
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 47967b12179dc..8a46a71186c89 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -700,6 +700,51 @@ CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
   return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
 }
 
+/// Create a call to Masked Expand Load intrinsic
+/// \p Ty        - vector type to load
+/// \p Ptr       - base pointer for the load
+/// \p Mask      - vector of booleans which indicates what vector lanes should
+///                be accessed in memory
+/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
+///                of the result
+/// \p Name      - name of the result variable
+CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
+                                                Value *Mask, Value *PassThru,
+                                                const Twine &Name) {
+  auto *PtrTy = cast<PointerType>(Ptr->getType());
+  assert(Ty->isVectorTy() && "Type should be vector");
+  assert(PtrTy->isOpaqueOrPointeeTypeMatches(
+             cast<FixedVectorType>(Ty)->getElementType()) &&
+         "Wrong element type");
+  assert(Mask && "Mask should not be all-ones (null)");
+  if (!PassThru)
+    PassThru = UndefValue::get(Ty);
+  Type *OverloadedTypes[] = {Ty};
+  Value *Ops[] = {Ptr, Mask, PassThru};
+  return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
+                               OverloadedTypes, Name);
+}
+
+/// Create a call to Masked Compress Store intrinsic
+/// \p Val       - data to be stored,
+/// \p Ptr       - base pointer for the store
+/// \p Mask      - vector of booleans which indicates what vector lanes should
+///                be accessed in memory
+CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
+                                                   Value *Mask) {
+  auto *PtrTy = cast<PointerType>(Ptr->getType());
+  Type *DataTy = Val->getType();
+  assert(DataTy->isVectorTy() && "Val should be a vector");
+  assert(PtrTy->isOpaqueOrPointeeTypeMatches(
+             cast<FixedVectorType>(DataTy)->getElementType()) &&
+         "Wrong element type");
+  assert(Mask && "Mask should not be all-ones (null)");
+  Type *OverloadedTypes[] = {DataTy};
+  Value *Ops[] = {Val, Ptr, Mask};
+  return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
+                               OverloadedTypes);
+}
+
 template <typename T0, typename T1, typename T2, typename T3>
 static std::vector<Value *>
 getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,