[TTI][ASan][RISCV] Move InterestingMemoryOperand to Analysis and embed in MemIntrinsicInfo #157863
Conversation
@llvm/pr-subscribers-compiler-rt-sanitizer @llvm/pr-subscribers-backend-risc-v
Author: Hank Chang (HankChang736)
Changes: [TTI][ASan][RISCV] Move InterestingMemoryOperand to Analysis and embed in MemIntrinsicInfo
Previously, ASan treated target intrinsics as black boxes, so it could not emit accurate checks for them. This patch makes SmallVector<InterestingMemoryOperand> a member of MemIntrinsicInfo so that targets can describe their memory intrinsics to ASan through TTI. Note:
1. This patch moves InterestingMemoryOperand from Transforms to Analysis.
2. It extends MemIntrinsicInfo with a SmallVector<InterestingMemoryOperand> member.
3. It does not support RVV indexed/segment loads/stores.
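As a rough illustration of the flow this enables (not part of the patch, and with a hypothetical helper name), a pass can query TTI for the target-described accesses instead of treating the intrinsic as opaque:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Hypothetical helper: ask TTI whether it understands this target intrinsic
// and, if so, collect the memory operands the target described for it.
static void collectIntrinsicAccesses(
    IntrinsicInst *II, const TargetTransformInfo &TTI,
    SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  MemIntrinsicInfo Info;
  // Targets that recognize II (e.g. RISC-V vle/vse/vlse/vsse in this patch)
  // fill Info.Interesting with pointer use, access type, alignment, mask,
  // EVL and stride; unknown intrinsics simply return false.
  if (TTI.getTgtMemIntrinsic(II, Info))
    Interesting.append(Info.Interesting.begin(), Info.Interesting.end());
}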
Patch is 176.61 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/157863.diff 8 Files Affected:
diff --git a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h
new file mode 100644
index 0000000000000..69960e6f012d4
--- /dev/null
+++ b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h
@@ -0,0 +1,54 @@
+//===- InterestingMemoryOperand.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines InterestingMemoryOperand class that is used when getting
+// the information of a memory reference instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INTERESTINGMEMORYOPERAND_H
+#define LLVM_ANALYSIS_INTERESTINGMEMORYOPERAND_H
+
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/TypeSize.h"
+
+namespace llvm {
+class InterestingMemoryOperand {
+public:
+ Use *PtrUse;
+ bool IsWrite;
+ Type *OpType;
+ TypeSize TypeStoreSize = TypeSize::getFixed(0);
+ MaybeAlign Alignment;
+ // The mask Value, if we're looking at a masked load/store.
+ Value *MaybeMask;
+ // The EVL Value, if we're looking at a vp intrinsic.
+ Value *MaybeEVL;
+ // The Stride Value, if we're looking at a strided load/store.
+ Value *MaybeStride;
+
+ InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
+ class Type *OpType, MaybeAlign Alignment,
+ Value *MaybeMask = nullptr,
+ Value *MaybeEVL = nullptr,
+ Value *MaybeStride = nullptr)
+ : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
+ MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
+ const DataLayout &DL = I->getDataLayout();
+ TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
+ PtrUse = &I->getOperandUse(OperandNo);
+ }
+
+ Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
+
+ Value *getPtr() { return PtrUse->get(); }
+};
+
+} // namespace llvm
+#endif
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index a5e98bb7bc137..17d03ffb020b9 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -24,6 +24,7 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/IVDescriptors.h"
+#include "llvm/Analysis/InterestingMemoryOperand.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
@@ -87,6 +88,8 @@ struct MemIntrinsicInfo {
bool WriteMem = false;
bool IsVolatile = false;
+ SmallVector<InterestingMemoryOperand, 1> Interesting;
+
bool isUnordered() const {
return (Ordering == AtomicOrdering::NotAtomic ||
Ordering == AtomicOrdering::Unordered) &&
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
index 4e0e9010b42f0..6d2def3d2b72d 100644
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -14,6 +14,7 @@
#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/InterestingMemoryOperand.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
@@ -21,38 +22,6 @@
#include "llvm/IR/Module.h"
namespace llvm {
-
-class InterestingMemoryOperand {
-public:
- Use *PtrUse;
- bool IsWrite;
- Type *OpType;
- TypeSize TypeStoreSize = TypeSize::getFixed(0);
- MaybeAlign Alignment;
- // The mask Value, if we're looking at a masked load/store.
- Value *MaybeMask;
- // The EVL Value, if we're looking at a vp intrinsic.
- Value *MaybeEVL;
- // The Stride Value, if we're looking at a strided load/store.
- Value *MaybeStride;
-
- InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
- class Type *OpType, MaybeAlign Alignment,
- Value *MaybeMask = nullptr,
- Value *MaybeEVL = nullptr,
- Value *MaybeStride = nullptr)
- : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
- MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
- const DataLayout &DL = I->getDataLayout();
- TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
- PtrUse = &I->getOperandUse(OperandNo);
- }
-
- Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
-
- Value *getPtr() { return PtrUse->get(); }
-};
-
// Get AddressSanitizer parameters.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
bool IsKasan, uint64_t *ShadowBase,
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 899806bf37348..0921463988a05 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 1ca513214f67c..d9a93ec3ae89f 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -15,6 +15,7 @@
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include <cmath>
#include <optional>
@@ -2703,6 +2704,82 @@ void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
BaseT::getPeelingPreferences(L, SE, PP);
}
+bool RISCVTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
+ MemIntrinsicInfo &Info) const {
+ const DataLayout &DL = getDataLayout();
+ Intrinsic::ID IID = Inst->getIntrinsicID();
+ LLVMContext &C = Inst->getContext();
+ bool HasMask = false;
+ switch (IID) {
+ case Intrinsic::riscv_vle_mask:
+ case Intrinsic::riscv_vse_mask:
+ HasMask = true;
+ [[fallthrough]];
+ case Intrinsic::riscv_vle:
+ case Intrinsic::riscv_vse: {
+ // Intrinsic interface:
+ // riscv_vle(merge, ptr, vl)
+ // riscv_vle_mask(merge, ptr, mask, vl, policy)
+ // riscv_vse(val, ptr, vl)
+ // riscv_vse_mask(val, ptr, mask, vl, policy)
+ bool IsWrite = Inst->getType()->isVoidTy();
+ Type *Ty = IsWrite ? Inst->getArgOperand(0)->getType() : Inst->getType();
+ const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID);
+ unsigned VLIndex = RVVIInfo->VLOperand;
+ unsigned PtrOperandNo = VLIndex - 1 - HasMask;
+ MaybeAlign Alignment =
+ Inst->getArgOperand(PtrOperandNo)->getPointerAlignment(DL);
+ Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
+ Value *Mask = ConstantInt::getTrue(MaskType);
+ if (HasMask)
+ Mask = Inst->getArgOperand(VLIndex - 1);
+ Value *EVL = Inst->getArgOperand(VLIndex);
+ Info.Interesting.emplace_back(Inst, PtrOperandNo, IsWrite, Ty, Alignment,
+ Mask, EVL);
+ return true;
+ }
+ case Intrinsic::riscv_vlse_mask:
+ case Intrinsic::riscv_vsse_mask:
+ HasMask = true;
+ [[fallthrough]];
+ case Intrinsic::riscv_vlse:
+ case Intrinsic::riscv_vsse: {
+ // Intrinsic interface:
+ // riscv_vlse(merge, ptr, stride, vl)
+ // riscv_vlse_mask(merge, ptr, stride, mask, vl, policy)
+ // riscv_vsse(val, ptr, stride, vl)
+ // riscv_vsse_mask(val, ptr, stride, mask, vl, policy)
+ bool IsWrite = Inst->getType()->isVoidTy();
+ Type *Ty = IsWrite ? Inst->getArgOperand(0)->getType() : Inst->getType();
+ const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID);
+ unsigned VLIndex = RVVIInfo->VLOperand;
+ unsigned PtrOperandNo = VLIndex - 2 - HasMask;
+ MaybeAlign Alignment =
+ Inst->getArgOperand(PtrOperandNo)->getPointerAlignment(DL);
+
+ Value *Stride = Inst->getArgOperand(PtrOperandNo + 1);
+ // Use the pointer alignment as the element alignment if the stride is a
+ // multiple of the pointer alignment. Otherwise, the element alignment
+ // should be the greatest common divisor of pointer alignment and stride.
+ // For simplicity, just consider unalignment for elements.
+ unsigned PointerAlign = Alignment.valueOrOne().value();
+ if (!isa<ConstantInt>(Stride) ||
+ cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
+ Alignment = Align(1);
+
+ Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
+ Value *Mask = ConstantInt::getTrue(MaskType);
+ if (HasMask)
+ Mask = Inst->getArgOperand(VLIndex - 1);
+ Value *EVL = Inst->getArgOperand(VLIndex);
+ Info.Interesting.emplace_back(Inst, PtrOperandNo, IsWrite, Ty, Alignment,
+ Mask, EVL, Stride);
+ return true;
+ }
+ }
+ return false;
+}
+
unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) const {
if (Ty->isVectorTy()) {
// f16 with only zvfhmin and bf16 will be promoted to f32
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 6bd7d51daff69..356b15ae7afa3 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -158,6 +158,9 @@ class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
TTI::PeelingPreferences &PP) const override;
+ bool getTgtMemIntrinsic(IntrinsicInst *Inst,
+ MemIntrinsicInfo &Info) const override;
+
unsigned getMinVectorRegisterBitWidth() const override {
return ST->useRVVForFixedLengthVectors() ? 16 : 0;
}
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 42c3d4a4f4c46..a9125c4bc47a5 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -29,6 +29,7 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Demangle/Demangle.h"
@@ -803,7 +804,8 @@ struct AddressSanitizer {
bool ignoreAccess(Instruction *Inst, Value *Ptr);
void getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+ Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
+ const TargetTransformInfo *TTI);
void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
InterestingMemoryOperand &O, bool UseCalls,
@@ -843,7 +845,7 @@ struct AddressSanitizer {
void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool suppressInstrumentationSiteForDebug(int &Instrumented);
- bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
+ bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI);
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void markEscapedLocalAllocas(Function &F);
@@ -1314,7 +1316,8 @@ PreservedAnalyses AddressSanitizerPass::run(Module &M,
Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
Options.UseAfterScope, Options.UseAfterReturn);
const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
- Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
+ const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
+ Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
}
Modified |= ModuleSanitizer.instrumentModule();
if (!Modified)
@@ -1452,7 +1455,8 @@ bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
}
void AddressSanitizer::getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+ Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
+ const TargetTransformInfo *TTI) {
// Do not instrument the load fetching the dynamic shadow address.
if (LocalDynamicShadow == I)
return;
@@ -1570,6 +1574,12 @@ void AddressSanitizer::getInterestingMemoryOperands(
break;
}
default:
+ if (auto *II = dyn_cast<IntrinsicInst>(I)) {
+ MemIntrinsicInfo IntrInfo;
+ if (TTI->getTgtMemIntrinsic(II, IntrInfo))
+ Interesting = IntrInfo.Interesting;
+ return;
+ }
for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
ignoreAccess(I, CI->getArgOperand(ArgNo)))
@@ -2985,7 +2995,8 @@ bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
}
bool AddressSanitizer::instrumentFunction(Function &F,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI,
+ const TargetTransformInfo *TTI) {
bool FunctionModified = false;
// Do not apply any instrumentation for naked functions.
@@ -3038,7 +3049,7 @@ bool AddressSanitizer::instrumentFunction(Function &F,
if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
continue;
SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
- getInterestingMemoryOperands(&Inst, InterestingOperands);
+ getInterestingMemoryOperands(&Inst, InterestingOperands, TTI);
if (!InterestingOperands.empty()) {
for (auto &Operand : InterestingOperands) {
diff --git a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
new file mode 100644
index 0000000000000..34b88daca4025
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
@@ -0,0 +1,2304 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=riscv64 -mattr=+v -passes=asan \
+; RUN: -asan-instrumentation-with-call-threshold=0 -S | FileCheck %s
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ i64)
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* align 4 %0, i64 %1) sanitize_address {
+; CHECK-LABEL: @intrinsic_vle_v_nxv1i32_nxv1i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[TMP1:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP12:%.*]]
+; CHECK: 4:
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[TMP5]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP4]] ], [ [[IV_NEXT:%.*]], [[TMP11:%.*]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <vscale x 1 x i1> splat (i1 true), i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP11]]
+; CHECK: 8:
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP0:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; CHECK-NEXT: call void @__asan_load4(i64 [[TMP10]])
+; CHECK-NEXT: br label [[TMP11]]
+; CHECK: 11:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP6]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP12]]
+; CHECK: 12:
+; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.p0.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64 [[TMP1]])
+; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
+;
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+ <vscale x 1 x i32> undef,
+ <vscale x 1 x i32>* %0,
+ i64 %1)
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i1>,
+ i64,
+ i64)
+define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, <vscale x 1 x i1> %2, i64 %3) sanitize_address {
+; CHECK-LABEL: @intrinsic_vle_mask_v_nxv1i32_nxv1i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP14:%.*]]
+; CHECK: 6:
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP7]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP6]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <vscale x 1 x i1> [[TMP2:%.*]], i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP13]]
+; CHECK: 10:
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64
+; CHECK-NEXT: call void @__asan_load4(i64 [[TMP12]])
+; CHECK-NEXT: br label [[TMP13]]
+; CHECK: 13:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP8]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP14]]
+; CHECK: 14:
+; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.p0.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i1> [[TMP2]], i64 [[TMP3]], i64 1)
+; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
+;
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3, i64 1)
+ ret <vscale x 1 x i32> %a
+}
+
+declare void @llvm.riscv.vse.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ i64)
+define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, i64 %2) sanitize_address {
+; CHECK-LABEL: @intrinsic_vse_v_nxv1i32_nxv1i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP13:%.*]]
+; CHECK: 5:
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP6]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP12:%.*]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 1 x i1> splat (i1 true), i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP12]]
+; CHECK: 9:
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] t...
[truncated]
@llvm/pr-subscribers-llvm-transforms
You can test this locally with the following command:
git-clang-format --diff origin/main HEAD --extensions cpp,h -- llvm/include/llvm/Analysis/InterestingMemoryOperand.h llvm/include/llvm/Analysis/TargetTransformInfo.h llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h llvm/lib/Analysis/TargetTransformInfo.cpp llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
View the diff from clang-format here:
diff --git a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h
index 7a3815544..abcf9a841 100644
--- a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h
+++ b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h
@@ -52,4 +52,4 @@ public:
} // namespace llvm
-#endif // LLVM_ANALYSIS_INTERESTINGMEMORYOPERAND_H
+#endif // LLVM_ANALYSIS_INTERESTINGMEMORYOPERAND_H
You can test this locally with the following command:
git diff -U0 --pickaxe-regex -S '([^a-zA-Z0-9#_-]undef[^a-zA-Z0-9_-]|UndefValue::get)' 'HEAD~1' HEAD llvm/include/llvm/Analysis/InterestingMemoryOperand.h llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll llvm/include/llvm/Analysis/TargetTransformInfo.h llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h llvm/lib/Analysis/TargetTransformInfo.cpp llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
The following files introduce new uses of undef:
Undef is now deprecated and should only be used in the rare cases where no replacement is possible. For example, a load of uninitialized memory yields undef. In tests, avoid using undef and poison where possible.

For example, this is considered a bad practice:

define void @fn() {
  ...
  br i1 undef, ...
}

Please use the following instead:

define void @fn(i1 %cond) {
  ...
  br i1 %cond, ...
}

Please refer to the Undefined Behavior Manual for more information.
Force-pushed 98d58c4 to 06e9ef8 (Compare)
Does this replace #135198?
lgtm
[TTI][ASan][RISCV] Move InterestingMemoryOperand to Analysis and embed in MemIntrinsicInfo. Previously, ASan treated target intrinsics as black boxes, so it could not emit accurate checks for them. This patch makes SmallVector<InterestingMemoryOperand> a member of MemIntrinsicInfo so that targets can describe their memory intrinsics to ASan through TTI. Note: 1. This patch moves InterestingMemoryOperand from Transforms to Analysis. 2. It extends MemIntrinsicInfo with a SmallVector<InterestingMemoryOperand> member. 3. It does not support RVV indexed/segment loads/stores.
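For illustration only, a hedged sketch of what another target could provide through the same hook — the intrinsic ID and the (ptr, mask, passthru) operand layout are assumptions, not part of this patch, and the RISC-V handling in the diff above is the authoritative implementation; the same headers as the earlier sketch are assumed:

// Hypothetical helper mirroring the pattern used by the RISC-V code in this
// patch: describe one masked load so ASan can instrument its pointer operand.
static bool describeMaskedLoad(IntrinsicInst *Inst, Intrinsic::ID AssumedID,
                               MemIntrinsicInfo &Info) {
  if (Inst->getIntrinsicID() != AssumedID) // assumed intrinsic ID
    return false;
  const DataLayout &DL = Inst->getDataLayout();
  Type *Ty = Inst->getType();                        // loaded vector type
  MaybeAlign Alignment =
      Inst->getArgOperand(0)->getPointerAlignment(DL); // ptr assumed at operand 0
  Info.Interesting.emplace_back(Inst, /*OperandNo=*/0, /*IsWrite=*/false, Ty,
                                Alignment,
                                /*MaybeMask=*/Inst->getArgOperand(1));
  return true;
}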
Force-pushed c25adb6 to 660ae74 (Compare)
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/144/builds/35844
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/10/builds/13801
Reverted because it broke the buildbots. Also, I should have been clearer: please fix the linter complaint.
…o Analysis and embed in MemIntrinsicInfo" (#159700) Reverts llvm/llvm-project#157863
It seems that the failing cases all contain the same error message. Maybe adding `; REQUIRES: riscv-registered-target` to the failing test case can resolve the CI failure?
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/160/builds/25176
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/180/builds/25316
…and embed in MemIntrinsicInfo (llvm#157863)
…nd embed in MemIntrinsicInfo #157863 (#159713) [Previously reverted due to failures on asan-rvv-intrinsics.ll; the test case is RISC-V-only and the failures were triggered by other targets.] Relands #157863 and adds `; REQUIRES: riscv-registered-target` to the test case so that configurations without a registered RISC-V target skip it. Previously, ASan treated target intrinsics as black boxes, so it could not emit accurate checks for them. This patch makes SmallVector<InterestingMemoryOperand> a member of MemIntrinsicInfo so that targets can describe their memory intrinsics to ASan through TTI. Note: 1. This patch moves InterestingMemoryOperand from Transforms to Analysis. 2. It extends MemIntrinsicInfo with a SmallVector<InterestingMemoryOperand> member. 3. It does not support RVV indexed/segment loads/stores.
… Analysis and embed in MemIntrinsicInfo #157863 (#159713)
…d in MemIntrinsicInfo (llvm#157863)
…and embed in MemIntrinsicInfo" (llvm#159700) Reverts llvm#157863
…nd embed in MemIntrinsicInfo llvm#157863 (llvm#159713)
Previously, ASan treated target intrinsics as black boxes, so it could not emit accurate checks for them. This patch makes SmallVector<InterestingMemoryOperand> a member of MemIntrinsicInfo so that targets can describe their memory intrinsics to ASan through TTI.
Note:
1. This patch moves InterestingMemoryOperand from Transforms to Analysis.
2. It extends MemIntrinsicInfo with a SmallVector<InterestingMemoryOperand> member.
3. It does not support RVV indexed/segment loads/stores.