[Clang][counted_by] Correct signed counted_by values #171260
base: main
Conversation
If the 'counted_by' value is signed, we will incorrectly allow accesses when the value is negative. This has obvious bad effects, as it will allow accessing a huge swath of unallocated memory.

Also clarify and rearrange the parameters to make them more perspicuous.

Fixes: #170987
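For illustration (a minimal sketch, not part of the patch; the struct and function names are hypothetical), this is the kind of pattern the fix hardens: a counted_by flexible array whose count field has a signed type.

struct buf {
  int count;                                      /* signed count field */
  int elems[] __attribute__((counted_by(count)));
};

int read_elem(struct buf *b, int i) {
  /* With -fsanitize=array-bounds, the pre-patch check zero-extended the
   * count and compared `i < count` as unsigned only. A negative count
   * such as -1 therefore behaved like an enormous bound, and out-of-range
   * accesses were allowed instead of trapping. */
  return b->elems[i];
}

With the patch, the emitted check additionally requires a signed count to be positive (the new `icmp sgt` visible in the diff below).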
@llvm/pr-subscribers-clang @llvm/pr-subscribers-clang-codegen

Author: Bill Wendling (bwendling)

Changes: If the 'counted_by' value is signed, we will incorrectly allow accesses when the value is negative. This has obvious bad effects, as it will allow accessing a huge swath of unallocated memory. Also clarify and rearrange the parameters to make them more perspicuous.

Fixes: #170987

Patch is 135.78 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/171260.diff

5 Files Affected:
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 3bde8e1fa2ac3..31d6948f08ad5 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1235,43 +1235,65 @@ llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
return nullptr;
}
-void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
- llvm::Value *Index, QualType IndexType,
+void CodeGenFunction::EmitBoundsCheck(const Expr *ArrayExpr,
+ const Expr *ArrayExprBase,
+ llvm::Value *IndexVal, QualType IndexType,
bool Accessed) {
assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
"should not be called unless adding bounds checks");
const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
getLangOpts().getStrictFlexArraysLevel();
- QualType IndexedType;
- llvm::Value *Bound =
- getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
+ QualType ArrayExprBaseType;
+ llvm::Value *BoundsVal = getArrayIndexingBound(
+ *this, ArrayExprBase, ArrayExprBaseType, StrictFlexArraysLevel);
- EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
+ EmitBoundsCheckImpl(ArrayExpr, ArrayExprBaseType, IndexVal, IndexType,
+ BoundsVal, getContext().getSizeType(), Accessed);
}
-void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
- llvm::Value *Index,
+void CodeGenFunction::EmitBoundsCheckImpl(const Expr *ArrayExpr,
+ QualType ArrayBaseType,
+ llvm::Value *IndexVal,
QualType IndexType,
- QualType IndexedType, bool Accessed) {
- if (!Bound)
+ llvm::Value *BoundsVal,
+ QualType BoundsType, bool Accessed) {
+ if (!BoundsVal)
return;
auto CheckKind = SanitizerKind::SO_ArrayBounds;
auto CheckHandler = SanitizerHandler::OutOfBounds;
SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);
+ // All hail the C implicit type conversion rules!!!
bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
- llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
- llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
+ bool BoundsSigned = BoundsType->isSignedIntegerOrEnumerationType();
+
+ const ASTContext &Ctx = getContext();
+ llvm::Type *Ty = ConvertType(
+ Ctx.getTypeSize(IndexType) >= Ctx.getTypeSize(BoundsType) ? IndexType
+ : BoundsType);
+
+ llvm::Value *IndexInst = Builder.CreateIntCast(IndexVal, Ty, IndexSigned);
+ llvm::Value *BoundsInst = Builder.CreateIntCast(BoundsVal, Ty, false);
llvm::Constant *StaticData[] = {
- EmitCheckSourceLocation(E->getExprLoc()),
- EmitCheckTypeDescriptor(IndexedType),
- EmitCheckTypeDescriptor(IndexType)
+ EmitCheckSourceLocation(ArrayExpr->getExprLoc()),
+ EmitCheckTypeDescriptor(ArrayBaseType),
+ EmitCheckTypeDescriptor(IndexType),
};
- llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
- : Builder.CreateICmpULE(IndexVal, BoundVal);
- EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData, Index);
+
+ llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexInst, BoundsInst)
+ : Builder.CreateICmpULE(IndexInst, BoundsInst);
+
+ if (BoundsSigned) {
+ // Don't allow a negative bounds.
+ llvm::Value *Cmp = Builder.CreateICmpSGT(
+ BoundsVal, llvm::ConstantInt::get(BoundsVal->getType(), 0));
+ Check = Builder.CreateAnd(Cmp, Check);
+ }
+
+ EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData,
+ IndexInst);
}
llvm::MDNode *CodeGenFunction::buildAllocToken(QualType AllocType) {
@@ -4608,9 +4630,10 @@ static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
/// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
/// similar to emit the correct GEP.
void CodeGenFunction::EmitCountedByBoundsChecking(
- const Expr *E, llvm::Value *Idx, Address Addr, QualType IdxTy,
- QualType ArrayTy, bool Accessed, bool FlexibleArray) {
- const auto *ME = dyn_cast<MemberExpr>(E->IgnoreImpCasts());
+ const Expr *ArrayExpr, QualType ArrayType, Address ArrayInst,
+ QualType IndexType, llvm::Value *IndexVal, bool Accessed,
+ bool FlexibleArray) {
+ const auto *ME = dyn_cast<MemberExpr>(ArrayExpr->IgnoreImpCasts());
if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
return;
@@ -4627,11 +4650,11 @@ void CodeGenFunction::EmitCountedByBoundsChecking(
if (std::optional<int64_t> Diff =
getOffsetDifferenceInBits(*this, CountFD, FD)) {
- if (!Addr.isValid()) {
+ if (!ArrayInst.isValid()) {
// An invalid Address indicates we're checking a pointer array access.
// Emit the checked L-Value here.
- LValue LV = EmitCheckedLValue(E, TCK_MemberAccess);
- Addr = LV.getAddress();
+ LValue LV = EmitCheckedLValue(ArrayExpr, TCK_MemberAccess);
+ ArrayInst = LV.getAddress();
}
// FIXME: The 'static_cast' is necessary, otherwise the result turns into a
@@ -4640,17 +4663,19 @@ void CodeGenFunction::EmitCountedByBoundsChecking(
// Create a GEP with the byte offset between the counted object and the
// count and use that to load the count value.
- Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Int8PtrTy, Int8Ty);
+ ArrayInst = Builder.CreatePointerBitCastOrAddrSpaceCast(ArrayInst,
+ Int8PtrTy, Int8Ty);
- llvm::Type *CountTy = ConvertType(CountFD->getType());
- llvm::Value *Res =
- Builder.CreateInBoundsGEP(Int8Ty, Addr.emitRawPointer(*this),
+ llvm::Type *BoundsType = ConvertType(CountFD->getType());
+ llvm::Value *BoundsVal =
+ Builder.CreateInBoundsGEP(Int8Ty, ArrayInst.emitRawPointer(*this),
Builder.getInt32(*Diff), ".counted_by.gep");
- Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
- ".counted_by.load");
+ BoundsVal = Builder.CreateAlignedLoad(BoundsType, BoundsVal, getIntAlign(),
+ ".counted_by.load");
// Now emit the bounds checking.
- EmitBoundsCheckImpl(E, Res, Idx, IdxTy, ArrayTy, Accessed);
+ EmitBoundsCheckImpl(ArrayExpr, ArrayType, IndexVal, IndexType, BoundsVal,
+ CountFD->getType(), Accessed);
}
}
@@ -4796,9 +4821,9 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
if (SanOpts.has(SanitizerKind::ArrayBounds))
- EmitCountedByBoundsChecking(Array, Idx, ArrayLV.getAddress(),
- E->getIdx()->getType(), Array->getType(),
- Accessed, /*FlexibleArray=*/true);
+ EmitCountedByBoundsChecking(Array, Array->getType(), ArrayLV.getAddress(),
+ E->getIdx()->getType(), Idx, Accessed,
+ /*FlexibleArray=*/true);
// Propagate the alignment from the array itself to the result.
QualType arrayType = Array->getType();
@@ -4850,8 +4875,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
if (const auto *CE = dyn_cast_if_present<CastExpr>(Base);
CE && CE->getCastKind() == CK_LValueToRValue)
- EmitCountedByBoundsChecking(CE, Idx, Address::invalid(),
- E->getIdx()->getType(), ptrType, Accessed,
+ EmitCountedByBoundsChecking(CE, ptrType, Address::invalid(),
+ E->getIdx()->getType(), Idx, Accessed,
/*FlexibleArray=*/false);
}
}
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 8c4c1c8c2dc95..664ee1547ccf1 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -3339,11 +3339,12 @@ class CodeGenFunction : public CodeGenTypeCache {
/// Emit a check that \p Base points into an array object, which
/// we can access at index \p Index. \p Accessed should be \c false if we
/// this expression is used as an lvalue, for instance in "&Arr[Idx]".
- void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
- QualType IndexType, bool Accessed);
- void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
- llvm::Value *Index, QualType IndexType,
- QualType IndexedType, bool Accessed);
+ void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase,
+ llvm::Value *Index, QualType IndexType, bool Accessed);
+ void EmitBoundsCheckImpl(const Expr *ArrayExpr, QualType ArrayBaseType,
+ llvm::Value *IndexVal, QualType IndexType,
+ llvm::Value *BoundsVal, QualType BoundsType,
+ bool Accessed);
/// Returns debug info, with additional annotation if
/// CGM.getCodeGenOpts().SanitizeAnnotateDebugInfo[Ordinal] is enabled for
@@ -3372,9 +3373,9 @@ class CodeGenFunction : public CodeGenTypeCache {
// Emit bounds checking for flexible array and pointer members with the
// counted_by attribute.
- void EmitCountedByBoundsChecking(const Expr *E, llvm::Value *Idx,
- Address Addr, QualType IdxTy,
- QualType ArrayTy, bool Accessed,
+ void EmitCountedByBoundsChecking(const Expr *ArrayExpr, QualType ArrayType,
+ Address ArrayInst, QualType IndexType,
+ llvm::Value *IndexVal, bool Accessed,
bool FlexibleArray);
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
diff --git a/clang/test/CodeGen/attr-counted-by-for-pointers.c b/clang/test/CodeGen/attr-counted-by-for-pointers.c
index c5729fd017d8c..7b0be04b51a30 100644
--- a/clang/test/CodeGen/attr-counted-by-for-pointers.c
+++ b/clang/test/CodeGen/attr-counted-by-for-pointers.c
@@ -30,19 +30,21 @@ struct annotated_ptr {
// SANITIZE-WITH-ATTR-LABEL: define dso_local void @test1(
// SANITIZE-WITH-ATTR-SAME: ptr noundef [[P:%.*]], i32 noundef [[INDEX:%.*]], ptr noundef [[VALUE:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
-// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
-// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META6:![0-9]+]]
-// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META6]]
-// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label %[[CONT10:.*]], label %[[HANDLER_OUT_OF_BOUNDS:.*]], !prof [[PROF7:![0-9]+]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp ult i32 [[INDEX]], [[DOTCOUNTED_BY_LOAD]], !nosanitize [[META6:![0-9]+]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[DOTCOUNTED_BY_LOAD]], 0, !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[TMP0]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP2]], label %[[CONT10:.*]], label %[[HANDLER_OUT_OF_BOUNDS:.*]], !prof [[PROF7:![0-9]+]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[HANDLER_OUT_OF_BOUNDS]]:
-// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB2:[0-9]+]], i64 [[IDXPROM]]) #[[ATTR3:[0-9]+]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP3:%.*]] = zext i32 [[INDEX]] to i64, !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB2:[0-9]+]], i64 [[TMP3]]) #[[ATTR3:[0-9]+]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT10]]:
// SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
-// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8:![0-9]+]]
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8:![0-9]+]]
+// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA14:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -83,19 +85,21 @@ void test1(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITH-ATTR-LABEL: define dso_local void @test2(
// SANITIZE-WITH-ATTR-SAME: ptr noundef [[P:%.*]], i32 noundef [[INDEX:%.*]], ptr noundef [[VALUE:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
-// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
-// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META6]]
-// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META6]]
-// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label %[[CONT10:.*]], label %[[HANDLER_OUT_OF_BOUNDS:.*]], !prof [[PROF7]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp ult i32 [[INDEX]], [[DOTCOUNTED_BY_LOAD]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[DOTCOUNTED_BY_LOAD]], 0, !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[TMP0]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP2]], label %[[CONT10:.*]], label %[[HANDLER_OUT_OF_BOUNDS:.*]], !prof [[PROF7]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[HANDLER_OUT_OF_BOUNDS]]:
-// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB4:[0-9]+]], i64 [[IDXPROM]]) #[[ATTR3]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP3:%.*]] = zext i32 [[INDEX]] to i64, !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB4:[0-9]+]], i64 [[TMP3]]) #[[ATTR3]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT10]]:
// SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
-// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8]]
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8]]
+// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA14]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -136,19 +140,21 @@ void test2(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITH-ATTR-LABEL: define dso_local void @test3(
// SANITIZE-WITH-ATTR-SAME: ptr noundef [[P:%.*]], i32 noundef [[INDEX:%.*]], ptr noundef [[VALUE:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
-// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
-// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META6]]
-// SANITIZE-WITH-ATTR-NEXT: [[DOTNOT:%.*]] = icmp ugt i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META6]]
-// SANITIZE-WITH-ATTR-NEXT: br i1 [[DOTNOT]], label %[[HANDLER_OUT_OF_BOUNDS:.*]], label %[[CONT10:.*]], !prof [[PROF16:![0-9]+]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp ule i32 [[INDEX]], [[DOTCOUNTED_BY_LOAD]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[DOTCOUNTED_BY_LOAD]], 0, !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[TMP0]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP2]], label %[[CONT10:.*]], label %[[HANDLER_OUT_OF_BOUNDS:.*]], !prof [[PROF7]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[HANDLER_OUT_OF_BOUNDS]]:
-// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB5:[0-9]+]], i64 [[IDXPROM]]) #[[ATTR3]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP3:%.*]] = zext i32 [[INDEX]] to i64, !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB5:[0-9]+]], i64 [[TMP3]]) #[[ATTR3]], !nosanitize [[META6]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META6]]
// SANITIZE-WITH-ATTR: [[CONT10]]:
// SANITIZE-WITH-ATTR-NEXT: [[BUF:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
-// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8]]
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = load ptr, ptr [[BUF]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA8]]
+// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store ptr [[VALUE]], ptr [[ARRAYIDX]], align 8, !tbaa [[_ZTS3FOOPTR_TBAA14]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -258,24 +264,25 @@ size_t test5(struct annotated_ptr *p, int index) {
return __bdos((struct foo **)((char *)p->buf));
}
-// SANITIZE-WITH-ATTR-LABEL: define dso_local range(i64 0, 17179869177) i64 @test6(
+// SANITIZE-WITH-ATTR-LABEL: define dso_local range(i64 -17179869168, 34359738361) i64 @test6(
// SANITIZE-WITH-ATTR-SAME: ptr noundef [[P:%.*]], i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITH-ATTR-NEXT: [[ENTRY:.*:]]
-// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
-// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META6]]
-// SANITIZE-WITH-ATTR-NEXT: [[DOTNOT:%.*]] = icmp ugt i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META6]]
-// SANITIZE-WITH-ATTR-NEXT: br i1 [[DOTNOT]], label %[[HANDLER_OUT_OF_BOUNDS:.*]], label %[[CONT8:.*]], !prof [[PROF16]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp ule i32 [[INDEX]], [[DOTCOUNTED_BY_LOAD]], !nosanitize [[META6]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[DOTCOUNTED_BY_LOAD]], 0, !nosanitize [[META6]]
+// SANITIZE-WIT...
[truncated]
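To summarize the emitted logic above: when the count field has a signed type, the sanitizer predicate now requires the bound to be strictly positive in addition to the unsigned index comparison, with both operands widened to the larger of the index and bounds types. Below is a minimal C model of that predicate for a signed 32-bit index and count (an illustrative sketch, not code from the patch).

#include <stdbool.h>
#include <stdint.h>

/* Models the post-patch bounds predicate for an element access
 * (Accessed == true, so the comparison is strict '<'). */
static bool counted_by_in_bounds(int32_t index, int32_t count) {
  /* Unsigned index-vs-bound comparison, as before the patch ... */
  bool idx_below_bound = (uint32_t)index < (uint32_t)count;
  /* ... plus the new requirement that a signed count be positive,
   * so -1 can no longer masquerade as a huge unsigned bound. */
  bool bound_positive = count > 0;
  return bound_positive && idx_below_bound;
}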
This lacks new testcases. However, I think that's okay, because the existing tests already show the corrected code generation. If you have suggestions for unit tests, please let me know.
antoniofrighetto left a comment:
LG on my side from the tests update, please wait for an additional look.
I think we need tests for having the counter set to, e.g.
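A hypothetical test along those lines (a sketch only; the concrete value the reviewer suggested is elided above, but given the fix a negative count such as -1 seems the natural candidate; struct and function names are invented):

struct annotated {
  int count;
  int array[] __attribute__((counted_by(count)));
};

int negative_count_access(struct annotated *p, int idx) {
  p->count = -1;        /* negative signed count                          */
  return p->array[idx]; /* pre-patch: allowed for nearly any idx, because
                           -1 zero-extends to a huge unsigned bound;
                           post-patch: the added `count > 0` check makes
                           this access trap under -fsanitize=array-bounds */
}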
If the 'counted_by' value is signed, we will incorrectly allow accesses
when the value is negative. This has obvious bad effects as it will
allow accessing a huge swath of unallocated memory.
Also clarify and rearrange the parameters to make them more
perspicuous.
Fixes: #170987.