diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp
index 142b9c38e5fcb3..26e4d53c348090 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp
@@ -33,6 +33,7 @@
 #include "llvm/IR/Value.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Pass.h"
+#include "llvm/ProfileData/InstrProf.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Transforms/Instrumentation.h"
@@ -170,6 +171,9 @@ class SanitizerBinaryMetadata {
   // Returns the section end marker name.
   Twine getSectionEnd(StringRef SectionSuffix);
 
+  // Returns true if the access to the address should be considered "atomic".
+  bool pretendAtomicAccess(Value *Addr);
+
   Module &Mod;
   const SanitizerBinaryMetadataOptions Options;
   const Triple TargetTriple;
@@ -338,6 +342,29 @@ bool useAfterReturnUnsafe(Instruction &I) {
   return false;
 }
 
+bool SanitizerBinaryMetadata::pretendAtomicAccess(Value *Addr) {
+  assert(Addr && "Expected non-null Addr");
+
+  Addr = Addr->stripInBoundsOffsets();
+  auto *GV = dyn_cast<GlobalVariable>(Addr);
+  if (!GV)
+    return false;
+
+  if (GV->hasSection()) {
+    const auto OF = Triple(Mod.getTargetTriple()).getObjectFormat();
+    const auto ProfSec =
+        getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false);
+    if (GV->getSection().endswith(ProfSec))
+      return true;
+  }
+
+  if (GV->getName().startswith("__llvm_gcov") ||
+      GV->getName().startswith("__llvm_gcda"))
+    return true;
+
+  return false;
+}
+
 bool SanitizerBinaryMetadata::runOn(Instruction &I, MetadataInfoSet &MIS,
                                     MDBuilder &MDB, uint32_t &FeatureMask) {
   SmallVector<const MetadataInfo *, 1> InstMetadata;
@@ -350,7 +377,21 @@ bool SanitizerBinaryMetadata::runOn(Instruction &I, MetadataInfoSet &MIS,
 
   if (Options.Atomics && I.mayReadOrWriteMemory()) {
     auto SSID = getAtomicSyncScopeID(&I);
-    if (SSID.has_value() && *SSID != SyncScope::SingleThread) {
+    bool IsAtomic = SSID.has_value() && *SSID != SyncScope::SingleThread;
+
+    if (!IsAtomic) {
+      // Check to pretend some compiler-generated accesses are atomic, to avoid
+      // false positives in data-race analysis.
+      Value *Addr = nullptr;
+      if (auto *SI = dyn_cast<StoreInst>(&I))
+        Addr = SI->getPointerOperand();
+      else if (auto *LI = dyn_cast<LoadInst>(&I))
+        Addr = LI->getPointerOperand();
+      if (Addr)
+        IsAtomic = pretendAtomicAccess(Addr);
+    }
+
+    if (IsAtomic) {
       NumMetadataAtomics++;
       InstMetadata.push_back(&MetadataInfo::Atomics);
     }
diff --git a/llvm/test/Instrumentation/SanitizerBinaryMetadata/pretend-atomic-access.ll b/llvm/test/Instrumentation/SanitizerBinaryMetadata/pretend-atomic-access.ll
new file mode 100644
index 00000000000000..92e8c543ed4f64
--- /dev/null
+++ b/llvm/test/Instrumentation/SanitizerBinaryMetadata/pretend-atomic-access.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes='module(sanmd-module)' -sanitizer-metadata-atomics -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@__profc_test_gep = private global [1 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
+@__profc_test_bitcast = private global [2 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
+@__profc_test_bitcast_foo = private global [1 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
+
+@__llvm_gcov_ctr = internal global [1 x i64] zeroinitializer
+@__llvm_gcov_ctr.1 = internal global [1 x i64] zeroinitializer
+@__llvm_gcov_global_state_pred = internal global i32 0
+@__llvm_gcda_foo = internal global i32 0
+
+define i32 @test_gep() sanitize_thread {
+; CHECK-LABEL: @test_gep(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PGOCOUNT:%.*]] = load i64, ptr @__profc_test_gep, align 8, !pcsections !2
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[PGOCOUNT]], 1
+; CHECK-NEXT:    store i64 [[TMP0]], ptr @__profc_test_gep, align 8, !pcsections !2
+; CHECK-NEXT:    [[GCOVCOUNT:%.*]] = load i64, ptr @__llvm_gcov_ctr, align 8, !pcsections !2
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[GCOVCOUNT]], 1
+; CHECK-NEXT:    store i64 [[TMP1]], ptr @__llvm_gcov_ctr, align 8, !pcsections !2
+; CHECK-NEXT:    [[GCOVCOUNT_1:%.*]] = load i64, ptr @__llvm_gcov_ctr.1, align 8, !pcsections !2
+; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[GCOVCOUNT_1]], 1
+; CHECK-NEXT:    store i64 [[TMP2]], ptr @__llvm_gcov_ctr.1, align 8, !pcsections !2
+; CHECK-NEXT:    ret i32 1
+;
+entry:
+  %pgocount = load i64, ptr @__profc_test_gep
+  %0 = add i64 %pgocount, 1
+  store i64 %0, ptr @__profc_test_gep
+
+  %gcovcount = load i64, ptr @__llvm_gcov_ctr
+  %1 = add i64 %gcovcount, 1
+  store i64 %1, ptr @__llvm_gcov_ctr
+
+  %gcovcount.1 = load i64, ptr @__llvm_gcov_ctr.1
+  %2 = add i64 %gcovcount.1, 1
+  store i64 %2, ptr @__llvm_gcov_ctr.1
+
+  ret i32 1
+}
+
+define i32 @test_bitcast() sanitize_thread {
+; CHECK-LABEL: @test_bitcast(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr @__profc_test_bitcast, align 8, !pcsections !2
+; CHECK-NEXT:    [[DOTPROMOTED5:%.*]] = load i64, ptr @__profc_test_bitcast_foo, align 8, !pcsections !2
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[DOTPROMOTED5]], 10
+; CHECK-NEXT:    [[TMP2:%.*]] = add <2 x i64> [[TMP0]], <i64 1, i64 10>
+; CHECK-NEXT:    store <2 x i64> [[TMP2]], ptr @__profc_test_bitcast, align 8, !pcsections !2
+; CHECK-NEXT:    store i64 [[TMP1]], ptr @__profc_test_bitcast_foo, align 8, !pcsections !2
+; CHECK-NEXT:    ret i32 undef
+;
+entry:
+  %0 = load <2 x i64>, ptr @__profc_test_bitcast, align 8
+  %.promoted5 = load i64, ptr @__profc_test_bitcast_foo, align 8
+  %1 = add i64 %.promoted5, 10
+  %2 = add <2 x i64> %0, <i64 1, i64 10>
+  store <2 x i64> %2, ptr @__profc_test_bitcast, align 8
+  store i64 %1, ptr @__profc_test_bitcast_foo, align 8
+  ret i32 undef
+}
+
+define void @test_load() sanitize_thread {
+; CHECK-LABEL: @test_load(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__llvm_gcov_global_state_pred, align 4, !pcsections !2
+; CHECK-NEXT:    store i32 1, ptr @__llvm_gcov_global_state_pred, align 4, !pcsections !2
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__llvm_gcda_foo, align 4, !pcsections !2
+; CHECK-NEXT:    store i32 1, ptr @__llvm_gcda_foo, align 4, !pcsections !2
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load i32, ptr @__llvm_gcov_global_state_pred
+  store i32 1, ptr @__llvm_gcov_global_state_pred
+
+  %1 = load i32, ptr @__llvm_gcda_foo
+  store i32 1, ptr @__llvm_gcda_foo
+
+  ret void
+}
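For readers skimming the patch, the logic split across the two .cpp hunks can be read as one predicate: strip in-bounds offsets from a load/store address, and treat the access as atomic if the underlying global is a PGO counter (by section) or a gcov counter (by name prefix). Below is a minimal standalone sketch of that predicate, not the patch code itself; the free-function form and the name `touchesProfilingCounter` are illustrative, and header paths assume the LLVM tree of this era (`Triple.h` later moved under `llvm/TargetParser/`, and `startswith`/`endswith` were later renamed `starts_with`/`ends_with`).

```cpp
#include "llvm/ADT/Triple.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/ProfileData/InstrProf.h"

using namespace llvm;

// Sketch: does this plain (non-atomic) load/store touch compiler-generated
// profiling state that data-race analysis should treat as if it were atomic?
static bool touchesProfilingCounter(const Instruction &I, const Module &M) {
  const Value *Addr = nullptr;
  if (auto *SI = dyn_cast<StoreInst>(&I))
    Addr = SI->getPointerOperand();
  else if (auto *LI = dyn_cast<LoadInst>(&I))
    Addr = LI->getPointerOperand();
  if (!Addr)
    return false;

  // Look through constant in-bounds GEPs to the underlying global.
  Addr = Addr->stripInBoundsOffsets();
  auto *GV = dyn_cast<GlobalVariable>(Addr);
  if (!GV)
    return false;

  // PGO counters live in the counters section; its exact name depends on the
  // object format, hence the query instead of a hard-coded string.
  if (GV->hasSection()) {
    auto OF = Triple(M.getTargetTriple()).getObjectFormat();
    auto ProfSec =
        getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false);
    if (GV->getSection().endswith(ProfSec))
      return true;
  }

  // gcov counters are ordinary globals recognized by their name prefix.
  return GV->getName().startswith("__llvm_gcov") ||
         GV->getName().startswith("__llvm_gcda");
}
```

In the patch itself this is split between `pretendAtomicAccess` (the global-variable check) and its caller in `runOn`, which only falls back to it when `getAtomicSyncScopeID` says the instruction is not already atomic.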