Skip to content

Commit

Permalink
[scudo] Clean up tests.
Browse files Browse the repository at this point in the history
Modify the tests so that all clang warnings can be turned up to high.

Fix all places flagged by -Wconversion.

Mark a few unused variables and parameters with UNUSED.

For the memtag testing, only compile some tests for 64 bit since
compiling them on 32 bit leads to warnings/errors. All of the tests
are already skipped on 32 bit OSes, so this will not affect any
real tests.

Reviewed By: Chia-hungDuan

Differential Revision: https://reviews.llvm.org/D155749
  • Loading branch information
cferris1000 committed Jul 20, 2023
1 parent 2f34288 commit af41f79
Show file tree
Hide file tree
Showing 8 changed files with 53 additions and 29 deletions.
32 changes: 20 additions & 12 deletions compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
Expand Up @@ -154,9 +154,10 @@ void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
const scudo::uptr Align = 1U << AlignLog;
for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
if (static_cast<scudo::sptr>(1U << SizeLog) + Delta < 0)
if ((1LL << SizeLog) + Delta < 0)
continue;
const scudo::uptr Size = (1U << SizeLog) + Delta;
const scudo::uptr Size =
static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
void *P = Allocator->allocate(Size, Origin, Align);
EXPECT_NE(P, nullptr);
EXPECT_TRUE(Allocator->isOwned(P));
Expand Down Expand Up @@ -333,7 +334,8 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
const char Marker = 0xab;
memset(P, Marker, ReallocSize);
for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
const scudo::uptr NewSize = ReallocSize + Delta;
const scudo::uptr NewSize =
static_cast<scudo::uptr>(static_cast<scudo::sptr>(ReallocSize) + Delta);
void *NewP = Allocator->reallocate(P, NewSize);
EXPECT_EQ(NewP, P);
for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
Expand All @@ -355,11 +357,13 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
std::vector<void *> V;
for (scudo::uptr I = 0; I < 64U; I++)
V.push_back(Allocator->allocate(
rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
static_cast<scudo::uptr>(std::rand()) %
(TypeParam::Primary::SizeClassMap::MaxSize / 2U),
Origin));
Allocator->disable();
Allocator->iterateOverChunks(
0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
[](uintptr_t Base, size_t Size, void *Arg) {
[](uintptr_t Base, UNUSED size_t Size, void *Arg) {
std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
void *P = reinterpret_cast<void *>(Base);
EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
Expand Down Expand Up @@ -444,7 +448,9 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
std::vector<void *> V;
for (scudo::uptr I = 0; I < 64U; I++)
V.push_back(Allocator->allocate(
rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
static_cast<scudo::uptr>(std::rand()) %
(TypeParam::Primary::SizeClassMap::MaxSize / 2U),
Origin));
for (auto P : V)
Allocator->deallocate(P, Origin);

Expand All @@ -463,7 +469,9 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
std::vector<void *> V;
for (scudo::uptr I = 0; I < 64U; I++)
V.push_back(Allocator->allocate(
rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
static_cast<scudo::uptr>(std::rand()) %
(TypeParam::Primary::SizeClassMap::MaxSize / 2U),
Origin));
for (auto P : V)
Allocator->deallocate(P, Origin);

Expand Down Expand Up @@ -494,7 +502,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
}
std::vector<std::pair<void *, scudo::uptr>> V;
for (scudo::uptr I = 0; I < 256U; I++) {
const scudo::uptr Size = std::rand() % 4096U;
const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
void *P = Allocator->allocate(Size, Origin);
// A region could have ran out of memory, resulting in a null P.
if (P)
Expand Down Expand Up @@ -727,17 +735,17 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {

// Regression test: make realloc-in-place happen at the very right end of a
// mapped region.
constexpr int nPtrs = 10000;
for (int i = 1; i < 32; ++i) {
constexpr size_t nPtrs = 10000;
for (scudo::uptr i = 1; i < 32; ++i) {
scudo::uptr Size = 16 * i - 1;
std::vector<void *> Ptrs;
for (int i = 0; i < nPtrs; ++i) {
for (size_t i = 0; i < nPtrs; ++i) {
void *P = Allocator->allocate(Size, Origin);
P = Allocator->reallocate(P, Size + 1);
Ptrs.push_back(P);
}

for (int i = 0; i < nPtrs; ++i)
for (size_t i = 0; i < nPtrs; ++i)
Allocator->deallocate(Ptrs[i], Origin);
}
}
Expand Down
12 changes: 12 additions & 0 deletions compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
Expand Up @@ -76,12 +76,16 @@ TEST_F(MemtagTest, ArchMemoryTagGranuleSize) {
}

TEST_F(MemtagTest, ExtractTag) {
  // The test is already skipped on anything other than 64 bit, and compiling
  // the body on 32 bit leads to warnings/errors, so compile it out entirely.
#if defined(__LP64__)
  // Exercise every possible top-byte value and accumulate the extracted tags
  // into a bitmask; the assertion requires that exactly tags 0..15 are seen.
  uptr SeenTags = 0;
  for (u64 TopByte = 0; TopByte < 0x100; ++TopByte) {
    const u64 TaggedAddr = Addr | (TopByte << 56);
    SeenTags |= (1u << extractTag(TaggedAddr));
  }
  EXPECT_EQ(0xffffull, SeenTags);
#endif
}

TEST_F(MemtagDeathTest, AddFixedTag) {
Expand Down Expand Up @@ -121,10 +125,14 @@ TEST_F(MemtagTest, SelectRandomTag) {
}

TEST_F(MemtagTest, SelectRandomTagWithMask) {
  // The test is already skipped on anything other than 64 bit, and compiling
  // the body on 32 bit leads to warnings/errors, so compile it out entirely.
#if defined(__LP64__)
  // For each bit j set in the mask, the tag selected for Addr must never be
  // j itself; repeat many times since the selection is randomized.
  for (uptr MaskedTag = 0; MaskedTag < 32; ++MaskedTag) {
    const u64 Mask = 1ull << MaskedTag;
    for (uptr Attempt = 0; Attempt < 1000; ++Attempt)
      EXPECT_NE(MaskedTag, extractTag(selectRandomTag(Addr, Mask)));
  }
#endif
}

TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(LoadStoreTagUnaligned)) {
Expand Down Expand Up @@ -158,6 +166,9 @@ TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(StoreTagsUnaligned)) {
}

TEST_F(MemtagTest, StoreTags) {
// The test is already skipped on anything other than 64 bit. But
// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
#if defined(__LP64__)
const uptr MaxTaggedSize = 4 * archMemoryTagGranuleSize();
for (uptr Size = 0; Size <= MaxTaggedSize; ++Size) {
uptr NoTagBegin = Addr + archMemoryTagGranuleSize();
Expand Down Expand Up @@ -186,6 +197,7 @@ TEST_F(MemtagTest, StoreTags) {
// Reset tags without using StoreTags.
MemMap.releasePagesToOS(Addr, BufferSize);
}
#endif
}

} // namespace scudo
Expand Down
7 changes: 4 additions & 3 deletions compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
Expand Up @@ -253,7 +253,8 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
Cache.init(nullptr, Allocator.get());
std::vector<std::pair<scudo::uptr, void *>> V;
for (scudo::uptr I = 0; I < 64U; I++) {
const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
const scudo::uptr Size =
static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize;
const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
void *P = Cache.allocate(ClassId);
V.push_back(std::make_pair(ClassId, P));
Expand Down Expand Up @@ -300,8 +301,8 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
Cv.wait(Lock);
}
for (scudo::uptr I = 0; I < 256U; I++) {
const scudo::uptr Size =
std::rand() % Primary::SizeClassMap::MaxSize / 4;
const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) %
Primary::SizeClassMap::MaxSize / 4;
const scudo::uptr ClassId =
Primary::SizeClassMap::getClassIdBySize(Size);
void *P = Cache.allocate(ClassId);
Expand Down
5 changes: 3 additions & 2 deletions compiler-rt/lib/scudo/standalone/tests/release_test.cpp
Expand Up @@ -134,8 +134,9 @@ TEST(ScudoReleaseTest, FreePagesRangeTracker) {
// Strip trailing '.'-pages before comparing the results as they are not
// going to be reported to range_recorder anyway.
const char *LastX = strrchr(TestCase, 'x');
std::string Expected(TestCase,
LastX == nullptr ? 0 : (LastX - TestCase + 1));
std::string Expected(
TestCase,
LastX == nullptr ? 0U : static_cast<size_t>(LastX - TestCase + 1));
EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
}
}
Expand Down
14 changes: 8 additions & 6 deletions compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
Expand Up @@ -136,10 +136,10 @@ TEST_F(MapAllocatorTest, SecondaryCombinations) {
AlignLog++) {
const scudo::uptr Align = 1U << AlignLog;
for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
if ((1LL << SizeLog) + Delta <= 0)
continue;
const scudo::uptr UserSize =
scudo::roundUp((1U << SizeLog) + Delta, MinAlign);
const scudo::uptr UserSize = scudo::roundUp(
static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign);
const scudo::uptr Size =
HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
void *P = Allocator->allocate(Options, Size, Align);
Expand All @@ -160,7 +160,8 @@ TEST_F(MapAllocatorTest, SecondaryIterate) {
std::vector<void *> V;
const scudo::uptr PageSize = scudo::getPageSizeCached();
for (scudo::uptr I = 0; I < 32U; I++)
V.push_back(Allocator->allocate(Options, (std::rand() % 16) * PageSize));
V.push_back(Allocator->allocate(
Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
auto Lambda = [&V](scudo::uptr Block) {
EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
V.end());
Expand Down Expand Up @@ -215,8 +216,9 @@ struct MapAllocatorWithReleaseTest : public MapAllocatorTest {
}
for (scudo::uptr I = 0; I < 128U; I++) {
// Deallocate 75% of the blocks.
const bool Deallocate = (rand() & 3) != 0;
void *P = Allocator->allocate(Options, (std::rand() % 16) * PageSize);
const bool Deallocate = (std::rand() & 3) != 0;
void *P = Allocator->allocate(
Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
if (Deallocate)
Allocator->deallocate(Options, P);
else
Expand Down
2 changes: 1 addition & 1 deletion compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
Expand Up @@ -38,7 +38,7 @@ template <class Config> class MockAllocator {

// Test-only teardown: forwards to the TSD registry's unmapTestOnly.
void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
// Resets the given cache to a value-initialized (empty) state.
void initCache(CacheT *Cache) { *Cache = {}; }
void commitBack(scudo::TSD<MockAllocator> *TSD) {}
// No-op in this mock; the parameter is marked UNUSED to keep the build clean
// with unused-parameter warnings enabled.
void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
// Accessor for the embedded TSD registry.
TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
// No-op in this mock.
void callPostInitCallback() {}

Expand Down
8 changes: 4 additions & 4 deletions compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
Expand Up @@ -267,7 +267,7 @@ TEST(ScudoWrappersCTest, MallOpt) {

TEST(ScudoWrappersCTest, OtherAlloc) {
#if HAVE_PVALLOC
const size_t PageSize = sysconf(_SC_PAGESIZE);
const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));

void *P = pvalloc(Size);
EXPECT_NE(P, nullptr);
Expand Down Expand Up @@ -329,7 +329,7 @@ TEST(ScudoWrappersCTest, MallInfo2) {
static uintptr_t BoundaryP;
static size_t Count;

static void callback(uintptr_t Base, size_t Size, void *Arg) {
static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
if (scudo::archSupportsMemoryTagging()) {
Base = scudo::untagPointer(Base);
BoundaryP = scudo::untagPointer(BoundaryP);
Expand All @@ -343,7 +343,7 @@ static void callback(uintptr_t Base, size_t Size, void *Arg) {
// aligned on a page, then run the malloc_iterate on both the pages that the
// block is a boundary for. It must only be seen once by the callback function.
TEST(ScudoWrappersCTest, MallocIterateBoundary) {
const size_t PageSize = sysconf(_SC_PAGESIZE);
const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
#if SCUDO_ANDROID
// Android uses a 16 byte alignment for both 32 bit and 64 bit.
const size_t BlockDelta = 16U;
Expand Down Expand Up @@ -461,7 +461,7 @@ static pthread_mutex_t Mutex;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;

static void *enableMalloc(void *Unused) {
static void *enableMalloc(UNUSED void *Unused) {
// Initialize the allocator for this thread.
void *P = malloc(Size);
EXPECT_NE(P, nullptr);
Expand Down
Expand Up @@ -103,7 +103,7 @@ static void stressNew() {
Cv.wait(Lock);
}
for (size_t I = 0; I < 256U; I++) {
const size_t N = std::rand() % 128U;
const size_t N = static_cast<size_t>(std::rand()) % 128U;
uintptr_t *P = new uintptr_t[N];
if (P) {
memset(P, 0x42, sizeof(uintptr_t) * N);
Expand Down

0 comments on commit af41f79

Please sign in to comment.