Disable CPU EP's allocator's arena when address sanitizer is enabled (microsoft#19485)

### Description
Disable the CPU EP allocator's arena when AddressSanitizer is enabled, because the
arena masks problems. For example, the code in
onnxruntime/test/quantization/quantization_test.cc had a memory leak: it allocated
a buffer but never freed it. Most memory-leak checkers cannot detect that, because
the buffer came from an arena and the arena itself was eventually freed.
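
A minimal sketch of the masking effect, using the IAllocator API that appears in the
diffs below (`alloc` is the test's CPU allocator; the buffer size is illustrative):

```c++
// Leaked: allocated but never freed. When `alloc` is arena-backed, the arena
// releases its whole chunk at process shutdown, so LeakSanitizer sees no leak.
void* data = alloc->Alloc(16 * sizeof(float));

// With the arena disabled, ASan reports the leak. The RAII form this commit
// switches to frees the buffer when it goes out of scope:
IAllocatorUniquePtr<float> buffer = IAllocator::MakeUniquePtr<float>(alloc, 16);
```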

### Motivation and Context
Provide better memory-leak detection coverage.
snnn committed Feb 12, 2024
1 parent c831031 commit 9cb97ee
Showing 5 changed files with 14 additions and 13 deletions.
3 changes: 2 additions & 1 deletion onnxruntime/core/providers/cpu/cpu_execution_provider.cc
@@ -2,6 +2,7 @@
 // Licensed under the MIT License.
 
 #include "core/providers/cpu/cpu_execution_provider.h"
+#include <absl/base/config.h>
 #include "core/framework/op_kernel.h"
 #include "core/framework/kernel_registry.h"
 #include "core/mlas/inc/mlas.h"
@@ -29,7 +30,7 @@ CPUExecutionProvider::CPUExecutionProvider(const CPUExecutionProviderInfo& info)
 
 std::vector<AllocatorPtr> CPUExecutionProvider::CreatePreferredAllocators() {
   bool create_arena = info_.create_arena;
-#if defined(USE_JEMALLOC) || defined(USE_MIMALLOC)
+#if defined(USE_JEMALLOC) || defined(USE_MIMALLOC) || defined(ABSL_HAVE_ADDRESS_SANITIZER)
   // JEMalloc/mimalloc already have memory pool, so just use device allocator.
   create_arena = false;
 #elif !(defined(__amd64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(_M_ARM64))
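For reference, `ABSL_HAVE_ADDRESS_SANITIZER` is defined by `absl/base/config.h` (the
header added above) when the compiler itself signals an ASan build, so the guard fires
for Clang, GCC, and MSVC sanitizer builds alike. Roughly (a simplified sketch, not the
exact Abseil source):

```c++
// Simplified: absl/base/config.h derives the macro from compiler signals.
#if defined(__SANITIZE_ADDRESS__)        // GCC, and MSVC with /fsanitize=address
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#elif defined(__has_feature)
#if __has_feature(address_sanitizer)     // Clang with -fsanitize=address
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#endif
#endif
```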
3 changes: 2 additions & 1 deletion onnxruntime/test/framework/allocator_test.cc
@@ -1,5 +1,6 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
+#include <absl/base/config.h>
 
 #include "core/framework/allocator.h"
 
@@ -15,7 +16,7 @@ TEST(AllocatorTest, CPUAllocatorTest) {
   EXPECT_EQ(cpu_arena->Info().id, 0);
 
   // arena is disabled for CPUExecutionProvider on x86 and JEMalloc
-#if (defined(__amd64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(_M_ARM64)) && !defined(USE_JEMALLOC) && !defined(USE_MIMALLOC)
+#if (defined(__amd64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(_M_ARM64)) && !defined(USE_JEMALLOC) && !defined(USE_MIMALLOC) && !defined(ABSL_HAVE_ADDRESS_SANITIZER)
   EXPECT_EQ(cpu_arena->Info().alloc_type, OrtAllocatorType::OrtArenaAllocator);
 #else
   EXPECT_EQ(cpu_arena->Info().alloc_type, OrtAllocatorType::OrtDeviceAllocator);
3 changes: 2 additions & 1 deletion onnxruntime/test/framework/session_state_test.cc
@@ -2,6 +2,7 @@
 // Licensed under the MIT License.
 
 #include <iostream>
+#include <absl/base/config.h>
 
 #include "asserts.h"
 #include "core/framework/execution_providers.h"
@@ -215,7 +216,7 @@ TEST_P(SessionStateTestP, TestInitializerProcessing) {
 // if the relevant session option config flag is set
 // For this test we need to enable the arena-based allocator which is not supported on x86 builds, so
 // enable this test only on x64 builds
-#if (defined(__amd64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(_M_ARM64)) && !defined(USE_MIMALLOC)
+#if (defined(__amd64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(_M_ARM64)) && !defined(USE_MIMALLOC) && !defined(ABSL_HAVE_ADDRESS_SANITIZER)
 TEST(SessionStateTest, TestInitializerMemoryAllocatedUsingNonArenaMemory) {
   AllocatorPtr cpu_allocator = std::make_shared<CPUAllocator>();
   // Part 1: Feature turned ON (i.e.) allocate from non-arena memory
4 changes: 2 additions & 2 deletions onnxruntime/test/framework/tensor_test.cc
@@ -6,7 +6,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-
+#include <absl/base/config.h>
 #include <sstream>
 
 namespace onnxruntime {
@@ -138,7 +138,7 @@ TEST(TensorTest, EmptyTensorTest) {
   EXPECT_EQ(location.id, 0);
 
   // arena is disabled for CPUExecutionProvider on x86 and JEMalloc
-#if (defined(__amd64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(_M_ARM64)) && !defined(USE_JEMALLOC) && !defined(USE_MIMALLOC)
+#if (defined(__amd64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(_M_ARM64)) && !defined(USE_JEMALLOC) && !defined(USE_MIMALLOC) && !defined(ABSL_HAVE_ADDRESS_SANITIZER)
   EXPECT_EQ(location.alloc_type, OrtAllocatorType::OrtArenaAllocator);
 #else
   EXPECT_EQ(location.alloc_type, OrtAllocatorType::OrtDeviceAllocator);
14 changes: 6 additions & 8 deletions onnxruntime/test/quantization/quantization_test.cc
@@ -99,24 +99,22 @@ void EnsureQuantizedTensorParam(const float scale, const T zero_point) {
 
   // First, create the scale tensor:
   auto alloc = TestCPUExecutionProvider()->CreatePreferredAllocators()[0];
-  auto num_bytes = shape.Size() * sizeof(float);
-  void* data = alloc->Alloc(num_bytes);
-  float* float_data = static_cast<float*>(data);
+  IAllocatorUniquePtr<float> buffer = IAllocator::MakeUniquePtr<float>(alloc, shape.Size());
+  float* float_data = buffer.get();
   float_data[0] = scale;
   Tensor scale_tensor(DataTypeImpl::GetType<float>(),
                       shape,
-                      data,
+                      float_data,
                       alloc->Info(),
                       /*offset=*/0);
 
   // Next, create the zero_point tensor:
-  auto T_num_bytes = shape.Size() * sizeof(T);
-  void* T_data = alloc->Alloc(T_num_bytes);
-  T* typed_data = static_cast<T*>(T_data);
+  IAllocatorUniquePtr<T> buffer2 = IAllocator::MakeUniquePtr<T>(alloc, shape.Size());
+  T* typed_data = buffer2.get();
   typed_data[0] = zero_point;
   Tensor zero_point_tensor(DataTypeImpl::GetType<T>(),
                            shape,
-                           T_data,
+                           typed_data,
                            alloc->Info(),
                            /*offset=*/0);
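The leak fix above works because `IAllocator::MakeUniquePtr` returns a smart pointer
whose deleter routes back to the originating allocator. A rough, self-contained sketch
of the idea — the allocator stand-in here is hypothetical; onnxruntime's real
implementation lives in core/framework/allocator.h:

```c++
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <memory>

// Minimal stand-in for onnxruntime's IAllocator interface (hypothetical).
struct IAllocator {
  virtual void* Alloc(size_t size) { return std::malloc(size); }
  virtual void Free(void* p) { std::free(p); }
  virtual ~IAllocator() = default;
};
using AllocatorPtr = std::shared_ptr<IAllocator>;

// A unique_ptr whose deleter returns the memory to the originating allocator.
template <typename T>
using IAllocatorUniquePtr = std::unique_ptr<T, std::function<void(T*)>>;

template <typename T>
IAllocatorUniquePtr<T> MakeUniquePtr(AllocatorPtr alloc, size_t count) {
  T* p = static_cast<T*>(alloc->Alloc(count * sizeof(T)));
  return IAllocatorUniquePtr<T>(p, [alloc](T* q) { alloc->Free(q); });
}
```

With this shape, `buffer` and `buffer2` in the test above release their memory as soon
as EnsureQuantizedTensorParam returns — exactly the deallocation the arena was hiding.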
