
Provide explicit pool size and avoid RMM detail APIs #14741

Merged
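The same change repeats across the benchmark fixtures and examples below: every pool_memory_resource is now constructed with an explicit initial size, obtained from RMM's public rmm::percent_of_free_device_memory helper, instead of relying on the previous implicit default of half the free device memory. A condensed sketch of the new pattern (the function name is illustrative, not taken from the diff):

#include <rmm/cuda_device.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>

#include <memory>

// Illustrative only: construct a pool with an explicit initial size of
// 50% of the currently free device memory, as done throughout this PR.
inline auto make_pool_with_explicit_size()
{
  auto cuda_mr = std::make_shared<rmm::mr::cuda_memory_resource>();
  return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(
    cuda_mr, rmm::percent_of_free_device_memory(50));
}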
6 changes: 4 additions & 2 deletions cpp/benchmarks/fixture/benchmark_fixture.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2023, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
#pragma once

#include <benchmark/benchmark.h>
+ #include <rmm/cuda_device.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
@@ -33,7 +34,8 @@ inline auto make_pool_instance()
{
static rmm::mr::cuda_memory_resource cuda_mr;
static auto pool_mr =
- std::make_shared<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>>(&cuda_mr);
+ std::make_shared<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>>(
+   &cuda_mr, rmm::percent_of_free_device_memory(50));
Contributor
I'm thinking about whether we should read the percentage passed to percent_of_free_device_memory from an environment variable, instead of hard-coding it like this in many places. That way we could control the value at runtime.
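One way that suggestion could look (a rough sketch, not part of this PR; the LIBCUDF_POOL_PERCENT variable name and the fallback behavior are invented for illustration):

#include <rmm/cuda_device.hpp>

#include <cstddef>
#include <cstdlib>
#include <exception>
#include <string>

// Hypothetical helper: read the pool percentage from an environment variable
// and fall back to 50 (the value hard-coded in this PR) when it is unset or
// cannot be parsed.
inline std::size_t pool_size_from_env()
{
  int percent = 50;
  if (char const* env = std::getenv("LIBCUDF_POOL_PERCENT")) {
    try {
      percent = std::stoi(env);
    } catch (std::exception const&) {
      // keep the default on parse failure
    }
  }
  return rmm::percent_of_free_device_memory(percent);
}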

Member Author
This PR is just fixing cuDF so that it has the same behavior, and doesn't break when I merge upcoming deprecation PRs. I will file a cuDF issue for this request, but I think the cuDF team should handle it.

return pool_mr;
}
} // namespace
9 changes: 6 additions & 3 deletions cpp/benchmarks/fixture/nvbench_fixture.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023, NVIDIA CORPORATION.
+ * Copyright (c) 2021-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@

#include <cudf/utilities/error.hpp>

+ #include <rmm/cuda_device.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
@@ -42,7 +43,8 @@ struct nvbench_base_fixture {

inline auto make_pool()
{
- return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda());
+ return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(
+   make_cuda(), rmm::percent_of_free_device_memory(50));
}

inline auto make_async() { return std::make_shared<rmm::mr::cuda_async_memory_resource>(); }
@@ -56,7 +58,8 @@

inline auto make_managed_pool()
{
- return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_managed());
+ return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(
+   make_managed(), rmm::percent_of_free_device_memory(50));
}

inline std::shared_ptr<rmm::mr::device_memory_resource> create_memory_resource(
5 changes: 3 additions & 2 deletions cpp/examples/basic/src/process_csv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2021-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@
#include <cudf/io/csv.hpp>
#include <cudf/table/table.hpp>

+ #include <rmm/cuda_device.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
@@ -82,7 +83,7 @@ int main(int argc, char** argv)
// Construct a memory pool using the CUDA memory resource
// Using a memory pool for device memory allocations is important for good performance in libcudf.
// The pool defaults to allocating half of the available GPU memory.
- rmm::mr::pool_memory_resource mr{&cuda_mr};
+ rmm::mr::pool_memory_resource mr{&cuda_mr, rmm::percent_of_free_device_memory(50)};

// Set the pool resource to be used by default for all device memory allocations
// Note: It is the user's responsibility to ensure the `mr` object stays alive for the duration of
8 changes: 6 additions & 2 deletions cpp/examples/nested_types/deduplication.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, NVIDIA CORPORATION.
+ * Copyright (c) 2023-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@
#include <cudf/stream_compaction.hpp>
#include <cudf/table/table_view.hpp>

+ #include <rmm/cuda_device.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
@@ -57,7 +58,10 @@
std::shared_ptr<rmm::mr::device_memory_resource> create_memory_resource(bool pool)
{
auto cuda_mr = std::make_shared<rmm::mr::cuda_memory_resource>();
- if (pool) { return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(cuda_mr); }
+ if (pool) {
+   return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(
+     cuda_mr, rmm::percent_of_free_device_memory(50));
+ }
return cuda_mr;
}

6 changes: 4 additions & 2 deletions cpp/examples/strings/common.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023, NVIDIA CORPORATION.
+ * Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,6 +22,7 @@
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>

+ #include <rmm/cuda_device.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
@@ -60,7 +61,8 @@ auto make_cuda_mr() { return std::make_shared<rmm::mr::cuda_memory_resource>();
*/
auto make_pool_mr()
{
- return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda_mr());
+ return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(
+   make_cuda_mr(), rmm::percent_of_free_device_memory(50));
}

/**
6 changes: 3 additions & 3 deletions cpp/include/cudf_test/testing_main.hpp
@@ -21,6 +21,7 @@

#include <cudf/utilities/error.hpp>

+ #include <rmm/aligned.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/binning_memory_resource.hpp>
@@ -43,9 +44,8 @@ inline auto make_managed() { return std::make_shared<rmm::mr::managed_memory_res

inline auto make_pool()
{
- auto const [free, total] = rmm::detail::available_device_memory();
- auto min_alloc =
-   rmm::detail::align_down(std::min(free, total / 10), rmm::detail::CUDA_ALLOCATION_ALIGNMENT);
+ auto const [free, total] = rmm::available_device_memory();
+ auto min_alloc = rmm::align_down(std::min(free, total / 10), rmm::CUDA_ALLOCATION_ALIGNMENT);
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda(), min_alloc);
}
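For reference, the mapping from the removed rmm::detail calls to their public replacements, as a standalone sketch (assumes an RMM version where these symbols are public, roughly 24.02 and later):

#include <rmm/aligned.hpp>
#include <rmm/cuda_device.hpp>

#include <algorithm>
#include <cstddef>

// Initial pool size used by the test fixture: the smaller of all currently
// free device memory and one tenth of total memory, aligned down to CUDA's
// allocation alignment.
inline std::size_t default_test_pool_size()
{
  auto const [free, total] = rmm::available_device_memory();  // was rmm::detail::available_device_memory()
  return rmm::align_down(std::min(free, total / 10),          // was rmm::detail::align_down(...)
                         rmm::CUDA_ALLOCATION_ALIGNMENT);     // was rmm::detail::CUDA_ALLOCATION_ALIGNMENT
}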
