Adapt to github.com/rapidsai/rmm/pull/1221 which moves allocator callbacks

The allocator callbacks now live in their own submodules (so that RMM
does not, for example, import PyTorch unless required) and so must be
explicitly imported.
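
As a minimal migration sketch (assuming an RMM build that includes the new
submodules from the upstream PR; the PyTorch and Numba callbacks move
similarly under rmm.allocators.torch and rmm.allocators.numba), callers
switch from the old top-level attribute to an explicit import:

    import cupy
    import rmm
    # Old: cupy.cuda.set_allocator(rmm.rmm_cupy_allocator)
    # New: the CuPy callback lives in its own submodule
    from rmm.allocators.cupy import rmm_cupy_allocator

    # Set up an RMM pool, then route CuPy allocations through it
    rmm.reinitialize(pool_allocator=True)
    cupy.cuda.set_allocator(rmm_cupy_allocator)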
wence- committed Feb 24, 2023
1 parent 75b6d80 commit 90b2bf6
Showing 3 changed files with 7 additions and 3 deletions.
3 changes: 2 additions & 1 deletion python/examples/basic.py
@@ -117,13 +117,14 @@ def main():
         import cupy as xp
 
         import rmm
+        from rmm.allocators.cupy import rmm_cupy_allocator
 
         rmm.reinitialize(
             pool_allocator=True,
             managed_memory=False,
         )
         xp.cuda.runtime.setDevice(0)
-        xp.cuda.set_allocator(rmm.rmm_cupy_allocator)
+        xp.cuda.set_allocator(rmm_cupy_allocator)
     else:
         import numpy as xp
 
4 changes: 3 additions & 1 deletion python/ucxx/benchmarks/cudf_merge.py
@@ -242,10 +242,12 @@ def _get_worker_command(

 async def worker(rank, eps, args):
     # Setting current device and make RMM use it
+    from rmm.allocators.cupy import rmm_cupy_allocator
+
     rmm.reinitialize(pool_allocator=True, initial_pool_size=args.rmm_init_pool_size)
 
     # Make cupy use RMM
-    cupy.cuda.set_allocator(rmm.rmm_cupy_allocator)
+    cupy.cuda.set_allocator(rmm_cupy_allocator)
 
     df1 = generate_chunk(rank, args.chunk_size, args.n_chunks, "build", args.frac_match)
     df2 = generate_chunk(rank, args.chunk_size, args.n_chunks, "other", args.frac_match)
3 changes: 2 additions & 1 deletion python/ucxx/benchmarks/utils.py
@@ -58,13 +58,14 @@ def get_allocator(
         import cupy as xp
 
         import rmm
+        from rmm.allocators.cupy import rmm_cupy_allocator
 
         rmm.reinitialize(
             pool_allocator=True,
             managed_memory=rmm_managed_memory,
             initial_pool_size=rmm_init_pool_size,
         )
-        xp.cuda.set_allocator(rmm.rmm_cupy_allocator)
+        xp.cuda.set_allocator(rmm_cupy_allocator)
 
     return xp

