Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
90 changes: 66 additions & 24 deletions cuda_core/cuda/core/experimental/_memoryview.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from ._dlpack cimport *

import functools
import warnings
from typing import Optional

import numpy
Expand Down Expand Up @@ -78,30 +79,72 @@ cdef class StridedMemoryView:
bint readonly
object exporting_obj

# If using dlpack, this is a strong reference to the result of
# obj.__dlpack__() so we can lazily create shape and strides from
# it later. If using CAI, this is a reference to the source
# `__cuda_array_interface__` object.
cdef object metadata

# The tensor object if obj has __dlpack__, otherwise must be NULL
cdef DLTensor *dl_tensor

# Memoized properties
cdef tuple _shape
cdef tuple _strides
cdef bint _strides_init # Has the strides tuple been init'ed?
cdef object _dtype

def __init__(self, obj=None, stream_ptr=None):
cdef:
# If using dlpack, this is a strong reference to the result of
# obj.__dlpack__() so we can lazily create shape and strides from
# it later. If using CAI, this is a reference to the source
# `__cuda_array_interface__` object.
object metadata

# The tensor object if obj has __dlpack__, otherwise must be NULL
DLTensor *dl_tensor

# Memoized properties
tuple _shape
tuple _strides
# a `None` value for _strides has defined meaning in dlpack and
# the cuda array interface, meaning C order, contiguous.
#
# this flag helps prevent unnecessary recomputation of _strides
bint _strides_init
object _dtype

def __init__(self, obj: object = None, stream_ptr: int | None = None) -> None:
cdef str clsname = self.__class__.__name__
if obj is not None:
# populate self's attributes
if check_has_dlpack(obj):
warnings.warn(
f"Constructing a {clsname} directly from a DLPack-supporting object is deprecated; "
"Use `StridedMemoryView.from_dlpack` or `StridedMemoryView.from_any_interface` instead."
)
Comment on lines +107 to +110
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we make this a FutureWarning (https://docs.python.org/3/library/exceptions.html#FutureWarning) or whatever warning type is most appropriate for this deprecation?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the one we want is DeprecationWarning. Those are usually off by default, but tools like pytest will collect and display them at the end, so they get shown to developers using the library during their development rather than to "end users". FutureWarning is always on, so is meant for non-developers to see. (Think of like a data processing script that will require the user to change the format of their data in the future...) Fuzzy boundaries between those, of course.

view_as_dlpack(obj, stream_ptr, self)
else:
warnings.warn(
f"Constructing a {clsname} directly from a CUDA-array-interface-supporting object is deprecated; "
"Use `StridedMemoryView.from_cuda_array_interface` or `StridedMemoryView.from_any_interface` instead."
)
view_as_cai(obj, stream_ptr, self)
else:
pass
warnings.warn(
f"Constructing an empty {clsname} is deprecated; "
"use one of the classmethods `from_dlpack`, `from_cuda_array_interface` or `from_any_interface` "
"to construct a StridedMemoryView from an object"
)

@classmethod
def from_dlpack(cls, obj: object, stream_ptr: int | None=None) -> StridedMemoryView:
    """Create a new view over ``obj`` via the DLPack protocol.

    Parameters
    ----------
    obj
        An object exposing ``__dlpack__`` / ``__dlpack_device__``.
    stream_ptr
        Pointer to the consumer stream, or ``None``.
    """
    cdef StridedMemoryView view
    with warnings.catch_warnings():
        # cls() emits a deprecation warning for empty construction;
        # silence it here, since this classmethod is the supported path.
        warnings.simplefilter("ignore")
        view = cls()
    view_as_dlpack(obj, stream_ptr, view)
    return view

@classmethod
def from_cuda_array_interface(cls, obj: object, stream_ptr: int | None=None) -> StridedMemoryView:
    """Create a new view over ``obj`` via ``__cuda_array_interface__``.

    Parameters
    ----------
    obj
        An object exposing ``__cuda_array_interface__``.
    stream_ptr
        Pointer to the consumer stream, or ``None``.
    """
    cdef StridedMemoryView view
    with warnings.catch_warnings():
        # cls() emits a deprecation warning for empty construction;
        # silence it here, since this classmethod is the supported path.
        warnings.simplefilter("ignore")
        view = cls()
    view_as_cai(obj, stream_ptr, view)
    return view

@classmethod
def from_any_interface(cls, obj: object, stream_ptr: int | None = None) -> StridedMemoryView:
    """Create a new view over ``obj``, preferring DLPack over the
    CUDA array interface when both are available."""
    # Dispatch to whichever protocol the object supports.
    ctor = cls.from_dlpack if check_has_dlpack(obj) else cls.from_cuda_array_interface
    return ctor(obj, stream_ptr)

def __dealloc__(self):
if self.dl_tensor == NULL:
Expand All @@ -121,7 +164,7 @@ cdef class StridedMemoryView:
dlm_tensor.deleter(dlm_tensor)

@property
def shape(self) -> tuple[int]:
def shape(self) -> tuple[int, ...]:
if self._shape is None:
if self.exporting_obj is not None:
if self.dl_tensor != NULL:
Expand All @@ -136,7 +179,7 @@ cdef class StridedMemoryView:
return self._shape

@property
def strides(self) -> Optional[tuple[int]]:
def strides(self) -> Optional[tuple[int, ...]]:
cdef int itemsize
if self._strides_init is False:
if self.exporting_obj is not None:
Expand Down Expand Up @@ -193,6 +236,7 @@ cdef str get_simple_repr(obj):
return obj_repr



cdef bint check_has_dlpack(obj) except*:
cdef bint has_dlpack
if hasattr(obj, "__dlpack__") and hasattr(obj, "__dlpack_device__"):
Expand All @@ -206,8 +250,7 @@ cdef bint check_has_dlpack(obj) except*:


cdef class _StridedMemoryViewProxy:

cdef:
cdef readonly:
object obj
bint has_dlpack

Expand All @@ -217,9 +260,9 @@ cdef class _StridedMemoryViewProxy:

cpdef StridedMemoryView view(self, stream_ptr=None):
if self.has_dlpack:
return view_as_dlpack(self.obj, stream_ptr)
return StridedMemoryView.from_dlpack(self.obj, stream_ptr)
else:
return view_as_cai(self.obj, stream_ptr)
return StridedMemoryView.from_cuda_array_interface(self.obj, stream_ptr)


cdef StridedMemoryView view_as_dlpack(obj, stream_ptr, view=None):
Expand Down Expand Up @@ -354,7 +397,6 @@ cdef object dtype_dlpack_to_numpy(DLDataType* dtype):
return numpy.dtype(np_dtype)


# Also generate for Python so we can test this code path
cpdef StridedMemoryView view_as_cai(obj, stream_ptr, view=None):
cdef dict cai_data = obj.__cuda_array_interface__
if cai_data["version"] < 3:
Expand Down
4 changes: 2 additions & 2 deletions cuda_core/tests/test_memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -628,15 +628,15 @@ def test_strided_memory_view_leak():
arr = np.zeros(1048576, dtype=np.uint8)
before = sys.getrefcount(arr)
for idx in range(10):
StridedMemoryView(arr, stream_ptr=-1)
StridedMemoryView.from_any_interface(arr, stream_ptr=-1)
after = sys.getrefcount(arr)
assert before == after


def test_strided_memory_view_refcnt():
# Use Fortran ordering so strides is used
a = np.zeros((64, 4), dtype=np.uint8, order="F")
av = StridedMemoryView(a, stream_ptr=-1)
av = StridedMemoryView.from_any_interface(a, stream_ptr=-1)
# segfaults if refcnt is wrong
assert av.shape[0] == 64
assert sys.getrefcount(av.shape) >= 2
Expand Down
24 changes: 20 additions & 4 deletions cuda_core/tests/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
import numpy as np
import pytest
from cuda.core.experimental import Device
from cuda.core.experimental._memoryview import view_as_cai
from cuda.core.experimental.utils import StridedMemoryView, args_viewable_as_strided_memory


Expand Down Expand Up @@ -78,7 +77,13 @@ def my_func(arr):

def test_strided_memory_view_cpu(self, in_arr):
# stream_ptr=-1 means "the consumer does not care"
view = StridedMemoryView(in_arr, stream_ptr=-1)
view = StridedMemoryView.from_any_interface(in_arr, stream_ptr=-1)
self._check_view(view, in_arr)

def test_strided_memory_view_cpu_init(self, in_arr):
    """Direct construction still works, but must emit a deprecation UserWarning."""
    # stream_ptr=-1 means "the consumer does not care"
    with pytest.warns(UserWarning, match="deprecated"):
        smv = StridedMemoryView(in_arr, stream_ptr=-1)
    self._check_view(smv, in_arr)

def _check_view(self, view, in_arr):
Expand Down Expand Up @@ -147,7 +152,18 @@ def test_strided_memory_view_cpu(self, in_arr, use_stream):
# This is the consumer stream
s = dev.create_stream() if use_stream else None

view = StridedMemoryView(in_arr, stream_ptr=s.handle if s else -1)
view = StridedMemoryView.from_any_interface(in_arr, stream_ptr=s.handle if s else -1)
self._check_view(view, in_arr, dev)

def test_strided_memory_view_init(self, in_arr, use_stream):
    """Direct construction on a device array warns but still yields a valid view."""
    # TODO: use the device fixture?
    dev = Device()
    dev.set_current()
    # This is the consumer stream; -1 means "the consumer does not care".
    stream = dev.create_stream() if use_stream else None
    stream_ptr = stream.handle if stream else -1
    with pytest.warns(UserWarning, match="deprecated"):
        smv = StridedMemoryView(in_arr, stream_ptr=stream_ptr)
    self._check_view(smv, in_arr, dev)

def _check_view(self, view, in_arr, dev):
Expand Down Expand Up @@ -179,7 +195,7 @@ def test_cuda_array_interface_gpu(self, in_arr, use_stream):
# The usual path in `StridedMemoryView` prefers the DLPack interface
# over __cuda_array_interface__, so we call `view_as_cai` directly
# here so we can test the CAI code path.
view = view_as_cai(in_arr, stream_ptr=s.handle if s else -1)
view = StridedMemoryView.from_cuda_array_interface(in_arr, stream_ptr=s.handle if s else -1)
self._check_view(view, in_arr, dev)

def _check_view(self, view, in_arr, dev):
Expand Down
Loading