Add a CPU device check and pyarrow test for cuda (trial)
AlenkaF committed Nov 28, 2023
1 parent f82068e commit 6c886fd
Showing 2 changed files with 24 additions and 2 deletions.
10 changes: 8 additions & 2 deletions cpp/src/arrow/c/dlpack.cc
@@ -19,6 +19,7 @@
 
 #include "arrow/array/array_base.h"
 #include "arrow/c/dlpack_abi.h"
+#include "arrow/device.h"
 #include "arrow/type.h"
 
 namespace arrow {
@@ -104,8 +105,13 @@ Status ExportArray(const std::shared_ptr<Array>& arr, DLManagedTensor** out) {
 
   // Define DLDevice struct
   DLDevice ctx;
-  ctx.device_id = 0;
-  ctx.device_type = DLDeviceType::kDLCPU;
+  if (array_ref->buffers[1]->device_type() == DeviceAllocationType::kCPU) {
+    ctx.device_id = 0;
+    ctx.device_type = DLDeviceType::kDLCPU;
+  } else {
+    return Status::NotImplemented(
+        "DLPack support is implemented only for buffers on CPU device.");
+  }
   dlm_tensor->dl_tensor.device = ctx;
 
   dlm_tensor->dl_tensor.ndim = 1;
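For context, the check above only rejects non-CPU data; arrays whose buffers live on the CPU keep exporting through DLPack. Below is a minimal sketch of that path, not part of this commit, assuming a pyarrow build from this branch (where pa.Array's __dlpack__ goes through the export code patched above) and NumPy 1.23+ for np.from_dlpack:

import numpy as np
import pyarrow as pa

# Sketch only: a primitive array backed by CPU buffers still round-trips
# through the DLPack protocol after this change.
arr = pa.array([1, 2, 3], type=pa.int16())
np_arr = np.from_dlpack(arr)
assert np_arr.dtype == np.int16
assert np_arr.tolist() == [1, 2, 3]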
16 changes: 16 additions & 0 deletions python/pyarrow/tests/test_array.py
@@ -3636,3 +3636,19 @@ def test_dlpack_not_supported():
                        "not supported by DLPack."):
         arr = pa.array([True, False, True])
         np.from_dlpack(arr)
+
+def test_dlpack_cuda_not_supported():
+    cuda = pytest.importorskip("pyarrow.cuda")
+
+    schema = pa.schema([pa.field('f0', pa.int16())])
+    a0 = pa.array([1, 2, 3], type=pa.int16())
+    batch = pa.record_batch([a0], schema=schema)
+
+    cbuf = cuda.serialize_record_batch(batch, cuda.Context(0))
+    cbatch = cuda.read_record_batch(cbuf, batch.schema)
+    carr = cbatch["f0"]
+
+    # CudaBuffers are not yet supported
+    with pytest.raises(NotImplementedError, match="DLPack support is implemented "
+                       "only for buffers on CPU device."):
+        np.from_dlpack(carr)