diff --git a/cpp/src/arrow/c/dlpack.cc b/cpp/src/arrow/c/dlpack.cc
index 40a9a1a86cf30..cb9fcc55176c0 100644
--- a/cpp/src/arrow/c/dlpack.cc
+++ b/cpp/src/arrow/c/dlpack.cc
@@ -19,6 +19,7 @@
 
 #include "arrow/array/array_base.h"
 #include "arrow/c/dlpack_abi.h"
+#include "arrow/device.h"
 #include "arrow/type.h"
 
 namespace arrow {
@@ -104,8 +105,13 @@ Status ExportArray(const std::shared_ptr<Array>& arr, DLManagedTensor** out) {
 
   // Define DLDevice struct
   DLDevice ctx;
-  ctx.device_id = 0;
-  ctx.device_type = DLDeviceType::kDLCPU;
+  if (array_ref->buffers[1]->device_type() == DeviceAllocationType::kCPU) {
+    ctx.device_id = 0;
+    ctx.device_type = DLDeviceType::kDLCPU;
+  } else {
+    return Status::NotImplemented(
+        "DLPack support is implemented only for buffers on CPU device.");
+  }
   dlm_tensor->dl_tensor.device = ctx;
 
   dlm_tensor->dl_tensor.ndim = 1;
diff --git a/python/pyarrow/tests/test_array.py b/python/pyarrow/tests/test_array.py
index 24d3d970901d6..8483abf410c7b 100644
--- a/python/pyarrow/tests/test_array.py
+++ b/python/pyarrow/tests/test_array.py
@@ -3636,3 +3636,20 @@ def test_dlpack_not_supported():
                        "not supported by DLPack."):
         arr = pa.array([True, False, True])
         np.from_dlpack(arr)
+
+
+def test_dlpack_cuda_not_supported():
+    cuda = pytest.importorskip("pyarrow.cuda")
+
+    schema = pa.schema([pa.field('f0', pa.int16())])
+    a0 = pa.array([1, 2, 3], type=pa.int16())
+    batch = pa.record_batch([a0], schema=schema)
+
+    cbuf = cuda.serialize_record_batch(batch, cuda.Context(0))
+    cbatch = cuda.read_record_batch(cbuf, batch.schema)
+    carr = cbatch["f0"]
+
+    # CudaBuffers are not yet supported
+    with pytest.raises(NotImplementedError, match="DLPack support is "
+                       "implemented only for buffers on CPU device."):
+        np.from_dlpack(carr)