20 changes: 9 additions & 11 deletions python/pyspark/pandas/tests/computation/test_apply_func.py
@@ -262,20 +262,18 @@ def identify3(x) -> ps.DataFrame[float, [int, List[int]]]:
         actual.columns = ["a", "b"]
         self.assert_eq(normalize_array_values(actual._to_pandas()), normalize_array_values(pdf))

-        # For NumPy typing, NumPy version should be 1.21+
-        if LooseVersion(np.__version__) >= LooseVersion("1.21"):
-            import numpy.typing as ntp
+        import numpy.typing as ntp

-            psdf = ps.from_pandas(pdf)
+        psdf = ps.from_pandas(pdf)

-            def identify4(
-                x,
-            ) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]:
-                return x
+        def identify4(
+            x,
+        ) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]:
+            return x

-            actual = psdf.pandas_on_spark.apply_batch(identify4)
-            actual.columns = ["a", "b"]
-            self.assert_eq(normalize_array_values(actual._to_pandas()), normalize_array_values(pdf))
+        actual = psdf.pandas_on_spark.apply_batch(identify4)
+        actual.columns = ["a", "b"]
+        self.assert_eq(normalize_array_values(actual._to_pandas()), normalize_array_values(pdf))

         arrays = [[1, 2, 3, 4, 5, 6, 7, 8, 9], ["a", "b", "c", "d", "e", "f", "g", "h", "i"]]
         idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
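
For reviewers less familiar with the pattern being tested, here is a minimal usage sketch of the NDArray-based return-type annotation with apply_batch. It assumes an active Spark session and NumPy 1.21 or later; the DataFrame contents and the identity function are illustrative, not taken from the test.

import numpy.typing as ntp
import pandas as pd
import pyspark.pandas as ps

# A pandas DataFrame with an integer column and an array-like column.
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [[4], [5], [6]]})
psdf = ps.from_pandas(pdf)

# The return annotation gives apply_batch the output schema up front:
# a float index plus an int column and an array-of-int column.
def identity(x) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]:
    return x

actual = psdf.pandas_on_spark.apply_batch(identity)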
20 changes: 9 additions & 11 deletions python/pyspark/pandas/tests/test_typedef.py
@@ -407,17 +407,15 @@ def test_as_spark_type_pandas_on_spark_dtype(self):
                 (np.dtype("object"), ArrayType(spark_type)),
             )

-            # For NumPy typing, NumPy version should be 1.21+
-            if LooseVersion(np.__version__) >= LooseVersion("1.21"):
-                import numpy.typing as ntp
-
-                self.assertEqual(
-                    as_spark_type(ntp.NDArray[numpy_or_python_type]), ArrayType(spark_type)
-                )
-                self.assertEqual(
-                    pandas_on_spark_type(ntp.NDArray[numpy_or_python_type]),
-                    (np.dtype("object"), ArrayType(spark_type)),
-                )
+            import numpy.typing as ntp
+
+            self.assertEqual(
+                as_spark_type(ntp.NDArray[numpy_or_python_type]), ArrayType(spark_type)
+            )
+            self.assertEqual(
+                pandas_on_spark_type(ntp.NDArray[numpy_or_python_type]),
+                (np.dtype("object"), ArrayType(spark_type)),
+            )

        with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
            as_spark_type(np.dtype("uint64"))
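
A quick, concrete reading of what the loop asserts, with int picked as the element type for illustration (the test iterates over many NumPy and Python types); this assumes a PySpark environment with NumPy 1.21+, and the expected types below are my reading of the mapping rather than values taken from this diff.

import numpy as np
import numpy.typing as ntp
from pyspark.sql.types import ArrayType, LongType
from pyspark.pandas.typedef.typehints import as_spark_type, pandas_on_spark_type

# For an integer NDArray hint, the Spark type is an array of longs, and the
# pandas-on-Spark dtype side is the plain object dtype.
assert as_spark_type(ntp.NDArray[int]) == ArrayType(LongType())
assert pandas_on_spark_type(ntp.NDArray[int]) == (np.dtype("object"), ArrayType(LongType()))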
20 changes: 8 additions & 12 deletions python/pyspark/pandas/typedef/typehints.py
@@ -150,18 +150,14 @@ def as_spark_type(
     - dictionaries of field_name -> type
     - Python3's typing system
     """
-    # For NumPy typing, NumPy version should be 1.21+
-    if LooseVersion(np.__version__) >= LooseVersion("1.21"):
-        if (
-            hasattr(tpe, "__origin__")
-            and tpe.__origin__ is np.ndarray
-            and hasattr(tpe, "__args__")
-            and len(tpe.__args__) > 1
-        ):
-            # numpy.typing.NDArray
-            return types.ArrayType(
-                as_spark_type(tpe.__args__[1].__args__[0], raise_error=raise_error)
-            )
+    if (
+        hasattr(tpe, "__origin__")
+        and tpe.__origin__ is np.ndarray
+        and hasattr(tpe, "__args__")
+        and len(tpe.__args__) > 1
+    ):
+        # numpy.typing.NDArray
+        return types.ArrayType(as_spark_type(tpe.__args__[1].__args__[0], raise_error=raise_error))

     if isinstance(tpe, np.dtype) and tpe == np.dtype("object"):
         pass
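
For reference, a small sketch of the introspection the now-unconditional branch relies on; the variable names here are mine, and the behaviour assumes NumPy 1.21+.

import numpy as np
import numpy.typing as ntp

tpe = ntp.NDArray[int]  # alias for np.ndarray[Any, np.dtype[int]]
assert tpe.__origin__ is np.ndarray  # the first check in the simplified branch
element_type = tpe.__args__[1].__args__[0]  # unwraps the dtype parameter
assert element_type is int  # what as_spark_type is then called on recursively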