
Commit

Upstream CI (#35)
crusaderky committed Mar 10, 2024
1 parent 5fe7496 commit cc95d48
Showing 12 changed files with 98 additions and 46 deletions.
26 changes: 25 additions & 1 deletion .github/workflows/pytest.yml
@@ -1,11 +1,17 @@
-name: Test latest
+name: Test

on:
  push:
    branches: [main]
  pull_request:
    branches: ['*']

+# When this workflow is queued, automatically cancel any previous running
+# or pending jobs from the same branch
+concurrency:
+  group: tests-${{ github.ref }}
+  cancel-in-progress: true

defaults:
  run:
    shell: bash -l {0}
@@ -44,6 +50,10 @@ jobs:
          - os: windows
            python-version: '3.8'
            requirements: minimal
+          # Test on nightly builds of requirements
+          - os: ubuntu
+            python-version: '3.12'
+            requirements: upstream

    steps:
      - name: Checkout
@@ -69,6 +79,20 @@ jobs:
        if: ${{ matrix.os == 'macosx' }}
        run: mamba install clang_osx-64

+      - name: Install nightly builds
+        if: ${{ matrix.requirements == 'upstream' }}
+        run: |
+          # Pick up https://github.com/mamba-org/mamba/pull/2903
+          mamba install -n base 'mamba>=1.5.2'
+          mamba uninstall --force numpy pandas scipy pyarrow
+          python -m pip install --no-deps --pre --prefer-binary \
+            --extra-index-url https://pypi.fury.io/arrow-nightlies/ \
+            pyarrow
+          python -m pip install --no-deps --pre \
+            -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \
+            numpy pandas scipy

      - name: Show conda options
        run: conda config --show

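For illustration only (not part of this commit): after the step above replaces the conda-forge builds of numpy, pandas, scipy, and pyarrow with pre-release wheels, a quick sanity check could confirm that development builds were actually picked up. A minimal sketch, assuming the packaging library is available in the CI environment:

    from importlib.metadata import version

    from packaging.version import Version

    # Nightly wheels carry dev/pre-release markers such as '2.1.0.dev0+...'.
    for name in ("numpy", "pandas", "scipy", "pyarrow"):
        v = Version(version(name))
        kind = "nightly" if (v.is_devrelease or v.is_prerelease) else "stable"
        print(f"{name}: {v} ({kind})")
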
2 changes: 1 addition & 1 deletion ci/requirements-latest.yml
@@ -1,6 +1,6 @@
name: xarray-extras
channels:
-  - defaults
+  - conda-forge
dependencies:
  - dask
  - numba
2 changes: 1 addition & 1 deletion ci/requirements-minimal.yml
@@ -9,4 +9,4 @@ dependencies:
  - pytest
  - pytest-cov
  - scipy=1.9
-  - xarray=2022.6.0
+  - xarray=2022.11.0
19 changes: 19 additions & 0 deletions ci/requirements-upstream.yml
@@ -0,0 +1,19 @@
name: xarray-extras
channels:
  - conda-forge
dependencies:
  - dask
  # - numba # Not compatible with numpy 2
  - numpy
  - pandas
  - pyarrow
  - pytest
  - pytest-cov
  - scipy
  - xarray
  - pip
  - pip:
    - git+https://github.com/dask/dask
    - git+https://github.com/dask/distributed
    - git+https://github.com/pydata/xarray
# numpy, pandas, pyarrow, and scipy are upgraded to nightly builds by pytest.yml
8 changes: 4 additions & 4 deletions doc/whats-new.rst
@@ -9,17 +9,17 @@ v0.6.0 (Unreleased)
-------------------
- Bumped minimum version of all dependencies:

-========== ====== ========
+========== ====== =========
Dependency v0.5.0 v0.6.0
-========== ====== ========
+========== ====== =========
python 3.7 3.8
dask 2021.4.0 2022.6.0
numba 0.52 0.56
numpy 1.18 1.23
pandas 1.1 1.5
scipy 1.5 1.9
-xarray 0.16 2022.6.0
-========== ====== ========
+xarray 0.16 2022.11.0
+========== ====== =========

- Added support for Python 3.10, 3.11, and 3.12
- Added support for recent versions of Pandas (tested up to 2.2) and xarray
10 changes: 7 additions & 3 deletions pyproject.toml
@@ -20,12 +20,12 @@ classifiers = [
]
requires-python = ">=3.8"
dependencies = [
-    "dask >= 2022.6",
+    "dask >= 2022.6.0",
    "numba >= 0.56",
    "numpy >= 1.23",
    "pandas >= 1.5",
    "scipy >= 1.9",
-    "xarray >= 2022.6.0",
+    "xarray >= 2022.11.0",
]
dynamic = ["version"]

@@ -62,7 +62,7 @@ python_files = ["test_*.py"]
testpaths = ["xarray_extras/tests"]
filterwarnings = [
    "error",
-    "ignore:datetime.datetime.utcfromtimestamp():DeprecationWarning",
+    # FIXME these need to be fixed in xarray
+    "ignore:__array_wrap__ must accept context and return:DeprecationWarning",
+    # FIXME these need to be looked at
+    'ignore:.*will no longer be implicitly promoted:FutureWarning',
+    'ignore:.*updating coordinate .* with a PandasMultiIndex would leave the multi-index level coordinates .* in an inconsistent state:FutureWarning',
]

[tool.coverage.report]
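
With filterwarnings = ["error", ...], pytest escalates any warning that is not matched by one of the ignore rules above into a test failure, which is how incompatibilities with nightly builds surface early. A minimal sketch of the effect (test names and messages are illustrative, not taken from the repository):

    import warnings

    import pytest

    def test_unmatched_warning_fails():
        # With filterwarnings = ["error"], an unmatched warning is raised as
        # an exception, so this test fails until an ignore rule or a fix lands.
        warnings.warn("something deprecated upstream", DeprecationWarning)

    def test_expected_warning_passes():
        # pytest.warns captures the warning before the "error" filter sees it.
        with pytest.warns(DeprecationWarning, match="something deprecated"):
            warnings.warn("something deprecated upstream", DeprecationWarning)
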
8 changes: 8 additions & 0 deletions xarray_extras/compat.py
@@ -0,0 +1,8 @@
from __future__ import annotations

try:
    from xarray.namedarray.pycompat import array_type
except ImportError:  # <2024.2.0
    from xarray.core.pycompat import array_type  # type: ignore[no-redef]

dask_array_type = array_type("dask")
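
The new module centralises the version-dependent import so that the rest of the package needs a single, stable import path. A minimal sketch of how such a compatibility alias is typically consumed (the helper below is hypothetical, not part of the package):

    import xarray

    from xarray_extras.compat import dask_array_type

    def is_dask_backed(obj: xarray.DataArray) -> bool:
        # array_type("dask") resolves to dask.array.Array when dask is
        # installed, so the alias can be used in isinstance checks without
        # importing dask directly.
        return isinstance(obj.data, dask_array_type)
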
2 changes: 1 addition & 1 deletion xarray_extras/csv.py
@@ -116,7 +116,7 @@ def to_csv(
        return None

    # Merge chunks on all dimensions beyond the first
-    x = x.chunk((x.chunks[0], *((s,) for s in x.shape[1:])))  # type: ignore[arg-type]
+    x = x.chunk({dim: -1 for dim in x.dims[1:]})

    # Manually define the dask graph
    tok = tokenize(x.data, index, columns, compression, path, kwargs)
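
The dict form of chunk expresses the same intent as the old tuple arithmetic: keep the existing chunking along the first dimension and collapse every other dimension into a single chunk (-1 means one chunk spanning the whole axis). A small sketch of the behaviour, with made-up dimension names:

    import numpy as np
    import xarray

    x = xarray.DataArray(
        np.arange(24).reshape(4, 6), dims=["r", "c"]
    ).chunk({"r": 2, "c": 3})

    # "r" keeps its chunks of 2; "c" is merged into a single chunk of 6.
    y = x.chunk({dim: -1 for dim in x.dims[1:]})
    assert y.chunks == ((2, 2), (6,))
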
9 changes: 1 addition & 8 deletions xarray_extras/interpolate.py
@@ -8,16 +8,9 @@
import numpy as np
import xarray

+from xarray_extras.compat import dask_array_type
from xarray_extras.kernels import interpolate as kernels

-try:
-    from xarray.core.pycompat import array_type
-
-    dask_array_type = array_type("dask")
-except ImportError:  # xarray <2022.11.0
-    from xarray.core.pycompat import dask_array_type  # type: ignore
-
-
__all__ = ("splrep", "splev")


54 changes: 27 additions & 27 deletions xarray_extras/tests/test_csv.py
@@ -63,7 +63,7 @@ def assert_to_csv_with_path_type(

@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
@pytest.mark.parametrize("header", [False, True])
@pytest.mark.parametrize("lineterminator", ["\n", "\r\n"])
def test_series(chunks, nogil, dtype, header, lineterminator):
@@ -74,7 +74,7 @@ def test_series(chunks, nogil, dtype, header, lineterminator):
@pytest.mark.parametrize("path_type", ["path", "str"])
@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
@pytest.mark.parametrize("header", [False, True])
@pytest.mark.parametrize("lineterminator", ["\n", "\r\n"])
def test_series_with_path(path_type, chunks, nogil, dtype, header, lineterminator):
@@ -86,7 +86,7 @@ def test_series_with_path(path_type, chunks, nogil, dtype, header, lineterminator):

@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"r": 1, "c": 1}])
@pytest.mark.parametrize("lineterminator", ["\n", "\r\n"])
def test_dataframe(chunks, nogil, dtype, lineterminator):
    x = xarray.DataArray(
@@ -99,7 +99,7 @@ def test_dataframe(chunks, nogil, dtype, lineterminator):

@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"r": 1, "c": 1}])
def test_multiindex(chunks, nogil, dtype):
    x = xarray.DataArray(
        [[1, 2], [3, 4]],
@@ -117,24 +117,24 @@ def test_multiindex(chunks, nogil, dtype):

@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"r": 1, "c": 1}])
def test_no_header(chunks, nogil, dtype):
-    x = xarray.DataArray([[1, 2], [3, 4]])
+    x = xarray.DataArray([[1, 2], [3, 4]], dims=["r", "c"])
    assert_to_csv(x, chunks, nogil, dtype, index=False, header=False)


@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"r": 1, "c": 1}])
def test_custom_header(chunks, nogil, dtype):
-    x = xarray.DataArray([[1, 2], [3, 4]])
+    x = xarray.DataArray([[1, 2], [3, 4]], dims=["r", "c"])
    assert_to_csv(x, chunks, nogil, dtype, header=["foo", "bar"])


@pytest.mark.parametrize("encoding", ["utf-8", "utf-16"])
@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"r": 1, "c": 1}])
@pytest.mark.parametrize("lineterminator", ["\n", "\r\n"])
def test_encoding(chunks, nogil, dtype, encoding, lineterminator):
    # Note: in Python 2.7, default encoding is ascii in pandas and utf-8 in
@@ -151,17 +151,17 @@ def test_encoding(chunks, nogil, dtype, encoding, lineterminator):
@pytest.mark.parametrize("float_format", ["%f", "%.2f", "%.15f", "%.5e"])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
def test_kwargs(chunks, nogil, dtype, float_format, sep):
-    x = xarray.DataArray([1.0, 1.1, 1.000000000000001, 123.456789])
+    x = xarray.DataArray([1.0, 1.1, 1.000000000000001, 123.456789], dims=["x"])
    assert_to_csv(x, chunks, nogil, dtype, float_format=float_format, sep=sep)


@pytest.mark.parametrize("na_rep", ["", "nan"])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
def test_na_rep(chunks, nogil, na_rep):
-    x = xarray.DataArray([np.nan, 1])
+    x = xarray.DataArray([np.nan, 1], dims=["x"])
    assert_to_csv(x, chunks, nogil, np.float64, na_rep=na_rep)


@@ -176,9 +176,9 @@ def test_na_rep(chunks, nogil, na_rep):
)
@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
def test_compression(chunks, nogil, dtype, compression, open_func):
-    x = xarray.DataArray([1, 2])
+    x = xarray.DataArray([1, 2], dims=["x"])
    assert_to_csv(x, chunks, nogil, dtype, compression=compression, open_func=open_func)


@@ -192,17 +192,17 @@ def test_compression(chunks, nogil, dtype, compression, open_func):
],
)
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
def test_compression_infer(ext, open_func, nogil, chunks):
-    x = xarray.DataArray([1, 2])
+    x = xarray.DataArray([1, 2], dims=["x"])
    assert_to_csv(
        x, chunks=chunks, nogil=nogil, dtype=np.float64, ext=ext, open_func=open_func
    )


@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"r": 1, "c": 1}])
def test_empty(chunks, nogil, dtype):
    x = xarray.DataArray(
        [[1, 2, 3, 4]], dims=["r", "c"], coords={"c": [10, 20, 30, 40]}
@@ -214,7 +214,7 @@ def test_empty(chunks, nogil, dtype):
@pytest.mark.parametrize("x", [0, -(2**63)])
@pytest.mark.parametrize("index", ["a", "a" * 1000])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
def test_buffer_overflow_int(chunks, nogil, index, x):
    a = xarray.DataArray([x], dims=["x"], coords={"x": [index]})
    assert_to_csv(a, chunks, nogil, np.int64)
@@ -225,7 +225,7 @@ def test_buffer_overflow_int(chunks, nogil, index, x):
@pytest.mark.parametrize("na_rep", ["", "na" * 500])
@pytest.mark.parametrize("float_format", ["%.16f", "%.1000f", "a" * 1000 + "%.0f"])
@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
def test_buffer_overflow_float(chunks, nogil, float_format, na_rep, index, coord, x):
    if nogil and not index and np.isnan(x) and na_rep == "":
        # Expected: b'""\n'
@@ -246,10 +246,10 @@ def test_buffer_overflow_float(chunks, nogil, float_format, na_rep, index, coord, x):

@pytest.mark.parametrize("encoding", ["utf-8", "utf-16"])
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
@pytest.mark.parametrize("lineterminator", ["\n", "\r\n"])
def test_pandas_only(chunks, dtype, encoding, lineterminator):
x = xarray.DataArray(["foo", "Crème brûlée"])
x = xarray.DataArray(["foo", "Crème brûlée"], dims=["x"])
assert_to_csv(
x,
chunks=chunks,
@@ -261,17 +261,17 @@ def test_pandas_only(chunks, dtype, encoding, lineterminator):


@pytest.mark.parametrize("dtype", [np.complex64, np.complex128])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
def test_pandas_only_complex(chunks, dtype):
-    x = xarray.DataArray([1 + 2j])
+    x = xarray.DataArray([1 + 2j], dims=["x"])
    assert_to_csv(x, chunks=chunks, nogil=False, dtype=dtype)


@pytest.mark.parametrize("nogil", [False, True])
@pytest.mark.parametrize("chunks", [None, 1])
@pytest.mark.parametrize("chunks", [None, {"x": 1}])
def test_mode(chunks, nogil):
-    x = xarray.DataArray([1, 2])
-    y = xarray.DataArray([3, 4])
+    x = xarray.DataArray([1, 2], dims=["x"])
+    y = xarray.DataArray([3, 4], dims=["x"])
    if chunks:
        x = x.chunk(chunks)
        y = y.chunk(chunks)
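
The parametrizations above switch from a bare integer to a dimension-keyed mapping, and the corresponding DataArrays gain explicit dims= so that the mapping has named dimensions to refer to. A minimal sketch of the equivalence, assuming a 2-D array with dims ("r", "c"):

    import xarray

    x = xarray.DataArray([[1, 2], [3, 4]], dims=["r", "c"])

    # A bare int applies the same chunk size along every dimension...
    a = x.chunk(1)
    # ...while a mapping names each dimension explicitly and is unambiguous.
    b = x.chunk({"r": 1, "c": 1})
    assert a.chunks == b.chunks == ((1, 1), (1, 1))
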
2 changes: 2 additions & 0 deletions xarray_extras/tests/test_cumulatives.py
@@ -3,6 +3,8 @@
import xarray
from xarray.testing import assert_equal

+pytest.importorskip("numba")  # Not available in upstream CI

import xarray_extras.cumulatives as cum

# Skip 0 and 1 as they're neutral in addition and multiplication
2 changes: 2 additions & 0 deletions xarray_extras/tests/test_numba_extras.py
@@ -1,6 +1,8 @@
import numpy
import pytest

+pytest.importorskip("numba")  # Not available in upstream CI

from xarray_extras.numba_extras import guvectorize

DTYPES = [
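
pytest.importorskip is used in the last two test modules because numba is left out of the upstream environment (see ci/requirements-upstream.yml). A minimal sketch of what the call does (the test below is illustrative):

    import pytest

    # Imports numba if it is available; otherwise the whole test module is
    # skipped at collection time instead of failing with an ImportError.
    numba = pytest.importorskip("numba")

    def test_uses_numba():
        assert numba.njit(lambda x: x + 1)(1) == 2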
