Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 31 additions & 0 deletions src/squidpy/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,37 @@ def verbosity(level: int) -> Generator[None, None, None]:
sc.settings.verbosity = verbosity


def deprecated_params(
    params: dict[str, str],
) -> Callable[..., Any]:
    """Decorator that warns when deprecated keyword arguments are passed.

    Parameters
    ----------
    params
        Mapping of deprecated parameter names to the version in which
        they will be removed, e.g. ``{"n_jobs": "1.10.0"}``.
    """

    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Collect the deprecated names actually supplied, preserving the
            # caller's keyword order so warnings appear in a predictable order.
            passed_deprecated = [name for name in kwargs if name in params]
            for name in passed_deprecated:
                warnings.warn(
                    f"Parameter `{name}` of `{func.__name__}()` is deprecated "
                    f"and has no effect. It will be removed in squidpy v{params[name]}.",
                    FutureWarning,
                    stacklevel=2,
                )
                # Strip the argument so the wrapped function never sees it.
                del kwargs[name]
            return func(*args, **kwargs)

        return wrapper

    return decorator


string_types = (bytes, str)


Expand Down
15 changes: 3 additions & 12 deletions src/squidpy/gr/_ppatterns.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
from squidpy._constants._constants import SpatialAutocorr
from squidpy._constants._pkg_constants import Key
from squidpy._docs import d, inject_docs
from squidpy._utils import NDArrayA, Signal, SigQueue, _get_n_cores, parallelize
from squidpy._utils import NDArrayA, Signal, SigQueue, _get_n_cores, deprecated_params, parallelize
from squidpy.gr._utils import (
_assert_categorical_obs,
_assert_connectivity_key,
Expand Down Expand Up @@ -342,16 +342,13 @@ def _co_occurrence_helper(v_x: NDArrayA, v_y: NDArrayA, v_radium: NDArrayA, labs


@d.dedent
@deprecated_params({"n_splits": "1.10.0", "n_jobs": "1.10.0", "backend": "1.10.0", "show_progress_bar": "1.10.0"})
def co_occurrence(
adata: AnnData | SpatialData,
cluster_key: str,
spatial_key: str = Key.obsm.spatial,
interval: int | NDArrayA = 50,
copy: bool = False,
n_splits: int | None = None,
n_jobs: int | None = None,
backend: str = "loky",
show_progress_bar: bool = True,
) -> tuple[NDArrayA, NDArrayA] | None:
"""
Compute co-occurrence probability of clusters.
Expand All @@ -365,10 +362,6 @@ def co_occurrence(
Distances interval at which co-occurrence is computed. If :class:`int`, uniformly spaced interval
of the given size will be used.
%(copy)s
n_splits
Number of splits in which to divide the spatial coordinates in
:attr:`anndata.AnnData.obsm` ``['{spatial_key}']``.
%(parallelize)s

Returns
-------
Expand Down Expand Up @@ -406,9 +399,7 @@ def co_occurrence(

# Compute co-occurrence probabilities using the fast numba routine.
out = _co_occurrence_helper(spatial_x, spatial_y, interval, labs)
start = logg.info(
f"Calculating co-occurrence probabilities for `{len(interval)}` intervals using `{n_jobs}` core(s) and `{n_splits}` splits"
)
start = logg.info(f"Calculating co-occurrence probabilities for `{len(interval)}` intervals")

if copy:
logg.info("Finish", time=start)
Expand Down
8 changes: 3 additions & 5 deletions tests/graph/test_ppatterns.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,12 +137,10 @@ def test_co_occurrence(adata: AnnData):
assert arr.shape[1] == arr.shape[0] == adata.obs["leiden"].unique().shape[0]


# @pytest.mark.parametrize(("ys", "xs"), [(10, 10), (None, None), (10, 20)])
@pytest.mark.parametrize(("n_jobs", "n_splits"), [(1, 2), (2, 2)])
def test_co_occurrence_reproducibility(adata: AnnData, n_jobs: int, n_splits: int):
def test_co_occurrence_reproducibility(adata: AnnData):
"""Check co_occurrence reproducibility results."""
arr_1, interval_1 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
arr_2, interval_2 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
arr_1, interval_1 = co_occurrence(adata, cluster_key="leiden", copy=True)
arr_2, interval_2 = co_occurrence(adata, cluster_key="leiden", copy=True)

np.testing.assert_array_equal(sorted(interval_1), sorted(interval_2))
np.testing.assert_allclose(arr_1, arr_2)
Expand Down
Loading