Commit b35476b

Merge 4eb409a into 27da6a3 (2 parents: 27da6a3 + 4eb409a)
philippjfr committed Feb 14, 2022
Showing 42 changed files with 287 additions and 222 deletions.
6 changes: 6 additions & 0 deletions .github/workflows/build.yml
@@ -89,9 +89,15 @@ jobs:
eval "$(conda shell.bash hook)"
conda activate test-environment
doit develop_install $CHANS_DEV -o unit_tests
# See https://github.com/holoviz/holoviews/issues/5167
conda install $CHANS_DEV "nbconvert<6"
pip uninstall -y holoviews
doit pip_on_conda
+ - name: doit env_capture
+   run: |
+     eval "$(conda shell.bash hook)"
+     conda activate test-environment
+     doit env_capture
- name: pip build
run: |
eval "$(conda shell.bash hook)"
2 changes: 1 addition & 1 deletion holoviews/core/data/__init__.py
@@ -800,7 +800,7 @@ def sample(self, samples=[], bounds=None, closest=True, **kwargs):
# may be replaced with more general handling
# see https://github.com/ioam/holoviews/issues/1173
from ...element import Table, Curve
- datatype = ['dataframe', 'dictionary', 'dask', 'ibis']
+ datatype = ['dataframe', 'dictionary', 'dask', 'ibis', 'cuDF']
if len(samples) == 1:
sel = {kd.name: s for kd, s in zip(self.kdims, samples[0])}
dims = [kd for kd, v in sel.items() if not np.isscalar(v)]
10 changes: 8 additions & 2 deletions holoviews/core/data/cudf.py
@@ -247,7 +247,7 @@ def select(cls, dataset, selection_mask=None, **selection):

indexed = cls.indexed(dataset, selection)
if selection_mask is not None:
- df = df.loc[selection_mask]
+ df = df.iloc[selection_mask]
if indexed and len(df) == 1 and len(dataset.vdims) == 1:
return df[dataset.vdims[0].name].iloc[0]
return df
@@ -284,7 +284,13 @@ def aggregate(cls, dataset, dimensions, function, **kwargs):
if not hasattr(reindexed, agg):
raise ValueError('%s aggregation is not supported on cudf DataFrame.' % agg)
agg = getattr(reindexed, agg)()
- data = dict(((col, [v]) for col, v in zip(agg.index.values_host, agg.to_array())))
+ try:
+     data = dict(((col, [v]) for col, v in zip(agg.index.values_host, agg.to_numpy())))
+ except Exception:
+     # Give FutureWarning: 'The to_array method will be removed in a future cuDF release.
+     # Consider using `to_numpy` instead.'
+     # Seen in cudf=21.12.01
+     data = dict(((col, [v]) for col, v in zip(agg.index.values_host, agg.to_array())))
df = util.pd.DataFrame(data, columns=list(agg.index.values_host))

dropped = []
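
The cudf.py aggregate change above guards against cuDF deprecating Series.to_array() in favour of Series.to_numpy(). As a minimal standalone sketch of the same fallback (assuming a cudf.Series input; series_to_host is a hypothetical helper name, and AttributeError is the expected failure where the patch catches the broader Exception):

    def series_to_host(series):
        """Return a host-side NumPy array from a cudf.Series."""
        try:
            return series.to_numpy()   # preferred on newer cuDF releases
        except AttributeError:
            return series.to_array()   # deprecated fallback for older cuDF
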
7 changes: 5 additions & 2 deletions holoviews/core/data/ibis.py
@@ -34,8 +34,11 @@ def has_rowid(cls):

@classmethod
def is_rowid_zero_indexed(cls, data):
- from ibis.client import find_backends, validate_backends
- (backend,) = validate_backends(list(find_backends(data)))
+ try:
+     from ibis.client import find_backends, validate_backends
+     (backend,) = validate_backends(list(find_backends(data)))
+ except Exception:
+     backend = data._find_backend()
return type(backend).__module__ in cls.zero_indexed_backend_modules

@classmethod
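
The ibis.py change applies the same try/except strategy to backend discovery: older ibis releases expose find_backends/validate_backends in ibis.client, while newer releases drop that module and let an expression locate its own backend via _find_backend(). A standalone sketch under those assumptions (resolve_backend is a hypothetical name; the broad except mirrors the patch, though ImportError is the expected failure):

    def resolve_backend(expr):
        """Locate the backend an ibis expression is bound to."""
        try:
            # Older ibis: discovery helpers live in ibis.client.
            from ibis.client import find_backends, validate_backends
            (backend,) = validate_backends(list(find_backends(expr)))
        except Exception:
            # Newer ibis: the expression can find its own backend.
            backend = expr._find_backend()
        return backend
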
6 changes: 3 additions & 3 deletions holoviews/core/data/pandas.py
@@ -164,7 +164,7 @@ def range(cls, dataset, dimension):
column = dataset.data[dimension.name]
if column.dtype.kind == 'O':
if (not isinstance(dataset.data, pd.DataFrame) or
- util.LooseVersion(pd.__version__) < '0.17.0'):
+ util.LooseVersion(pd.__version__) < util.LooseVersion('0.17.0')):
column = column.sort(inplace=False)
else:
column = column.sort_values()
@@ -187,7 +187,7 @@ def range(cls, dataset, dimension):

@classmethod
def concat_fn(cls, dataframes, **kwargs):
- if util.pandas_version >= '0.23.0':
+ if util.pandas_version >= util.LooseVersion('0.23.0'):
kwargs['sort'] = False
return pd.concat(dataframes, **kwargs)

@@ -292,7 +292,7 @@ def sort(cls, dataset, by=[], reverse=False):
cols = [dataset.get_dimension(d, strict=True).name for d in by]

if (not isinstance(dataset.data, pd.DataFrame) or
- util.LooseVersion(pd.__version__) < '0.17.0'):
+ util.LooseVersion(pd.__version__) < util.LooseVersion('0.17.0')):
return dataset.data.sort(columns=cols, ascending=not reverse)
return dataset.data.sort_values(by=cols, ascending=not reverse)

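
The LooseVersion(...) wrappers added throughout pandas.py are the crux of this commit: distutils' LooseVersion tolerated a bare string on one side of a comparison, but packaging.version.Version, which the util.py hunk below aliases to LooseVersion, returns NotImplemented for strings, so Python raises TypeError unless both operands are wrapped. A minimal demonstration:

    from packaging.version import Version

    # Parsed against parsed works as expected.
    assert Version("1.3.0") >= Version("0.23.0")

    # Parsed against a bare string raises TypeError, hence the
    # LooseVersion(...) wrapper added to every literal above.
    try:
        Version("1.3.0") >= "0.23.0"
    except TypeError:
        pass
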
14 changes: 11 additions & 3 deletions holoviews/core/data/xarray.py
@@ -217,9 +217,17 @@ def retrieve_unit_and_label(dim):
# not need to be canonicalized
if any(len(da.coords[c].shape) > 1 for c in da.coords):
continue
- undeclared = [
-     c for c in da.coords if c not in kdims and len(da[c].shape) == 1 and
-     da[c].shape[0] > 1]
+ undeclared = []
+ for c in da.coords:
+     if c in kdims or len(da[c].shape) != 1 or da[c].shape[0] <= 1:
+         # Skip if coord is declared, represents irregular coordinates or is constant
+         continue
+     elif all(d in kdims for d in da[c].dims):
+         continue  # Skip if coord is alias for another dimension
+     elif any(all(d in da[kd.name].dims for d in da[c].dims) for kd in kdims):
+         # Skip if all the dims on the coord are present on another coord
+         continue
+     undeclared.append(c)
if undeclared:
raise DataError(
'The coordinates on the %r DataArray do not match the '
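
The rewritten loop in xarray.py relaxes coordinate validation so that auxiliary coordinates riding on an already-declared key dimension no longer raise a DataError. A self-contained sketch of the alias case, covering the first two skip rules (the data and names here are illustrative, not from the commit):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.arange(6).reshape(2, 3),
        dims=("y", "x"),
        coords={
            "y": [0, 1],
            "x": [0, 1, 2],
            "x_label": ("x", ["a", "b", "c"]),  # auxiliary coord on "x"
        },
    )
    kdims = ["x", "y"]
    undeclared = []
    for c in da.coords:
        if c in kdims or da[c].ndim != 1 or da[c].shape[0] <= 1:
            continue  # declared, irregular, or constant
        if all(d in kdims for d in da[c].dims):
            continue  # alias: every dim of the coord is a declared kdim
        undeclared.append(c)
    assert undeclared == []  # "x_label" is no longer flagged
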
81 changes: 50 additions & 31 deletions holoviews/core/util.py
@@ -11,7 +11,7 @@

from collections import defaultdict, OrderedDict
from contextlib import contextmanager
- from distutils.version import LooseVersion as _LooseVersion
+ from packaging.version import Version as LooseVersion
from functools import partial
from threading import Thread, Event
from types import FunctionType
@@ -37,7 +37,6 @@
RecursionError = RecursionError if sys.version_info.minor > 4 else RuntimeError # noqa
_getargspec = inspect.getfullargspec
get_keywords = operator.attrgetter('varkw')
- LooseVersion = _LooseVersion
else:
import __builtin__ as builtins # noqa (compatibility)
from collections import Iterable # noqa (compatibility)
@@ -50,23 +49,6 @@
_getargspec = inspect.getargspec
get_keywords = operator.attrgetter('keywords')

- class LooseVersion(_LooseVersion):
-     """
-     Subclassed to avoid unicode issues in python2
-     """
-
-     def __init__ (self, vstring=None):
-         if isinstance(vstring, unicode):
-             vstring = str(vstring)
-         self.parse(vstring)
-
-     def __cmp__(self, other):
-         if isinstance(other, unicode):
-             other = str(other)
-         if isinstance(other, basestring):
-             other = LooseVersion(other)
-         return cmp(self.version, other.version)

numpy_version = LooseVersion(np.__version__)
param_version = LooseVersion(param.__version__)

@@ -83,13 +65,13 @@
if pd:
pandas_version = LooseVersion(pd.__version__)
try:
- if pandas_version >= '1.3.0':
+ if pandas_version >= LooseVersion('1.3.0'):
from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndex as ABCIndexClass
- elif pandas_version >= '0.24.0':
+ elif pandas_version >= LooseVersion('0.24.0'):
from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
- elif pandas_version > '0.20.0':
+ elif pandas_version > LooseVersion('0.20.0'):
from pandas.core.dtypes.dtypes import DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
else:
@@ -100,10 +82,10 @@
datetime_types = datetime_types + pandas_datetime_types
timedelta_types = timedelta_types + pandas_timedelta_types
arraylike_types = arraylike_types + (ABCSeries, ABCIndexClass)
- if pandas_version > '0.23.0':
+ if pandas_version > LooseVersion('0.23.0'):
from pandas.core.dtypes.generic import ABCExtensionArray
arraylike_types = arraylike_types + (ABCExtensionArray,)
- if pandas_version > '1.0':
+ if pandas_version > LooseVersion('1.0'):
from pandas.core.arrays.masked import BaseMaskedArray
masked_types = (BaseMaskedArray,)
except Exception as e:
@@ -864,7 +846,7 @@ def isnat(val):
"""
if (isinstance(val, (np.datetime64, np.timedelta64)) or
(isinstance(val, np.ndarray) and val.dtype.kind == 'M')):
- if numpy_version >= '1.13':
+ if numpy_version >= LooseVersion('1.13'):
return np.isnat(val)
else:
return val.view('i8') == nat_as_integer
@@ -902,15 +884,15 @@ def isfinite(val):
elif val.dtype.kind in 'US':
return ~pd.isna(val) if pd else np.ones_like(val, dtype=bool)
finite = np.isfinite(val)
- if pd and pandas_version >= '1.0.0':
+ if pd and pandas_version >= LooseVersion('1.0.0'):
finite &= ~pd.isna(val)
return finite
elif isinstance(val, datetime_types+timedelta_types):
return not isnat(val)
elif isinstance(val, (basestring, bytes)):
return True
finite = np.isfinite(val)
- if pd and pandas_version >= '1.0.0':
+ if pd and pandas_version >= LooseVersion('1.0.0'):
if finite is pd.NA:
return False
return finite & (~pd.isna(val))
@@ -1569,17 +1551,27 @@ def is_param_method(obj, has_deps=False):
def resolve_dependent_value(value):
"""Resolves parameter dependencies on the supplied value
- Resolves parameter values, Parameterized instance methods and
- parameterized functions with dependencies on the supplied value.
+ Resolves parameter values, Parameterized instance methods,
+ parameterized functions with dependencies on the supplied value,
+ including such parameters embedded in a list, tuple, or dictionary.
Args:
value: A value which will be resolved
Returns:
- A new dictionary where any parameter dependencies have been
+ A new value where any parameter dependencies have been
resolved.
"""
range_widget = False
+ if isinstance(value, list):
+     value = [resolve_dependent_value(v) for v in value]
+ elif isinstance(value, tuple):
+     value = tuple(resolve_dependent_value(v) for v in value)
+ elif isinstance(value, dict):
+     value = {
+         resolve_dependent_value(k): resolve_dependent_value(v) for k, v in value.items()
+     }

if 'panel' in sys.modules:
from panel.widgets import RangeSlider, Widget
range_widget = isinstance(value, RangeSlider)
@@ -1614,7 +1606,7 @@ def resolve_dependent_kwargs(kwargs):
kwargs (dict): A dictionary of keyword arguments
Returns:
- A new dictionary with where any parameter dependencies have been
+ A new dictionary where any parameter dependencies have been
resolved.
"""
return {k: resolve_dependent_value(v) for k, v in kwargs.items()}
@@ -2294,3 +2286,30 @@ def cast_array_to_int64(array):
category=FutureWarning,
)
return array.astype('int64')


+ def flatten(line):
+     """
+     Flatten an arbitrarily nested sequence.
+
+     Inspired by: pd.core.common.flatten
+
+     Parameters
+     ----------
+     line : sequence
+         The sequence to flatten
+
+     Notes
+     -----
+     This only flattens list, tuple, and dict sequences.
+
+     Returns
+     -------
+     flattened : generator
+     """
+     for element in line:
+         if any(isinstance(element, tp) for tp in (list, tuple, dict)):
+             yield from flatten(element)
+         else:
+             yield element
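
A quick usage sketch for the new flatten generator defined above; note that a dict element contributes its keys, because iterating a dict yields keys:

    nested = [1, (2, 3), [4, [5, {"a": 6}]]]
    assert list(flatten(nested)) == [1, 2, 3, 4, 5, "a"]
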
22 changes: 11 additions & 11 deletions holoviews/operation/datashader.py
@@ -2,7 +2,7 @@

import warnings

- from collections import Callable
+ from collections.abc import Callable
from functools import partial

import param
@@ -489,13 +489,13 @@ def _process(self, element, key=None):

if agg.ndim == 2:
# Replacing x and y coordinates to avoid numerical precision issues
- eldata = agg if ds_version > '0.5.0' else (xs, ys, agg.data)
+ eldata = agg if ds_version > LooseVersion('0.5.0') else (xs, ys, agg.data)
return self.p.element_type(eldata, **params)
else:
layers = {}
for c in agg.coords[agg_fn.column].data:
cagg = agg.sel(**{agg_fn.column: c})
- eldata = cagg if ds_version > '0.5.0' else (xs, ys, cagg.data)
+ eldata = cagg if ds_version > LooseVersion('0.5.0') else (xs, ys, cagg.data)
layers[c] = self.p.element_type(eldata, **params)
return NdOverlay(layers, kdims=[data.get_dimension(agg_fn.column)])

@@ -791,13 +791,13 @@ def _process(self, element, key=None):

if agg.ndim == 2:
# Replacing x and y coordinates to avoid numerical precision issues
- eldata = agg if ds_version > '0.5.0' else (xs, ys, agg.data)
+ eldata = agg if ds_version > LooseVersion('0.5.0') else (xs, ys, agg.data)
return self.p.element_type(eldata, **params)
else:
layers = {}
for c in agg.coords[agg_fn.column].data:
cagg = agg.sel(**{agg_fn.column: c})
- eldata = cagg if ds_version > '0.5.0' else (xs, ys, cagg.data)
+ eldata = cagg if ds_version > LooseVersion('0.5.0') else (xs, ys, cagg.data)
layers[c] = self.p.element_type(eldata, **params)
return NdOverlay(layers, kdims=[element.get_dimension(agg_fn.column)])

@@ -895,7 +895,7 @@ def _get_xarrays(self, element, coords, xtype, ytype):


def _process(self, element, key=None):
- if ds_version <= '0.5.0':
+ if ds_version <= LooseVersion('0.5.0'):
raise RuntimeError('regrid operation requires datashader>=0.6.0')

# Compute coords, ranges and size
@@ -1057,7 +1057,7 @@ def _process(self, element, key=None):
precompute = self.p.precompute
if interp == 'linear': interp = 'bilinear'
wireframe = False
- if (not (element.vdims or (isinstance(element, TriMesh) and element.nodes.vdims))) and ds_version <= '0.6.9':
+ if (not (element.vdims or (isinstance(element, TriMesh) and element.nodes.vdims))) and ds_version <= LooseVersion('0.6.9'):
self.p.aggregator = ds.any() if isinstance(agg, ds.any) or agg == 'any' else ds.count()
return aggregate._process(self, element, key)
elif ((not interp and (isinstance(agg, (ds.any, ds.count)) or
@@ -1114,11 +1114,11 @@ class quadmesh_rasterize(trimesh_rasterize):
"""

def _precompute(self, element, agg):
- if ds_version <= '0.7.0':
+ if ds_version <= LooseVersion('0.7.0'):
return super(quadmesh_rasterize, self)._precompute(element.trimesh(), agg)

def _process(self, element, key=None):
- if ds_version <= '0.7.0':
+ if ds_version <= LooseVersion('0.7.0'):
return super(quadmesh_rasterize, self)._process(element, key)

if element.interface.datatype != 'xarray':
@@ -1347,7 +1347,7 @@ def _process(self, element, key=None):

if self.p.clims:
shade_opts['span'] = self.p.clims
- elif ds_version > '0.5.0' and cnorm != 'eq_hist':
+ elif ds_version > LooseVersion('0.5.0') and self.p.cnorm != 'eq_hist':
shade_opts['span'] = element.range(vdim)

params = dict(get_param_values(element), kdims=kdims,
@@ -1602,7 +1602,7 @@ class SpreadingOperation(LinkableOperation):
to make sparse plots more visible.
"""

- how = param.ObjectSelector(default='source' if ds_version <= '0.11.1' else None,
+ how = param.ObjectSelector(default='source' if ds_version <= LooseVersion('0.11.1') else None,
objects=[None, 'source', 'over', 'saturate', 'add', 'max', 'min'], doc="""
The name of the compositing operator to use when combining
pixels. Default of None uses 'over' operator for RGB elements
