Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cf/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@
)

# Check the version of dask
_minimum_vn = "2022.03.0"
_minimum_vn = "2022.6.0"
if LooseVersion(dask.__version__) < LooseVersion(_minimum_vn):
raise RuntimeError(
f"Bad dask version: cf requires dask>={_minimum_vn}. "
Expand Down
67 changes: 0 additions & 67 deletions cf/data/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -5914,73 +5914,6 @@ def concatenate_data(cls, data_list, axis):
assert len(data_list) == 1
return data_list[0]

@classmethod
def reconstruct_sectioned_data(cls, sections, cyclic=(), hardmask=None):
    """Expects a dictionary of Data objects with ordering
    information as keys, as output by the section method when called
    with a Data object. Returns a reconstructed cf.Data object with
    the sections in the original order.

    :Parameters:

        sections: `dict`
            The dictionary of `Data` objects with ordering information
            as keys. Each key is a tuple with one element per
            dimension of the original data.

        cyclic: optional
            Passed to the `cyclic` method of the reconstructed
            `Data` object. Presumably the identifiers of the
            cyclic axes of the original data — TODO confirm
            against `Data.cyclic`.

        hardmask: `bool` or `None`, optional
            If not `None`, set the `hardmask` attribute of the
            reconstructed `Data` object to this value.

    :Returns:

        `Data`
            The resulting reconstructed Data object.

    **Examples**

    >>> d = cf.Data(numpy.arange(120).reshape(2, 3, 4, 5))
    >>> x = d.section([1, 3])
    >>> len(x)
    8
    >>> e = cf.Data.reconstruct_sectioned_data(x)
    >>> e.equals(d)
    True

    """
    # The length of any key gives the dimensionality of the
    # original data.
    ndims = len(list(sections.keys())[0])

    # Re-join the sections one axis at a time, from the last axis
    # back to the first. Each pass replaces `sections` with a
    # smaller dictionary of partially reconstructed sections.
    for i in range(ndims - 1, -1, -1):
        keys = sorted(sections.keys())
        if i == 0:
            if keys[0][i] is None:
                # A `None` in position 0 of the (sorted-first) key
                # means the data was never sectioned along this
                # axis, so by now there must be exactly one
                # fully reconstructed section left.
                assert len(keys) == 1
                return tuple(sections.values())[0]
            else:
                # Final pass: one concatenation along axis 0
                # completes the reconstruction.
                data_list = []
                for k in keys:
                    data_list.append(sections[k])

                out = cls.concatenate_data(data_list, i)

                # Restore the cyclic axes and (optionally) the
                # mask hardness on the reconstructed result.
                out.cyclic(cyclic)
                if hardmask is not None:
                    out.hardmask = hardmask

                return out

        if keys[0][i] is not None:
            # Group the sections by their key prefix (all key
            # elements before position i); sorting above ensures
            # each group's members are contiguous and in order.
            # Concatenate each group along axis i.
            new_sections = {}
            new_key = keys[0][:i]
            data_list = []
            for k in keys:
                if k[:i] == new_key:
                    data_list.append(sections[k])
                else:
                    # Key prefix changed: flush the current group
                    # and start a new one.
                    new_sections[new_key] = cls.concatenate_data(
                        data_list, axis=i
                    )
                    new_key = k[:i]
                    data_list = [sections[k]]

            # Flush the final group, then recurse to the next axis
            # with the reduced dictionary.
            new_sections[new_key] = cls.concatenate_data(data_list, i)
            sections = new_sections

def argmax(self, axis=None, unravel=False):
"""Return the indices of the maximum values along an axis.

Expand Down
36 changes: 36 additions & 0 deletions cf/data/mixin/deprecations.py
Original file line number Diff line number Diff line change
Expand Up @@ -922,3 +922,39 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"manipulations. This may change in the future (see "
"https://github.com/dask/dask/issues/3245 for more details)."
)

@classmethod
def reconstruct_sectioned_data(cls, sections, cyclic=(), hardmask=None):
    """Reassemble a `Data` object from its sections.

    Expects a dictionary of `Data` objects keyed by ordering
    information, as output by the `section` method when called with
    a `Data` object, and returns a reconstructed `Data` object with
    the sections in the original order.

    Deprecated at version TODODASK and is no longer available.

    :Parameters:

        sections: `dict`
            The dictionary of `Data` objects with ordering information
            as keys.

    :Returns:

        `Data`
            The resulting reconstructed Data object.

    **Examples**

    >>> d = cf.Data(numpy.arange(120).reshape(2, 3, 4, 5))
    >>> x = d.section([1, 3])
    >>> len(x)
    8
    >>> e = cf.Data.reconstruct_sectioned_data(x)
    >>> e.equals(d)
    True

    """
    # Unconditionally raise: this method was removed during the
    # daskification of `Data`.
    message = (
        "Data method 'reconstruct_sectioned_data' has been deprecated "
        "at version TODODASK and is no longer available"
    )
    raise DeprecationError(message)
11 changes: 1 addition & 10 deletions cf/test/test_Data.py
Original file line number Diff line number Diff line change
Expand Up @@ -2493,14 +2493,6 @@ def test_Data_section(self):
self.assertEqual(key, (None, None, None))
self.assertTrue(value.equals(d))

@unittest.skipIf(TEST_DASKIFIED_ONLY, "Needs reconstruct_sectioned_data")
def test_Data_reconstruct_sectioned_data(self):
    """Placeholder test for `Data.reconstruct_sectioned_data`."""
    # Honour the optional per-test-name filter used throughout
    # this test suite.
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    # TODODASK: Write when Data.reconstruct_sectioned_data is
    # daskified

@unittest.skipIf(TEST_DASKIFIED_ONLY, "no attr. 'partition_configuration'")
def test_Data_count(self):
if self.test_only and inspect.stack()[0][3] not in self.test_only:
Expand Down Expand Up @@ -2532,7 +2524,6 @@ def test_Data_exp(self):
# self.assertTrue((d.array==c).all()) so need a
# check which accounts for floating point calcs:
np.testing.assert_allclose(d.array, c)
# --- End: for

d = cf.Data(a, "m")
with self.assertRaises(Exception):
Expand Down Expand Up @@ -3954,7 +3945,7 @@ def test_Data_soften_mask(self):
d.soften_mask()
self.assertFalse(d.hardmask)
self.assertEqual(len(d.to_dask_array().dask.layers), 2)

def test_Data_compressed_array(self):
import cfdm

Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,4 @@ numpy>=1.22
cfdm>=1.9.1.0, <1.9.2.0
psutil>=0.6.0
cfunits>=3.3.4
dask>=2022.03.0
dask>=2022.6.0