Code style cleanup (#227)
* Remove from __future__ imports now that they are no longer needed

* Remove superfluous 'assert dask' statements from test modules

* Code style cleanup, always import numpy as np

* Code style cleanup, always import dask.array as da

* Always use 'import scipy.ndimage'

* Import dask_image.ndfilters instead of import ... as da_ndf

* flake8 code style

* Oops, forgot to fix a merge conflict

* Import dask_image.ndinterp into tests, don't rename as da_ndinterp

* Fix test, smallest chunk must be larger than spline overlap region
GenevieveBuckley committed May 24, 2021
1 parent 414b7c6 commit aeff4d1
Showing 45 changed files with 517 additions and 747 deletions.
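
For reference, the import conventions described in the commit message above boil down to the following. This is only a minimal sketch; the array and the filter call at the end are illustrative and not part of the commit.

import dask.array as da        # dask.array is always aliased as da
import numpy as np             # numpy is always aliased as np
import scipy.ndimage           # used directly, never scipy.ndimage.filters

import dask_image.ndfilters    # imported under its own name, no da_ndf alias
import dask_image.ndinterp     # likewise, no da_ndinterp alias

# Example call using the aliased modules:
image = da.ones((512, 512), chunks=(128, 128))
smoothed = dask_image.ndfilters.gaussian_filter(image, sigma=2)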
9 changes: 0 additions & 9 deletions .gen_ci_support/ff_ci_pr_build.py
@@ -13,15 +13,6 @@
normal branch), then the build proceeds without issues.
"""


try:
from future_builtins import (
map,
filter,
)
except ImportError:
pass

import argparse
import codecs
import contextlib
1 change: 1 addition & 0 deletions dask_image/__init__.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from ._version import get_versions

__version__ = get_versions()['version']
del get_versions
2 changes: 1 addition & 1 deletion dask_image/dispatch/_dispatch_ndfilters.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-

import numpy as np
import scipy.ndimage.filters
import scipy.ndimage

from ._dispatcher import Dispatcher

1 change: 0 additions & 1 deletion dask_image/dispatch/_dispatch_ndinterp.py
@@ -10,7 +10,6 @@
"dispatch_asarray",
]


dispatch_affine_transform = Dispatcher(name="dispatch_affine_transform")


16 changes: 7 additions & 9 deletions dask_image/imread/__init__.py
@@ -3,10 +3,8 @@
import numbers
import warnings

import dask
import dask.array
import dask.delayed
import numpy
import dask.array as da
import numpy as np
import pims

from . import _utils
@@ -41,14 +39,14 @@ def imread(fname, nframes=1, *, arraytype="numpy"):
raise ValueError("`nframes` must be greater than zero.")

if arraytype == "numpy":
arrayfunc = numpy.asanyarray
arrayfunc = np.asanyarray
elif arraytype == "cupy": # pragma: no cover
import cupy
arrayfunc = cupy.asanyarray

with pims.open(sfname) as imgs:
shape = (len(imgs),) + imgs.frame_shape
dtype = numpy.dtype(imgs.pixel_type)
dtype = np.dtype(imgs.pixel_type)

if nframes == -1:
nframes = shape[0]
@@ -69,16 +67,16 @@ def imread(fname, nframes=1, *, arraytype="numpy"):
# place source filenames into dask array
filenames = sorted(glob.glob(sfname)) # pims also does this
if len(filenames) > 1:
ar = dask.array.from_array(filenames, chunks=(nframes,))
ar = da.from_array(filenames, chunks=(nframes,))
multiple_files = True
else:
ar = dask.array.from_array(filenames * shape[0], chunks=(nframes,))
ar = da.from_array(filenames * shape[0], chunks=(nframes,))
multiple_files = False

# read in data using encoded filenames
a = ar.map_blocks(
_map_read_frame,
chunks=dask.array.core.normalize_chunks(
chunks=da.core.normalize_chunks(
(nframes,) + shape[1:], shape),
multiple_files=multiple_files,
new_axis=list(range(1, len(shape))),
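
The imread diff above now calls da.core.normalize_chunks instead of dask.array.core.normalize_chunks. As a rough illustration of what that call computes (the shapes here are made up for the example):

import dask.array as da

# Expand a per-axis chunk spec into explicit chunk sizes for the whole shape,
# e.g. 10 frames of 512 x 512 pixels, read 4 frames at a time:
chunks = da.core.normalize_chunks((4, 512, 512), shape=(10, 512, 512))
# chunks == ((4, 4, 2), (512,), (512,))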
4 changes: 2 additions & 2 deletions dask_image/imread/_utils.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
import numpy
import numpy as np
import pims


def _read_frame(fn, i, *, arrayfunc=numpy.asanyarray):
def _read_frame(fn, i, *, arrayfunc=np.asanyarray):
with pims.open(fn) as imgs:
return arrayfunc(imgs[i])
50 changes: 10 additions & 40 deletions dask_image/ndfilters/__init__.py
@@ -19,46 +19,16 @@
"threshold_local",
]

from ._conv import (
convolve,
correlate,
)

from ._diff import (
laplace,
)

from ._edge import (
prewitt,
sobel,
)

from ._gaussian import (
gaussian_filter,
gaussian_gradient_magnitude,
gaussian_laplace,
)

from ._generic import (
generic_filter,
)

from ._order import (
minimum_filter,
median_filter,
maximum_filter,
rank_filter,
percentile_filter,
)

from ._smooth import (
uniform_filter,
)

from ._threshold import (
threshold_local,
)

from ._conv import convolve, correlate
from ._diff import laplace
from ._edge import prewitt, sobel
from ._gaussian import (gaussian_filter, gaussian_gradient_magnitude,
gaussian_laplace)
from ._generic import generic_filter
from ._order import (maximum_filter, median_filter, minimum_filter,
percentile_filter, rank_filter)
from ._smooth import uniform_filter
from ._threshold import threshold_local

convolve.__module__ = __name__
correlate.__module__ = __name__
9 changes: 4 additions & 5 deletions dask_image/ndfilters/_conv.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
import scipy.ndimage.filters
import scipy.ndimage

from . import _utils
from ..dispatch._dispatch_ndfilters import (dispatch_convolve,
dispatch_correlate)
from ..dispatch._utils import check_arraytypes_compatible
from ..dispatch._dispatch_ndfilters import (
dispatch_convolve,
dispatch_correlate)
from . import _utils

__all__ = [
"convolve",
4 changes: 2 additions & 2 deletions dask_image/ndfilters/_diff.py
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-


import scipy.ndimage.filters
import scipy.ndimage

from . import _utils
from ..dispatch._dispatch_ndfilters import dispatch_laplace
from . import _utils

__all__ = [
"laplace",
4 changes: 2 additions & 2 deletions dask_image/ndfilters/_edge.py
@@ -3,10 +3,10 @@

import numbers

import scipy.ndimage.filters
import scipy.ndimage

from . import _utils
from ..dispatch._dispatch_ndfilters import dispatch_prewitt, dispatch_sobel
from . import _utils

__all__ = [
"prewitt",
17 changes: 8 additions & 9 deletions dask_image/ndfilters/_gaussian.py
@@ -3,14 +3,13 @@

import numbers

import numpy
import scipy.ndimage.filters
import numpy as np
import scipy.ndimage

from . import _utils
from ..dispatch._dispatch_ndfilters import (
dispatch_gaussian_filter,
dispatch_gaussian_gradient_magnitude,
dispatch_gaussian_filter, dispatch_gaussian_gradient_magnitude,
dispatch_gaussian_laplace)
from . import _utils

__all__ = [
"gaussian_filter",
@@ -23,9 +22,9 @@
def _get_sigmas(image, sigma):
ndim = image.ndim

nsigmas = numpy.array(sigma)
nsigmas = np.array(sigma)
if nsigmas.ndim == 0:
nsigmas = numpy.array(ndim * [nsigmas[()]])
nsigmas = np.array(ndim * [nsigmas[()]])

if nsigmas.ndim != 1:
raise RuntimeError(
@@ -46,12 +45,12 @@ def _get_sigmas(image, sigma):


def _get_border(image, sigma, truncate):
sigma = numpy.array(_get_sigmas(image, sigma))
sigma = np.array(_get_sigmas(image, sigma))

if not isinstance(truncate, numbers.Real):
raise TypeError("Must have a real truncate value.")

half_shape = tuple(numpy.ceil(sigma * truncate).astype(int))
half_shape = tuple(np.ceil(sigma * truncate).astype(int))

return half_shape

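
The _get_border helper changed above turns sigma and truncate into a per-axis halo size via np.ceil(sigma * truncate). A quick worked example of that arithmetic, with arbitrary values:

import numpy as np

sigma = np.array([2.0, 3.0])
truncate = 4.0
half_shape = tuple(np.ceil(sigma * truncate).astype(int))
# half_shape == (8, 12): the extra border each chunk needs per axis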
4 changes: 2 additions & 2 deletions dask_image/ndfilters/_generic.py
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-

import numpy as np
import scipy.ndimage.filters
import scipy.ndimage

from . import _utils
from ..dispatch._dispatch_ndfilters import dispatch_generic_filter
from . import _utils

__all__ = [
"generic_filter",
13 changes: 6 additions & 7 deletions dask_image/ndfilters/_order.py
@@ -1,14 +1,13 @@
# -*- coding: utf-8 -*-

import scipy.ndimage.filters
import scipy.ndimage

from ..dispatch._dispatch_ndfilters import (dispatch_maximum_filter,
dispatch_median_filter,
dispatch_minimum_filter,
dispatch_percentile_filter,
dispatch_rank_filter)
from . import _utils
from ..dispatch._dispatch_ndfilters import (
dispatch_minimum_filter,
dispatch_median_filter,
dispatch_maximum_filter,
dispatch_rank_filter,
dispatch_percentile_filter)

__all__ = [
"minimum_filter",
4 changes: 2 additions & 2 deletions dask_image/ndfilters/_smooth.py
@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-


import scipy.ndimage.filters
import scipy.ndimage

from ..dispatch._dispatch_ndfilters import dispatch_uniform_filter
from . import _utils
from ._gaussian import gaussian_filter
from ..dispatch._dispatch_ndfilters import dispatch_uniform_filter

__all__ = [
"uniform_filter",
2 changes: 1 addition & 1 deletion dask_image/ndfilters/_threshold.py
@@ -1,8 +1,8 @@
import dask.array as da
import numpy as np

from . import _gaussian, _generic, _order
from ..dispatch._dispatch_ndfilters import dispatch_threshold_local_mean
from . import _gaussian, _generic, _order

__all__ = [
"threshold_local",
19 changes: 8 additions & 11 deletions dask_image/ndfilters/_utils.py
@@ -1,13 +1,10 @@
# -*- coding: utf-8 -*-

from __future__ import division

import collections.abc
import collections
import inspect
import numbers
import re

import numpy
import numpy as np


def _get_docstring(func):
@@ -103,7 +100,7 @@ def _get_size(ndim, size):

if isinstance(size, numbers.Number):
size = ndim * (size,)
size = numpy.array(size)
size = np.array(size)

if size.ndim != 1:
raise RuntimeError("The size must have only one dimension.")
@@ -120,13 +117,13 @@ def _get_size(ndim, size):


def _get_origin(size, origin=0):
size = numpy.array(size)
size = np.array(size)
ndim = len(size)

if isinstance(origin, numbers.Number):
origin = ndim * (origin,)

origin = numpy.array(origin)
origin = np.array(origin)

if not issubclass(origin.dtype.type, numbers.Integral):
raise TypeError("The origin must be of integral type.")
@@ -150,8 +147,8 @@ def _get_origin(size, origin=0):


def _get_depth(size, origin=0):
origin = numpy.array(_get_origin(size, origin))
size = numpy.array(size)
origin = np.array(_get_origin(size, origin))
size = np.array(size)

half_size = size // 2
depth = half_size + abs(origin)
@@ -171,7 +168,7 @@ def _get_footprint(ndim, size=None, footprint=None):
# Get a footprint based on the size.
if size is not None:
size = _get_size(ndim, size)
footprint = numpy.ones(size, dtype=bool)
footprint = np.ones(size, dtype=bool)

# Validate the footprint.
if footprint.ndim != ndim:
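
Similarly, _get_depth above combines the footprint size and origin into a per-axis depth, presumably the chunk overlap the filters request. A small worked example of the formula exactly as written, with arbitrary numbers:

import numpy as np

size = np.array([5, 7])
origin = np.array([0, 1])
half_size = size // 2             # array([2, 3])
depth = half_size + abs(origin)   # array([2, 4])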