Commit

flake8

mrocklin committed Apr 28, 2016
1 parent ec6d1e0 commit 4ae2c32
Showing 40 changed files with 156 additions and 199 deletions.
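Most of the edits below are mechanical flake8 cleanups: unused imports, unused local variables, duplicated definitions, and line-break style. As a hypothetical illustration (none of these names come from the diff), the two warnings that account for most of the deletions look like this:

import os          # F401: 'os' imported but unused -- flake8 asks for the import to be removed
import math


def mean(values):
    total = sum(values)
    count = len(values)
    scratch = math.pi  # F841: local variable 'scratch' is assigned to but never used
    return total / count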
2 changes: 1 addition & 1 deletion dask/array/chunk.py
@@ -8,7 +8,7 @@
import numpy as np
from . import numpy_compat as npcompat

from ..compatibility import builtins, getargspec
from ..compatibility import getargspec
from ..utils import ignoring


3 changes: 0 additions & 3 deletions dask/array/conftest.py
@@ -1,6 +1,3 @@
import pytest


def pytest_ignore_collect(path, config):
if 'fft.py' in str(path):
return True
10 changes: 3 additions & 7 deletions dask/array/core.py
@@ -15,16 +15,16 @@
import uuid
from warnings import warn

from toolz.curried import (pipe, partition, concat, unique, pluck, join, first,
from toolz.curried import (pipe, partition, concat, pluck, join, first,
memoize, map, groupby, valmap, accumulate, merge,
curry, reduce, interleave, sliding_window, partial)
reduce, interleave, sliding_window)
import numpy as np

from . import chunk
from .slicing import slice_array
from . import numpy_compat
from ..base import Base, compute, tokenize, normalize_token
from ..utils import (deepmap, ignoring, repr_long_list, concrete, is_integer,
from ..utils import (deepmap, ignoring, concrete, is_integer,
IndexCallable, funcname)
from ..compatibility import unicode, long, getargspec, zip_longest
from .. import threaded, core
@@ -1752,7 +1752,6 @@ def stack(seq, axis=0):
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))

assert len(set(a.chunks for a in seq)) == 1 # same chunks
shape = seq[0].shape[:axis] + (len(seq),) + seq[0].shape[axis:]
chunks = ( seq[0].chunks[:axis]
+ ((1,) * n,)
+ seq[0].chunks[axis:])
@@ -1824,9 +1823,6 @@ def concatenate(seq, axis=0):
for j in range(len(bds[0])) if j != axis):
raise ValueError("Block shapes do not align")

shape = (seq[0].shape[:axis]
+ (sum(a.shape[axis] for a in seq),)
+ seq[0].shape[axis + 1:])
chunks = ( seq[0].chunks[:axis]
+ (sum([bd[axis] for bd in bds], ()),)
+ seq[0].chunks[axis + 1:])
1 change: 0 additions & 1 deletion dask/array/learn.py
@@ -34,7 +34,6 @@ def fit(model, x, y, get=threaded.get, **kwargs):
Examples
--------
>>> import dask.array as da
>>> X = da.random.random((10, 3), chunks=(5, 3))
>>> y = da.random.random(10, chunks=(5,))
1 change: 0 additions & 1 deletion dask/array/linalg.py
@@ -5,7 +5,6 @@
import numpy as np

from ..base import tokenize
from ..compatibility import reduce
from .core import top, dotmany, Array, eye
from .random import RandomState

1 change: 0 additions & 1 deletion dask/array/reductions.py
@@ -624,7 +624,6 @@ def cumreduction(func, binop, ident, x, axis, dtype=None):
indices = list(product(*[range(nb) if ii != axis else [i]
for ii, nb in enumerate(x.numblocks)]))
for old, ind in zip(last_indices, indices):
last = (name, 'extra') + old
this_slice = (name, 'extra') + ind
dsk[this_slice] = (binop, (name, 'extra') + old,
(operator.getitem, (m.name,) + old, slc))
1 change: 0 additions & 1 deletion dask/array/slicing.py
@@ -459,7 +459,6 @@ def take_sorted(outname, inname, blockdims, index, axis=0):
--------
take - calls this function
"""
n = len(blockdims)
sizes = blockdims[axis] # the blocksizes on the axis that we care about

index_lists = partition_by_size(sizes, sorted(index))
12 changes: 1 addition & 11 deletions dask/array/tests/test_array_core.py
@@ -87,16 +87,6 @@ def test_top_supports_broadcasting_rules():
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 0))}


def test_concatenate3():
x = np.array([1, 2])
assert concatenate3([[x, x, x],
[x, x, x]]).shape == (2, 6)

x = np.array([[1, 2]])
assert concatenate3([[x, x, x],
[x, x, x]]).shape == (2, 6)


def test_concatenate3_on_scalars():
assert_eq(concatenate3([1, 2]), np.array([1, 2]))

@@ -1435,7 +1425,7 @@ def test_histogram_extra_args_and_shapes():
da.histogram(v, bins=bins, weights=w, density=True)[0])


def test_concatenate3():
def test_concatenate3_2():
x = np.array([1, 2])
assert_eq(concatenate3([x, x, x]),
np.array([1, 2, 1, 2, 1, 2]))
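The rename above (together with the deletion of the earlier copy) clears a flake8 redefinition warning: test_array_core.py defined test_concatenate3 twice, and in Python the second def silently replaces the first, so one of the tests never ran. A stripped-down sketch of the problem, with made-up test bodies:

import numpy as np


def test_concatenate3():        # this definition is silently discarded
    assert np.array([1, 2]).shape == (2,)


def test_concatenate3():        # F811: redefinition of unused 'test_concatenate3'
    assert np.array([[1, 2]]).shape == (1, 2)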
2 changes: 1 addition & 1 deletion dask/array/tests/test_chunk.py
@@ -5,7 +5,7 @@

import numpy as np

from dask.array.chunk import coarsen, keepdims_wrapper, trim
from dask.array.chunk import coarsen, keepdims_wrapper
import dask.array as da


2 changes: 1 addition & 1 deletion dask/array/tests/test_image.py
@@ -7,7 +7,7 @@
pytest.importorskip('skimage')
from dask.array.image import imread as da_imread
import numpy as np
from skimage.io import imread, imsave
from skimage.io import imsave


@contextmanager
2 changes: 0 additions & 2 deletions dask/array/tests/test_linalg.py
@@ -192,7 +192,6 @@ def test_lu_1():

@pytest.mark.parametrize('size', [10, 20, 30, 50])
def test_lu_2(size):
import scipy.linalg
np.random.seed(10)
A = np.random.random_integers(0, 10, (size, size))

@@ -407,7 +406,6 @@ def test_cholesky(shape, chunk):
@pytest.mark.parametrize(("nrow", "ncol", "chunk"),
[(20, 10, 5), (100, 10, 10)])
def test_lstsq(nrow, ncol, chunk):
import scipy.linalg
np.random.seed(1)
A = np.random.random_integers(1, 20, (nrow, ncol))
b = np.random.random_integers(1, 20, nrow)
1 change: 0 additions & 1 deletion dask/array/tests/test_percentiles.py
@@ -3,7 +3,6 @@

from dask.utils import skip
import dask.array as da
from dask.array.percentile import _percentile
import dask
import numpy as np

1 change: 0 additions & 1 deletion dask/array/tests/test_random.py
@@ -5,7 +5,6 @@
from dask.array.core import Array
from dask.array.random import random, exponential, normal
import dask.array as da
import dask
from dask.multiprocessing import get as mpget
from dask.multiprocessing import _dumps, _loads

2 changes: 1 addition & 1 deletion dask/array/tests/test_reductions.py
@@ -6,7 +6,7 @@
import dask.array as da
from dask.core import get_deps
from dask.context import set_options
from dask.utils import ignoring

import numpy as np
# temporary until numpy functions migrated
try:
1 change: 0 additions & 1 deletion dask/array/tests/test_slicing.py
@@ -4,7 +4,6 @@
import dask
from dask.compatibility import skip
import dask.array as da
from dask.array import Array
from dask.array.slicing import (slice_array, _slice_1d, take, new_blockdim,
sanitize_index)
from operator import getitem
24 changes: 14 additions & 10 deletions dask/async.py
@@ -114,16 +114,18 @@
"""
from __future__ import absolute_import, division, print_function

from operator import add
import sys
import traceback
from operator import add

from .core import (istask, flatten, reverse_dict, get_dependencies, ishashable,
_deps)
_deps)
from .context import _globals
from .order import order
from .callbacks import unpack_callbacks
from .optimize import cull


def inc(x):
return x + 1

@@ -138,8 +140,8 @@ def start_state_from_dask(dsk, cache=None, sortkey=None):
--------
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> import pprint
>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +NORMALIZE_WHITESPACE
>>> from pprint import pprint
>>> pprint(start_state_from_dask(dsk)) # doctest: +NORMALIZE_WHITESPACE
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
@@ -210,6 +212,7 @@ def start_state_from_dask(dsk, cache=None, sortkey=None):
2. Manage administrative state to coordinate with the scheduler
'''


def _execute_task(arg, cache, dsk=None):
""" Do the actual work of collecting data and executing a function
@@ -318,7 +321,7 @@ def finish_task(dsk, key, state, results, sortkey, delete=True,
if DEBUG:
from chest.core import nbytes
print("Key: %s\tDep: %s\t NBytes: %.2f\t Release" % (key, dep,
sum(map(nbytes, state['cache'].values()) / 1e6)))
sum(map(nbytes, state['cache'].values()) / 1e6)))
release_data(dep, state, delete=delete)
elif delete and dep not in results:
release_data(dep, state, delete=delete)
@@ -371,6 +374,7 @@ def default_get_id():
The main function of the scheduler. Get is the main entry point.
'''


def get_async(apply_async, num_workers, dsk, result, cache=None,
queue=None, get_id=default_get_id, raise_on_exception=False,
rerun_exceptions_locally=None, callbacks=None, **kwargs):
@@ -558,13 +562,13 @@ def __init__(self, exception, traceback):
def __str__(self):
return (str(self.exception) + "\n\n"
"Traceback\n"
"---------\n"
+ self.traceback)
"---------\n" +
self.traceback)

def __dir__(self):
return sorted(set(dir(type(self))
+ list(self.__dict__)
+ dir(self.exception)))
return sorted(set(dir(type(self)) +
list(self.__dict__) +
dir(self.exception)))

def __getattr__(self, key):
try:
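The __str__ and __dir__ edits above change only where the continuation lines break: the binary operator now ends a line instead of starting the next one, the style flake8's W503 check ("line break before binary operator") pushes toward. A small sketch with placeholder strings, assuming that check is enabled:

traceback_text = "  File ...\n"

# flagged (W503): the '+' begins the continuation line
message = ("Traceback\n"
           "---------\n"
           + traceback_text)

# accepted: the '+' ends the previous line
message = ("Traceback\n"
           "---------\n" +
           traceback_text)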
23 changes: 10 additions & 13 deletions dask/bag/core.py
@@ -1,11 +1,7 @@
from __future__ import absolute_import, division, print_function

import bz2
from collections import Iterable, Iterator, defaultdict
from fnmatch import fnmatchcase
from functools import wraps, partial
from glob import glob
import io
import itertools
import math
import os
@@ -14,23 +10,24 @@

from ..utils import ignoring

from toolz import (merge, frequencies, merge_with, take, reduce,
join, reduceby, valmap, count, map, partition_all, filter,
remove, pluck, groupby, topk, compose, curry)
from toolz import (merge, take, reduce, valmap, map, partition_all, filter,
remove, compose, curry)
from toolz.compatibility import iteritems, zip
import toolz
with ignoring(ImportError):
try:
from cytoolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
except:
from toolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)

from ..base import Base, normalize_token, tokenize
from ..compatibility import (apply, BytesIO, unicode, urlopen, urlparse,
GzipFile)
from ..compatibility import apply, unicode, urlopen
from ..core import list2, quote, istask, get_dependencies, reverse_dict
from ..multiprocessing import get as mpget
from ..optimize import fuse, cull, inline
from ..utils import (file_size, infer_compression, open, system_encoding,
takes_multiple_arguments, textblock, funcname)
from ..utils import (infer_compression, open, system_encoding,
takes_multiple_arguments, funcname)

no_default = '__no__default__'
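The cytoolz import above now uses an explicit try/except fallback rather than the previous with ignoring(ImportError): wrapper, so each name is bound exactly once from whichever library is available. A minimal sketch of the pattern with a few representative names:

# Prefer the faster cytoolz implementations when installed,
# otherwise fall back to the pure-Python toolz equivalents.
try:
    from cytoolz import frequencies, groupby, pluck
except ImportError:
    from toolz import frequencies, groupby, pluck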

@@ -219,7 +216,7 @@ class Item(Base):
@staticmethod
def from_imperative(value):
warn("Deprecation warning: moved to from_delayed")
return self.from_delayed(value)
return from_delayed(value)

@staticmethod
def from_delayed(value):
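The from_imperative change just above is one of the few behavioural fixes in the commit: inside a @staticmethod there is no self, so self.from_delayed(value) would have raised NameError whenever the deprecated path was hit; flake8 reports the undefined name (F821), and the call is rerouted to the from_delayed available at module scope. A stripped-down sketch of the same bug, with hypothetical names:

class Loader:                              # hypothetical class, for illustration only
    @staticmethod
    def from_legacy(value):
        # A staticmethod has no bound instance, so `self` is an undefined
        # name here (flake8: F821) and would raise NameError at call time:
        #     return self.from_new(value)
        return Loader.from_new(value)      # route through the class (or a module-level helper)

    @staticmethod
    def from_new(value):
        return value


print(Loader.from_legacy(42))              # -> 42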
1 change: 0 additions & 1 deletion dask/base.py
@@ -5,7 +5,6 @@
from operator import attrgetter
import pickle
import os
import sys
import uuid
import warnings

1 change: 0 additions & 1 deletion dask/bytes/utils.py
@@ -113,7 +113,6 @@ def read_block(f, offset, length, delimiter=None):
f.seek(start + length)
seek_delimiter(f, delimiter, 2**16)
end = f.tell()
eof = not f.read(1)

offset = start
length = end - start
2 changes: 1 addition & 1 deletion dask/core.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import, division, print_function

from operator import add

from itertools import chain

def inc(x):
@@ -445,7 +446,6 @@ def quote(x):
>>> quote([1, 2, 3])
[1, 2, 3]
>>> from operator import add
>>> quote((add, 1, 2)) # doctest: +SKIP
(tuple, [add, 1, 2])
"""
11 changes: 3 additions & 8 deletions dask/dataframe/core.py
@@ -1,7 +1,7 @@
from __future__ import absolute_import, division, print_function

import bisect
from collections import Iterable, Iterator
from collections import Iterator
from datetime import datetime
from distutils.version import LooseVersion
import math
@@ -25,7 +25,7 @@
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import unicode, apply, operator_div, bind_method
from ..compatibility import apply, operator_div, bind_method
from ..utils import (repr_long_list, IndexCallable,
pseudorandom, derived_from, different_seeds)
from ..base import Base, compute, tokenize, normalize_token
@@ -402,10 +402,6 @@ def _loc_element(self, ind):
raise KeyError('the label [%s] is not in the index' % str(ind))
dsk = {(name, 0): (lambda df: df.loc[ind:ind], (self._name, part))}

if self.ndim == 1:
columns = self.name
else:
columns = ind
return self._constructor(merge(self.dask, dsk), name, self, [ind, ind])

def _loc_slice(self, ind):
@@ -1167,7 +1163,6 @@ def to_bag(self, index=False):

@derived_from(pd.Series)
def to_frame(self, name=None):
_name = name if name is not None else self.name
return map_partitions(pd.Series.to_frame, self._pd.to_frame(name), self, name)

@classmethod
@@ -1462,7 +1457,7 @@ def assign(self, **kwargs):

# Figure out columns of the output
df2 = self._pd.assign(**_extract_pd(kwargs))
return elemwise(_assign, self, *pairs)
return elemwise(_assign, self, *pairs, columns=df2)

@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
1 change: 0 additions & 1 deletion dask/dataframe/csv.py
@@ -11,7 +11,6 @@

from ..bytes import read_bytes
from ..bytes.compression import seekable_files, files as cfiles
from ..utils import ensure_bytes


delayed = delayed(pure=True)
