From 9632f8cb9eaaef3ae0472e64cb7fe8308d3df947 Mon Sep 17 00:00:00 2001 From: Jakub Kaczmarzyk Date: Wed, 10 Oct 2018 10:09:21 -0400 Subject: [PATCH 01/12] yapf-ify --- pydra/__about__.py | 14 +- pydra/__init__.py | 15 +- pydra/_version.py | 129 ++-- pydra/engine/auxiliary.py | 119 ++-- pydra/engine/newengine.py | 367 ++++++++---- pydra/engine/state.py | 41 +- pydra/engine/submitter.py | 49 +- pydra/engine/tests/test_auxiliary.py | 186 ++++-- pydra/engine/tests/test_newnode.py | 710 +++++++++++++++++------ pydra/engine/tests/test_newnode_neuro.py | 38 +- pydra/engine/workers.py | 7 +- 11 files changed, 1166 insertions(+), 509 deletions(-) diff --git a/pydra/__about__.py b/pydra/__about__.py index ad504098fb..0e2b671ab3 100644 --- a/pydra/__about__.py +++ b/pydra/__about__.py @@ -8,15 +8,13 @@ del get_versions CLASSIFIERS = [ - 'Development Status :: 2 - Pre-Alpha', - 'Environment :: Console', + 'Development Status :: 2 - Pre-Alpha', 'Environment :: Console', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Topic :: Scientific/Engineering' + 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering' ] description = 'Pydra dataflow engine' @@ -45,9 +43,8 @@ __longdesc__ = long_description __url__ = 'https://github.com/nipype/pydra' -DOWNLOAD_URL = ( - 'http://github.com/nipype/{name}/archives/{ver}.tar.gz'.format( - name=__packagename__, ver=__version__)) +DOWNLOAD_URL = ('http://github.com/nipype/{name}/archives/{ver}.tar.gz'.format( + name=__packagename__, ver=__version__)) PLATFORMS = 'OS Independent' MAJOR = __version__.split('.')[0] MINOR = __version__.split('.')[1] @@ -64,8 +61,7 @@ SETUP_REQUIRES = ['setuptools>=27.0'] TESTS_REQUIRES = ['pytest-cov', 'codecov', 'pytest-env', 'pytest-xdist'] -LINKS_REQUIRES = [ -] +LINKS_REQUIRES = [] EXTRA_REQUIRES = { 'tests': TESTS_REQUIRES, diff --git a/pydra/__init__.py b/pydra/__init__.py index 6fda8d2f65..9f20279cc9 100644 --- a/pydra/__init__.py +++ b/pydra/__init__.py @@ -1,12 +1,3 @@ -from .__about__ import ( - __version__, - __author__, - __license__, - __maintainer__, - __email__, - __status__, - __url__, - __packagename__, - __description__, - __longdesc__ -) +from .__about__ import (__version__, __author__, __license__, __maintainer__, + __email__, __status__, __url__, __packagename__, + __description__, __longdesc__) diff --git a/pydra/_version.py b/pydra/_version.py index 081b10712d..ff023ee1bc 100644 --- a/pydra/_version.py +++ b/pydra/_version.py @@ -1,4 +1,3 @@ - # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -7,7 +6,6 @@ # This file is released into the public domain. 
Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) - """Git implementation of _version.py.""" import errno @@ -58,16 +56,22 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, +def run_command(commands, + args, + cwd=None, + verbose=False, + hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) @@ -76,10 +80,12 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] @@ -91,7 +97,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, return None, None else: if verbose: - print("unable to find command, tried %s" % (commands,)) + print("unable to find command, tried %s" % (commands, )) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: @@ -116,9 +122,13 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} + return { + "version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None + } else: rootdirs.append(root) root = os.path.dirname(root) # up a level @@ -201,16 +211,23 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None + } @register_vcs_handler("git", "pieces_from_vcs") @@ -225,8 +242,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + out, rc = run_command( + GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -234,10 +251,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this 
yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = run_command( + GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", "--match", + "%s*" % tag_prefix + ], + cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -269,8 +288,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = ( + "unable to parse git-describe output: '%s'" % describe_out) return pieces # tag @@ -279,8 +298,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % + (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] @@ -293,13 +312,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = run_command( + GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = run_command( + GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -330,8 +349,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -445,11 +463,13 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None + } if not style or style == "default": style = "pep440" # the default @@ -469,9 +489,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date") + } def get_versions(): @@ -498,10 +522,13 @@ def get_versions(): for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + 
"error": "unable to find root of source tree", + "date": None + } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) @@ -515,6 +542,10 @@ def get_versions(): except NotThisMethod: pass - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None + } diff --git a/pydra/engine/auxiliary.py b/pydra/engine/auxiliary.py index a34463bc2f..7dbffa5e71 100644 --- a/pydra/engine/auxiliary.py +++ b/pydra/engine/auxiliary.py @@ -4,14 +4,15 @@ logger = logging.getLogger('nipype.workflow') from nipype import Node - # dj: might create a new class or move to State + # Function to change user provided mapper to "reverse polish notation" used in State def mapper2rpn(mapper, other_mappers=None): """ Functions that translate mapper to "reverse polish notation.""" output_mapper = [] - _ordering(mapper, i=0, output_mapper=output_mapper, other_mappers=other_mappers) + _ordering( + mapper, i=0, output_mapper=output_mapper, other_mappers=other_mappers) return output_mapper @@ -23,13 +24,15 @@ def _ordering(el, i, output_mapper, current_sign=None, other_mappers=None): node_nm = el[0][1:] if node_nm not in other_mappers: raise Exception("can't ask for mapper from {}".format(node_nm)) - mapper_mod = change_mapper(mapper=other_mappers[node_nm], name=node_nm) + mapper_mod = change_mapper( + mapper=other_mappers[node_nm], name=node_nm) el = (mapper_mod, el[1]) if type(el[1]) is str and el[1].startswith("_"): node_nm = el[1][1:] if node_nm not in other_mappers: raise Exception("can't ask for mapper from {}".format(node_nm)) - mapper_mod = change_mapper(mapper=other_mappers[node_nm], name=node_nm) + mapper_mod = change_mapper( + mapper=other_mappers[node_nm], name=node_nm) el = (el[0], mapper_mod) _iterate_list(el, ".", other_mappers, output_mapper=output_mapper) elif type(el) is list: @@ -37,20 +40,22 @@ def _ordering(el, i, output_mapper, current_sign=None, other_mappers=None): node_nm = el[0][1:] if node_nm not in other_mappers: raise Exception("can't ask for mapper from {}".format(node_nm)) - mapper_mod = change_mapper(mapper=other_mappers[node_nm], name=node_nm) + mapper_mod = change_mapper( + mapper=other_mappers[node_nm], name=node_nm) el[0] = mapper_mod if type(el[1]) is str and el[1].startswith("_"): node_nm = el[1][1:] if node_nm not in other_mappers: raise Exception("can't ask for mapper from {}".format(node_nm)) - mapper_mod = change_mapper(mapper=other_mappers[node_nm], name=node_nm) + mapper_mod = change_mapper( + mapper=other_mappers[node_nm], name=node_nm) el[1] = mapper_mod _iterate_list(el, "*", other_mappers, output_mapper=output_mapper) elif type(el) is str: output_mapper.append(el) else: raise Exception("mapper has to be a string, a tuple or a list") - + if i > 0: output_mapper.append(current_sign) @@ -58,11 +63,17 @@ def _ordering(el, i, output_mapper, current_sign=None, other_mappers=None): def _iterate_list(element, sign, other_mappers, output_mapper): """ Used in the mapper2rpn to get recursion. 
""" for i, el in enumerate(element): - _ordering(el, i, current_sign=sign, other_mappers=other_mappers, output_mapper=output_mapper) + _ordering( + el, + i, + current_sign=sign, + other_mappers=other_mappers, + output_mapper=output_mapper) # functions used in State to know which element should be used for a specific axis + def mapping_axis(state_inputs, mapper_rpn): """Having inputs and mapper (in rpn notation), functions returns the axes of output for every input.""" axis_for_input = {} @@ -75,16 +86,21 @@ def mapping_axis(state_inputs, mapper_rpn): right = stack.pop() left = stack.pop() if left == "OUT": - if state_inputs[right].shape == current_shape: #todo:should we allow for one-element array? + if state_inputs[ + right].shape == current_shape: #todo:should we allow for one-element array? axis_for_input[right] = current_axis else: - raise Exception("arrays for scalar operations should have the same size") + raise Exception( + "arrays for scalar operations should have the same size" + ) elif right == "OUT": if state_inputs[left].shape == current_shape: axis_for_input[left] = current_axis else: - raise Exception("arrays for scalar operations should have the same size") + raise Exception( + "arrays for scalar operations should have the same size" + ) else: if state_inputs[right].shape == state_inputs[left].shape: @@ -93,35 +109,51 @@ def mapping_axis(state_inputs, mapper_rpn): axis_for_input[left] = current_axis axis_for_input[right] = current_axis else: - raise Exception("arrays for scalar operations should have the same size") - + raise Exception( + "arrays for scalar operations should have the same size" + ) + stack.append("OUT") elif el == "*": right = stack.pop() left = stack.pop() if left == "OUT": - axis_for_input[right] = [i + 1 + current_axis[-1] - for i in range(state_inputs[right].ndim)] + axis_for_input[right] = [ + i + 1 + current_axis[-1] + for i in range(state_inputs[right].ndim) + ] current_axis = current_axis + axis_for_input[right] - current_shape = tuple([i for i in current_shape + state_inputs[right].shape]) + current_shape = tuple( + [i for i in current_shape + state_inputs[right].shape]) elif right == "OUT": for key in axis_for_input: - axis_for_input[key] = [i + state_inputs[left].ndim - for i in axis_for_input[key]] - - axis_for_input[left] = [i - len(current_shape) + current_axis[-1] + 1 - for i in range(state_inputs[left].ndim)] - current_axis = current_axis + [i + 1 + current_axis[-1] - for i in range(state_inputs[left].ndim)] - current_shape = tuple([i for i in state_inputs[left].shape + current_shape]) + axis_for_input[key] = [ + i + state_inputs[left].ndim + for i in axis_for_input[key] + ] + + axis_for_input[left] = [ + i - len(current_shape) + current_axis[-1] + 1 + for i in range(state_inputs[left].ndim) + ] + current_axis = current_axis + [ + i + 1 + current_axis[-1] + for i in range(state_inputs[left].ndim) + ] + current_shape = tuple( + [i for i in state_inputs[left].shape + current_shape]) else: axis_for_input[left] = list(range(state_inputs[left].ndim)) - axis_for_input[right] = [i + state_inputs[left].ndim - for i in range(state_inputs[right].ndim)] + axis_for_input[right] = [ + i + state_inputs[left].ndim + for i in range(state_inputs[right].ndim) + ] current_axis = axis_for_input[left] + axis_for_input[right] - current_shape = tuple([i for i in - state_inputs[left].shape + state_inputs[right].shape]) + current_shape = tuple([ + i for i in state_inputs[left].shape + + state_inputs[right].shape + ]) stack.append("OUT") else: @@ -149,17 +181,18 @@ 
def converting_axis2input(state_inputs, axis_for_input, ndim): for i in range(ndim): input_for_axis.append([]) shape.append(0) - + for inp, axis in axis_for_input.items(): for (i, ax) in enumerate(axis): input_for_axis[ax].append(inp) shape[ax] = state_inputs[inp].shape[i] - + return input_for_axis, shape # used in the Node to change names in a mapper + def change_mapper(mapper, name): """changing names of mapper: adding names of the node""" if isinstance(mapper, str): @@ -192,8 +225,10 @@ def _add_name(mlist, name): #Function interface + class FunctionInterface(object): """ A new function interface """ + def __init__(self, function, output_nm, out_read=False, input_map=None): self.function = function if type(output_nm) is list: @@ -209,7 +244,6 @@ def __init__(self, function, output_nm, out_read=False, input_map=None): # flags if we want to read the txt file to save in node.output self.out_read = out_read - def run(self, input): self.output = {} if self.input_map: @@ -217,19 +251,24 @@ def run(self, input): try: input[key_fun] = input.pop(key_inp) except KeyError: - raise Exception("no {} in the input dictionary".format(key_inp)) + raise Exception( + "no {} in the input dictionary".format(key_inp)) fun_output = self.function(**input) - logger.debug("Function Interf, input={}, fun_out={}".format(input, fun_output)) + logger.debug("Function Interf, input={}, fun_out={}".format( + input, fun_output)) if type(fun_output) is tuple: if len(self._output_nm) == len(fun_output): for i, out in enumerate(fun_output): self.output[self._output_nm[i]] = out else: - raise Exception("length of output_nm doesnt match length of the function output") - elif len(self._output_nm)==1: + raise Exception( + "length of output_nm doesnt match length of the function output" + ) + elif len(self._output_nm) == 1: self.output[self._output_nm[0]] = fun_output else: - raise Exception("output_nm doesnt match length of the function output") + raise Exception( + "output_nm doesnt match length of the function output") return fun_output @@ -239,10 +278,12 @@ def run(self, input): # https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary class DotDict(dict): """dot.notation access to dictionary attributes""" + def __getattr__(self, attr): return self.get(attr) - __setattr__= dict.__setitem__ - __delattr__= dict.__delitem__ + + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ def __getstate__(self): return self @@ -265,4 +306,4 @@ def run(self, inputs, base_dir, dir_nm_el): #have to set again self._output_dir in case of mapper self.nn._output_dir = os.path.join(self.nn.base_dir, self.nn.name) res = self.nn.run() - return res \ No newline at end of file + return res diff --git a/pydra/engine/newengine.py b/pydra/engine/newengine.py index f0ed18c406..722ad57503 100644 --- a/pydra/engine/newengine.py +++ b/pydra/engine/newengine.py @@ -14,17 +14,28 @@ import pdb + # dj ??: should I use EngineBase? 
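# --- editor's aside (illustrative, not part of the patch) -----------------
# A minimal sketch of the mapper notation handled by auxiliary.py above:
# a tuple is the scalar mapper "." (inputs paired element-wise), a list is
# the outer mapper "*" (cross product), and mapper2rpn flattens the nested
# form into reverse polish notation. Expected values are copied from the
# parametrized cases in test_auxiliary.py further down; the import path is
# an assumption inferred from the file names in this patch.
from pydra.engine import auxiliary as aux

assert aux.mapper2rpn(("a", "b")) == ["a", "b", "."]            # scalar
assert aux.mapper2rpn(["a", "b"]) == ["a", "b", "*"]            # outer
assert aux.mapper2rpn(["a", ("b", "c")]) == ["a", "b", "c", ".", "*"]
# change_mapper prepends the node name to every field in a mapper:
assert aux.change_mapper(("a", ["b", "c"]), "Node") == ("Node.a", ["Node.b", "Node.c"])
# ---------------------------------------------------------------------------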
class NewBase(object): - def __init__(self, name, mapper=None, inputs=None, other_mappers=None, mem_gb=None, - cache_location=None, print_val=True, *args, **kwargs): + def __init__(self, + name, + mapper=None, + inputs=None, + other_mappers=None, + mem_gb=None, + cache_location=None, + print_val=True, + *args, + **kwargs): self.name = name #dj TODO: I should think what is needed in the __init__ (I redefine some of rhe attributes anyway) if inputs: # adding name of the node to the input name - self._inputs = dict(("{}.{}".format(self.name, key), value) for (key, value) in inputs.items()) - self._inputs = dict((key, np.array(val)) if type(val) is list else (key, val) - for (key, val) in self._inputs.items()) + self._inputs = dict(("{}.{}".format(self.name, key), value) + for (key, value) in inputs.items()) + self._inputs = dict( + (key, np.array(val)) if type(val) is list else (key, val) + for (key, val) in self._inputs.items()) self._state_inputs = self._inputs.copy() else: self._inputs = {} @@ -36,7 +47,10 @@ def __init__(self, name, mapper=None, inputs=None, other_mappers=None, mem_gb=No # information about other nodes' mappers from workflow (in case the mapper from previous node is used) self._other_mappers = other_mappers # create state (takes care of mapper, connects inputs with axes, so we can ask for specifc element) - self._state = state.State(mapper=self._mapper, node_name=self.name, other_mappers=self._other_mappers) + self._state = state.State( + mapper=self._mapper, + node_name=self.name, + other_mappers=self._other_mappers) self._output = {} self._result = {} # flag that says if the node/wf is ready to run (has all input) @@ -52,7 +66,6 @@ def __init__(self, name, mapper=None, inputs=None, other_mappers=None, mem_gb=No self.mem_gb = mem_gb self.cache_location = cache_location - # TBD def join(self, field): pass @@ -69,7 +82,10 @@ def mapper(self): def mapper(self, mapper): self._mapper = mapper # updating state - self._state = state.State(mapper=self._mapper, node_name=self.name, other_mappers=self._other_mappers) + self._state = state.State( + mapper=self._mapper, + node_name=self.name, + other_mappers=self._other_mappers) @property def state_inputs(self): @@ -79,7 +95,6 @@ def state_inputs(self): def state_inputs(self, state_inputs): self._state_inputs.update(state_inputs) - @property def output(self): return self._output @@ -90,11 +105,9 @@ def result(self): self._reading_results() return self._result - def prepare_state_input(self): self._state.prepare_state_input(state_inputs=self.state_inputs) - def map(self, mapper, inputs=None): if self._mapper: raise Exception("mapper is already set") @@ -102,30 +115,32 @@ def map(self, mapper, inputs=None): self._mapper = aux.change_mapper(mapper, self.name) if inputs: - inputs = dict(("{}.{}".format(self.name, key), value) for (key, value) in inputs.items()) - inputs = dict((key, np.array(val)) if type(val) is list else (key, val) + inputs = dict(("{}.{}".format(self.name, key), value) + for (key, value) in inputs.items()) + inputs = dict((key, np.array(val)) if type(val) is list else (key, + val) for (key, val) in inputs.items()) self._inputs.update(inputs) self._state_inputs.update(inputs) if mapper: # updating state if we have a new mapper - self._state = state.State(mapper=self._mapper, node_name=self.name, other_mappers=self._other_mappers) - + self._state = state.State( + mapper=self._mapper, + node_name=self.name, + other_mappers=self._other_mappers) def join(self, field, node=None): # TBD pass - def checking_input_el(self, 
ind): """checking if all inputs are available (for specific state element)""" try: self.get_input_el(ind) return True - except: #TODO specify + except: #TODO specify return False - # dj: this is not used for a single node def get_input_el(self, ind): """collecting all inputs required to run the node (for specific state element)""" @@ -135,25 +150,35 @@ def get_input_el(self, ind): state_dict = self.state.state_ind(ind) # reading extra inputs that come from previous nodes for (from_node, from_socket, to_socket) in self.needed_outputs: - dir_nm_el_from = "_".join(["{}:{}".format(i, j) for i, j in list(state_dict.items()) - if i in list(from_node._state_inputs.keys())]) + dir_nm_el_from = "_".join([ + "{}:{}".format(i, j) for i, j in list(state_dict.items()) + if i in list(from_node._state_inputs.keys()) + ]) if not from_node.mapper: dir_nm_el_from = "" - if is_node(from_node) and is_current_interface(from_node.interface): - file_from = self._reading_ci_output(node=from_node, dir_nm_el=dir_nm_el_from, out_nm=from_socket) + if is_node(from_node) and is_current_interface( + from_node.interface): + file_from = self._reading_ci_output( + node=from_node, + dir_nm_el=dir_nm_el_from, + out_nm=from_socket) if file_from and os.path.exists(file_from): - inputs_dict["{}.{}".format(self.name, to_socket)] = file_from + inputs_dict["{}.{}".format(self.name, + to_socket)] = file_from else: raise Exception("{} doesnt exist".format(file_from)) - else: # assuming here that I want to read the file (will not be used with the current interfaces) - file_from = os.path.join(from_node.workingdir, dir_nm_el_from, from_socket+".txt") + else: # assuming here that I want to read the file (will not be used with the current interfaces) + file_from = os.path.join(from_node.workingdir, dir_nm_el_from, + from_socket + ".txt") with open(file_from) as f: content = f.readline() try: - inputs_dict["{}.{}".format(self.name, to_socket)] = eval(content) + inputs_dict["{}.{}".format(self.name, + to_socket)] = eval(content) except NameError: - inputs_dict["{}.{}".format(self.name, to_socket)] = content + inputs_dict["{}.{}".format(self.name, + to_socket)] = content return state_dict, inputs_dict @@ -161,8 +186,9 @@ def _reading_ci_output(self, dir_nm_el, out_nm, node=None): """used for current interfaces: checking if the output exists and returns the path if it does""" if not node: node = self - result_pklfile = os.path.join(os.getcwd(), node.workingdir, dir_nm_el, - node.interface.nn.name, "result_{}.pklz".format(node.interface.nn.name)) + result_pklfile = os.path.join( + os.getcwd(), node.workingdir, dir_nm_el, node.interface.nn.name, + "result_{}.pklz".format(node.interface.nn.name)) if os.path.exists(result_pklfile): out_file = getattr(loadpkl(result_pklfile).outputs, out_nm) if os.path.exists(out_file): @@ -172,7 +198,6 @@ def _reading_ci_output(self, dir_nm_el, out_nm, node=None): else: return False - # checking if all outputs are saved @property def is_complete(self): @@ -183,7 +208,6 @@ def is_complete(self): else: return self._check_all_results() - def get_output(self): raise NotImplementedError @@ -193,7 +217,6 @@ def _check_all_results(self): def _reading_results(self): raise NotImplementedError - def _dict_tuple2list(self, container): if type(container) is dict: val_l = [val for (_, val) in container.items()] @@ -205,13 +228,30 @@ def _dict_tuple2list(self, container): class NewNode(NewBase): - def __init__(self, name, interface, inputs=None, mapper=None, join_by=None, - workingdir=None, other_mappers=None, mem_gb=None, 
cache_location=None, - output_names=None, print_val=True, *args, **kwargs): - super(NewNode, self).__init__(name=name, mapper=mapper, inputs=inputs, - other_mappers=other_mappers, mem_gb=mem_gb, - cache_location=cache_location, print_val=print_val, - *args, **kwargs) + def __init__(self, + name, + interface, + inputs=None, + mapper=None, + join_by=None, + workingdir=None, + other_mappers=None, + mem_gb=None, + cache_location=None, + output_names=None, + print_val=True, + *args, + **kwargs): + super(NewNode, self).__init__( + name=name, + mapper=mapper, + inputs=inputs, + other_mappers=other_mappers, + mem_gb=mem_gb, + cache_location=cache_location, + print_val=print_val, + *args, + **kwargs) # working directory for node, will be change if node is a part of a wf self.workingdir = workingdir @@ -219,8 +259,9 @@ def __init__(self, name, interface, inputs=None, mapper=None, join_by=None, if is_function_interface(self.interface): # adding node name to the interface's name mapping - self.interface.input_map = dict((key, "{}.{}".format(self.name, value)) - for (key, value) in self.interface.input_map.items()) + self.interface.input_map = dict( + (key, "{}.{}".format(self.name, value)) + for (key, value) in self.interface.input_map.items()) # list of output names taken from interface output name self.output_names = self.interface._output_nm elif is_current_interface(self.interface): @@ -229,8 +270,6 @@ def __init__(self, name, interface, inputs=None, mapper=None, join_by=None, if not self.output_names: self.output_names = [] - - # dj: not sure if I need it # def __deepcopy__(self, memo): # memo is a dict of id's to copies # id_self = id(self) # memoization avoids unnecesary recursion @@ -249,7 +288,6 @@ def __init__(self, name, interface, inputs=None, mapper=None, join_by=None, # memo[id_self] = _copy # return _copy - @property def inputs(self): return self._inputs @@ -258,17 +296,19 @@ def inputs(self): def inputs(self, inputs): self._inputs.update(inputs) - def run_interface_el(self, i, ind): """ running interface one element generated from node_state.""" - logger.debug("Run interface el, name={}, i={}, ind={}".format(self.name, i, ind)) + logger.debug("Run interface el, name={}, i={}, ind={}".format( + self.name, i, ind)) state_dict, inputs_dict = self.get_input_el(ind) if not self.print_val: state_dict = self.state.state_ind(ind) - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(state_dict.items())]) + dir_nm_el = "_".join( + ["{}:{}".format(i, j) for i, j in list(state_dict.items())]) print("Run interface el, dict={}".format(state_dict)) - logger.debug("Run interface el, name={}, inputs_dict={}, state_dict={}".format( - self.name, inputs_dict, state_dict)) + logger.debug( + "Run interface el, name={}, inputs_dict={}, state_dict={}".format( + self.name, inputs_dict, state_dict)) if is_function_interface(self.interface): res = self.interface.run(inputs_dict) output = self.interface.output @@ -278,8 +318,10 @@ def run_interface_el(self, i, ind): elif is_current_interface(self.interface): if not self.mapper: dir_nm_el = "" - res = self.interface.run(inputs=inputs_dict, base_dir=os.path.join(os.getcwd(), self.workingdir), - dir_nm_el=dir_nm_el) + res = self.interface.run( + inputs=inputs_dict, + base_dir=os.path.join(os.getcwd(), self.workingdir), + dir_nm_el=dir_nm_el) # TODO when join #if self._joinByKey: @@ -291,30 +333,34 @@ def run_interface_el(self, i, ind): # dir_nm_el = os.path.join(dir_join, dir_nm_el) return res - def _writting_results_tmp(self, state_dict, dir_nm_el, 
output): """temporary method to write the results in the files (this is usually part of a interface)""" if not self.mapper: dir_nm_el = '' os.makedirs(os.path.join(self.workingdir, dir_nm_el), exist_ok=True) for key_out, val_out in output.items(): - with open(os.path.join(self.workingdir, dir_nm_el, key_out+".txt"), "w") as fout: + with open( + os.path.join(self.workingdir, dir_nm_el, key_out + ".txt"), + "w") as fout: fout.write(str(val_out)) - def get_output(self): """collecting all outputs and updating self._output""" for key_out in self.output_names: self._output[key_out] = {} - for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate( + itertools.product(*self.state.all_elements)): if self.print_val: state_dict = self.state.state_values(ind) else: state_dict = self.state.state_ind(ind) - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(state_dict.items())]) + dir_nm_el = "_".join([ + "{}:{}".format(i, j) for i, j in list(state_dict.items()) + ]) if self.mapper: if is_function_interface(self.interface): - output = os.path.join(self.workingdir, dir_nm_el, key_out + ".txt") + output = os.path.join(self.workingdir, dir_nm_el, + key_out + ".txt") if self.interface.out_read: with open(output) as fout: content = fout.readline() @@ -328,7 +374,8 @@ def get_output(self): (state_dict, (state_dict, self._reading_ci_output(dir_nm_el=dir_nm_el, out_nm=key_out))) else: if is_function_interface(self.interface): - output = os.path.join(self.workingdir, key_out + ".txt") + output = os.path.join(self.workingdir, + key_out + ".txt") if self.interface.out_read: with open(output) as fout: try: @@ -341,7 +388,6 @@ def get_output(self): (state_dict, self._reading_ci_output(dir_nm_el="", out_nm=key_out)) return self._output - # dj: version without join def _check_all_results(self): """checking if all files that should be created are present""" @@ -350,13 +396,16 @@ def _check_all_results(self): state_dict = self.state.state_values(ind) else: state_dict = self.state.state_ind(ind) - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(state_dict.items())]) + dir_nm_el = "_".join( + ["{}:{}".format(i, j) for i, j in list(state_dict.items())]) if not self.mapper: dir_nm_el = "" for key_out in self.output_names: if is_function_interface(self.interface): - if not os.path.isfile(os.path.join(self.workingdir, dir_nm_el, key_out+".txt")): + if not os.path.isfile( + os.path.join(self.workingdir, dir_nm_el, + key_out + ".txt")): return False elif is_current_interface(self.interface): if not self._reading_ci_output(dir_nm_el, key_out): @@ -364,7 +413,6 @@ def _check_all_results(self): self._is_complete = True return True - def _reading_results(self): """temporary: reading results from output files (that is now just txt) should be probably just reading output for self.output_names @@ -376,7 +424,8 @@ def _reading_results(self): val_l = self._dict_tuple2list(self._output[key_out]) for (st_dict, filename) in val_l: with open(filename) as fout: - self._result[key_out].append((st_dict, eval(fout.readline()))) + self._result[key_out].append((st_dict, + eval(fout.readline()))) else: # st_dict should be {} # not sure if this is used (not tested) @@ -395,10 +444,28 @@ def _reading_results(self): class NewWorkflow(NewBase): - def __init__(self, name, inputs=None, wf_output_names=None, mapper=None, #join_by=None, - nodes=None, workingdir=None, mem_gb=None, cache_location=None, print_val=True, *args, **kwargs): - super(NewWorkflow, self).__init__(name=name, 
mapper=mapper, inputs=inputs, mem_gb=mem_gb, - cache_location=cache_location, print_val=print_val, *args, **kwargs) + def __init__( + self, + name, + inputs=None, + wf_output_names=None, + mapper=None, #join_by=None, + nodes=None, + workingdir=None, + mem_gb=None, + cache_location=None, + print_val=True, + *args, + **kwargs): + super(NewWorkflow, self).__init__( + name=name, + mapper=mapper, + inputs=inputs, + mem_gb=mem_gb, + cache_location=cache_location, + print_val=print_val, + *args, + **kwargs) self.graph = nx.DiGraph() # all nodes in the workflow (probably will be removed) @@ -443,8 +510,9 @@ def inputs(self): @inputs.setter def inputs(self, inputs): - self._inputs.update(dict(("{}.{}".format(self.name, key), value) for (key, value) in inputs.items())) - + self._inputs.update( + dict(("{}.{}".format(self.name, key), value) + for (key, value) in inputs.items())) @property def nodes(self): @@ -455,7 +523,6 @@ def graph_sorted(self): # TODO: should I always update the graph? return list(nx.topological_sort(self.graph)) - def map_node(self, mapper, node=None, inputs=None): """this is setting a mapper to the wf's nodes (not to the wf)""" if not node: @@ -465,13 +532,14 @@ def map_node(self, mapper, node=None, inputs=None): node.map(mapper=mapper, inputs=inputs) self._node_mappers[node.name] = node.mapper - def get_output(self): # not sure, if I should collecto output of all nodes or only the ones that are used in wf.output self.node_outputs = {} for nn in self.graph: if self.mapper: - self.node_outputs[nn.name] = [ni.get_output() for ni in self.inner_nodes[nn.name]] + self.node_outputs[nn.name] = [ + ni.get_output() for ni in self.inner_nodes[nn.name] + ] else: self.node_outputs[nn.name] = nn.get_output() if self.wf_output_names: @@ -481,32 +549,45 @@ def get_output(self): elif len(out) == 3: node_nm, out_nd_nm, out_wf_nm = out else: - raise Exception("wf_output_names should have 2 or 3 elements") + raise Exception( + "wf_output_names should have 2 or 3 elements") if out_wf_nm not in self._output.keys(): if self.mapper: self._output[out_wf_nm] = {} - for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate( + itertools.product(*self.state.all_elements)): if self.print_val: wf_inputs_dict = self.state.state_values(ind) - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(wf_inputs_dict.items())]) + dir_nm_el = "_".join([ + "{}:{}".format(i, j) + for i, j in list(wf_inputs_dict.items()) + ]) else: wf_ind_dict = self.state.state_ind(ind) - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(wf_ind_dict.items())]) - self._output[out_wf_nm][dir_nm_el] = self.node_outputs[node_nm][i][out_nd_nm] + dir_nm_el = "_".join([ + "{}:{}".format(i, j) + for i, j in list(wf_ind_dict.items()) + ]) + self._output[out_wf_nm][ + dir_nm_el] = self.node_outputs[node_nm][i][ + out_nd_nm] else: - self._output[out_wf_nm] = self.node_outputs[node_nm][out_nd_nm] + self._output[out_wf_nm] = self.node_outputs[node_nm][ + out_nd_nm] else: - raise Exception("the key {} is already used in workflow.result".format(out_wf_nm)) + raise Exception( + "the key {} is already used in workflow.result".format( + out_wf_nm)) return self._output - # dj: version without join # TODO: might merge with the function from Node def _check_all_results(self): """checking if all files that should be created are present""" for nn in self.graph_sorted: if nn.name in self.inner_nodes.keys(): - if not all([ni.is_complete for ni in self.inner_nodes[nn.name]]): + if not all( + 
[ni.is_complete for ni in self.inner_nodes[nn.name]]): return False else: if not nn.is_complete: @@ -514,7 +595,6 @@ def _check_all_results(self): self._is_complete = True return True - # TODO: should try to merge with the function from Node def _reading_results(self): """reading all results of the workflow @@ -522,20 +602,27 @@ def _reading_results(self): """ if self.wf_output_names: for out in self.wf_output_names: - key_out = out[2] if len(out)==3 else out[1] + key_out = out[2] if len(out) == 3 else out[1] self._result[key_out] = [] if self.mapper: - for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate( + itertools.product(*self.state.all_elements)): if self.print_val: wf_inputs_dict = self.state.state_values(ind) else: wf_inputs_dict = self.state.state_ind(ind) - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(wf_inputs_dict.items())]) - res_l= [] - val_l = self._dict_tuple2list(self.output[key_out][dir_nm_el]) + dir_nm_el = "_".join([ + "{}:{}".format(i, j) + for i, j in list(wf_inputs_dict.items()) + ]) + res_l = [] + val_l = self._dict_tuple2list( + self.output[key_out][dir_nm_el]) for val in val_l: with open(val[1]) as fout: - logger.debug('Reading Results: file={}, st_dict={}'.format(val[1], val[0])) + logger.debug( + 'Reading Results: file={}, st_dict={}'. + format(val[1], val[0])) res_l.append((val[0], eval(fout.readline()))) self._result[key_out].append((wf_inputs_dict, res_l)) else: @@ -544,11 +631,13 @@ def _reading_results(self): #TODO: I think that val shouldn't be dict here... # TMP solution if type(val) is dict: - val = [v for k,v in val.items()][0] + val = [v for k, v in val.items()][0] with open(val[1]) as fout: - logger.debug('Reading Results: file={}, st_dict={}'.format(val[1], val[0])) - self._result[key_out].append((val[0], eval(fout.readline()))) - + logger.debug( + 'Reading Results: file={}, st_dict={}'.format( + val[1], val[0])) + self._result[key_out].append( + (val[0], eval(fout.readline()))) def add_nodes(self, nodes): """adding nodes without defining connections @@ -562,37 +651,67 @@ def add_nodes(self, nodes): self._node_names[nn.name] = nn self._node_mappers[nn.name] = nn.mapper - # TODO: workingir shouldn't have None - def add(self, runnable, name=None, workingdir=None, inputs=None, output_names=None, mapper=None, - mem_gb=None, print_val=True, out_read=False, **kwargs): + def add(self, + runnable, + name=None, + workingdir=None, + inputs=None, + output_names=None, + mapper=None, + mem_gb=None, + print_val=True, + out_read=False, + **kwargs): if is_function(runnable): if not output_names: output_names = ["out"] - interface = aux.FunctionInterface(function=runnable, output_nm=output_names, out_read=out_read) + interface = aux.FunctionInterface( + function=runnable, output_nm=output_names, out_read=out_read) if not name: raise Exception("you have to specify name for the node") if not workingdir: workingdir = name - node = NewNode(interface=interface, workingdir=workingdir, name=name, inputs=inputs, mapper=mapper, - other_mappers=self._node_mappers, mem_gb=mem_gb, print_val=print_val) + node = NewNode( + interface=interface, + workingdir=workingdir, + name=name, + inputs=inputs, + mapper=mapper, + other_mappers=self._node_mappers, + mem_gb=mem_gb, + print_val=print_val) elif is_function_interface(runnable) or is_current_interface(runnable): if not name: raise Exception("you have to specify name for the node") if not workingdir: workingdir = name - node = NewNode(interface=runnable, 
workingdir=workingdir, name=name, inputs=inputs, mapper=mapper, - other_mappers=self._node_mappers, mem_gb_node=mem_gb, output_names=output_names, - print_val=print_val) + node = NewNode( + interface=runnable, + workingdir=workingdir, + name=name, + inputs=inputs, + mapper=mapper, + other_mappers=self._node_mappers, + mem_gb_node=mem_gb, + output_names=output_names, + print_val=print_val) elif is_nipype_interface(runnable): ci = aux.CurrentInterface(interface=runnable, name=name) if not name: raise Exception("you have to specify name for the node") if not workingdir: workingdir = name - node = NewNode(interface=ci, workingdir=workingdir, name=name, inputs=inputs, mapper=mapper, - other_mappers=self._node_mappers, mem_gb_node=mem_gb, output_names=output_names, - print_val=print_val) + node = NewNode( + interface=ci, + workingdir=workingdir, + name=name, + inputs=inputs, + mapper=mapper, + other_mappers=self._node_mappers, + mem_gb_node=mem_gb, + output_names=output_names, + print_val=print_val) elif is_node(runnable): node = runnable elif is_workflow(runnable): @@ -608,11 +727,10 @@ def add(self, runnable, name=None, workingdir=None, inputs=None, output_names=No from_node_nm, from_socket = source.split(".") self.connect(from_node_nm, from_socket, node.name, inp) # TODO not sure if i need it, just check if from_node_nm is not None?? - except(ValueError): + except (ValueError): self.connect_wf_input(source, node.name, inp) return self - def connect(self, from_node_nm, from_socket, to_node_nm, to_socket): from_node = self._node_names[from_node_nm] to_node = self._node_names[to_node_nm] @@ -623,42 +741,52 @@ def connect(self, from_node_nm, from_socket, to_node_nm, to_socket): # from_node.sending_output.append((from_socket, to_node, to_socket)) logger.debug('connecting {} and {}'.format(from_node, to_node)) - def connect_wf_input(self, inp_wf, node_nm, inp_nd): self.needed_inp_wf.append((node_nm, inp_wf, inp_nd)) - def preparing(self, wf_inputs=None, wf_inputs_ind=None): """preparing nodes which are connected: setting the final mapper and state_inputs""" #pdb.set_trace() for node_nm, inp_wf, inp_nd in self.needed_inp_wf: node = self._node_names[node_nm] if "{}.{}".format(self.name, inp_wf) in wf_inputs: - node.state_inputs.update({"{}.{}".format(node_nm, inp_nd): wf_inputs["{}.{}".format(self.name, inp_wf)]}) - node.inputs.update({"{}.{}".format(node_nm, inp_nd): wf_inputs["{}.{}".format(self.name, inp_wf)]}) + node.state_inputs.update({ + "{}.{}".format(node_nm, inp_nd): + wf_inputs["{}.{}".format(self.name, inp_wf)] + }) + node.inputs.update({ + "{}.{}".format(node_nm, inp_nd): + wf_inputs["{}.{}".format(self.name, inp_wf)] + }) else: - raise Exception("{}.{} not in the workflow inputs".format(self.name, inp_wf)) + raise Exception("{}.{} not in the workflow inputs".format( + self.name, inp_wf)) for nn in self.graph_sorted: if self.print_val: - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(wf_inputs.items())]) + dir_nm_el = "_".join( + ["{}:{}".format(i, j) for i, j in list(wf_inputs.items())]) else: - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(wf_inputs_ind.items())]) + dir_nm_el = "_".join([ + "{}:{}".format(i, j) + for i, j in list(wf_inputs_ind.items()) + ]) if not self.mapper: dir_nm_el = "" nn.workingdir = os.path.join(self.workingdir, dir_nm_el, nn.name) - nn._is_complete = False # helps when mp is used + nn._is_complete = False # helps when mp is used try: for inp, (out_node, out_var) in self.connected_var[nn].items(): - nn.ready2run = False #it has some 
history (doesnt have to be in the loop) + nn.ready2run = False #it has some history (doesnt have to be in the loop) nn.state_inputs.update(out_node.state_inputs) nn.needed_outputs.append((out_node, out_var, inp)) #if there is no mapper provided, i'm assuming that mapper is taken from the previous node - if (not nn.mapper or nn.mapper == out_node.mapper) and out_node.mapper: + if (not nn.mapper or + nn.mapper == out_node.mapper) and out_node.mapper: nn.mapper = out_node.mapper else: pass #TODO: implement inner mapper - except(KeyError): + except (KeyError): # tmp: we don't care about nn that are not in self.connected_var pass @@ -678,17 +806,22 @@ def preparing(self, wf_inputs=None, wf_inputs_ind=None): def is_function(obj): return hasattr(obj, '__call__') + def is_function_interface(obj): return type(obj) is aux.FunctionInterface + def is_current_interface(obj): return type(obj) is aux.CurrentInterface + def is_nipype_interface(obj): return hasattr(obj, "_run_interface") + def is_node(obj): return type(obj) is NewNode + def is_workflow(obj): return type(obj) is NewWorkflow diff --git a/pydra/engine/state.py b/pydra/engine/state.py index 354b8d80af..57ccc8ea07 100644 --- a/pydra/engine/state.py +++ b/pydra/engine/state.py @@ -4,6 +4,7 @@ from . import auxiliary as aux + class State(object): def __init__(self, node_name, mapper=None, other_mappers=None): self._mapper = mapper @@ -11,13 +12,15 @@ def __init__(self, node_name, mapper=None, other_mappers=None): if self._mapper: # changing mapper (as in rpn), so I can read from left to right # e.g. if mapper=('d', ['e', 'r']), _mapper_rpn=['d', 'e', 'r', '*', '.'] - self._mapper_rpn = aux.mapper2rpn(self._mapper, other_mappers=other_mappers) - self._input_names_mapper = [i for i in self._mapper_rpn if i not in ["*", "."]] + self._mapper_rpn = aux.mapper2rpn( + self._mapper, other_mappers=other_mappers) + self._input_names_mapper = [ + i for i in self._mapper_rpn if i not in ["*", "."] + ] else: self._mapper_rpn = [] self._input_names_mapper = [] - def prepare_state_input(self, state_inputs): """prepare all inputs, should be called once all input is available""" @@ -31,23 +34,23 @@ def prepare_state_input(self, state_inputs): # dictionary[key=input names] = list of axes related to # e.g. {'r': [1], 'e': [0], 'd': [0, 1]} # ndim - int, number of dimension for the "final array" (that is not created) - self._axis_for_input, self._ndim = aux.mapping_axis(self.state_inputs, self._mapper_rpn) + self._axis_for_input, self._ndim = aux.mapping_axis( + self.state_inputs, self._mapper_rpn) # list of inputs variable for each axis # e.g. [['e', 'd'], ['r', 'd']] # shape - list, e.g. [2,3] - self._input_for_axis, self._shape = aux.converting_axis2input(self.state_inputs, - self._axis_for_input, self._ndim) + self._input_for_axis, self._shape = aux.converting_axis2input( + self.state_inputs, self._axis_for_input, self._ndim) # list of all possible indexes in each dim, will be use to iterate # e.g. [[0, 1], [0, 1, 2]] self.all_elements = [range(i) for i in self._shape] self.index_generator = itertools.product(*self.all_elements) - def __getitem__(self, ind): if type(ind) is int: - ind = (ind,) + ind = (ind, ) return self.state_values(ind) # not used? 
@@ -55,17 +58,14 @@ def __getitem__(self, ind): #def mapper(self): # return self._mapper - @property def ndim(self): return self._ndim - @property def shape(self): return self._shape - def state_values(self, ind): """returns state input as a dictionary (input name, value)""" if len(ind) > self._ndim: @@ -73,14 +73,16 @@ def state_values(self, ind): for ii, index in enumerate(ind): if index > self._shape[ii] - 1: - raise IndexError("index {} is out of bounds for axis {} with size {}".format(index, ii, self._shape[ii])) + raise IndexError( + "index {} is out of bounds for axis {} with size {}". + format(index, ii, self._shape[ii])) state_dict = {} for input, ax in self._axis_for_input.items(): # checking which axes are important for the input - sl_ax = slice(ax[0], ax[-1]+1) + sl_ax = slice(ax[0], ax[-1] + 1) # taking the indexes for the axes - ind_inp = tuple(ind[sl_ax]) #used to be list + ind_inp = tuple(ind[sl_ax]) #used to be list state_dict[input] = self.state_inputs[input][ind_inp] # adding values from input that are not used in the mapper for input in set(self._input_names) - set(self._input_names_mapper): @@ -90,7 +92,6 @@ def state_values(self, ind): # returning a named tuple? return OrderedDict(sorted(state_dict.items(), key=lambda t: t[0])) - def state_ind(self, ind): """similar to state value but returns indices (not values)""" if len(ind) > self._ndim: @@ -98,14 +99,16 @@ def state_ind(self, ind): for ii, index in enumerate(ind): if index > self._shape[ii] - 1: - raise IndexError("index {} is out of bounds for axis {} with size {}".format(index, ii, self._shape[ii])) + raise IndexError( + "index {} is out of bounds for axis {} with size {}". + format(index, ii, self._shape[ii])) state_dict = {} for input, ax in self._axis_for_input.items(): # checking which axes are important for the input - sl_ax = slice(ax[0], ax[-1]+1) + sl_ax = slice(ax[0], ax[-1] + 1) # taking the indexes for the axes - ind_inp = tuple(ind[sl_ax]) #used to be list + ind_inp = tuple(ind[sl_ax]) #used to be list ind_inp_str = "x".join([str(el) for el in ind_inp]) state_dict[input] = ind_inp_str # adding inputs that are not used in the mapper @@ -114,4 +117,4 @@ def state_ind(self, ind): # in py3.7 we can skip OrderedDict # returning a named tuple? 
- return OrderedDict(sorted(state_dict.items(), key=lambda t: t[0])) \ No newline at end of file + return OrderedDict(sorted(state_dict.items(), key=lambda t: t[0])) diff --git a/pydra/engine/submitter.py b/pydra/engine/submitter.py index cd2e72ed83..21bc22371f 100644 --- a/pydra/engine/submitter.py +++ b/pydra/engine/submitter.py @@ -12,7 +12,7 @@ class Submitter(object): def __init__(self, plugin, runnable): self.plugin = plugin self.node_line = [] - self._to_finish = [] # used only for wf + self._to_finish = [] # used only for wf if self.plugin == "mp": self.worker = MpWorker() elif self.plugin == "serial": @@ -24,14 +24,13 @@ def __init__(self, plugin, runnable): else: raise Exception("plugin {} not available".format(self.plugin)) - if hasattr(runnable, 'interface'): # a node + if hasattr(runnable, 'interface'): # a node self.node = runnable - elif hasattr(runnable, "graph"): # a workflow + elif hasattr(runnable, "graph"): # a workflow self.workflow = runnable else: raise Exception("runnable has to be a Node or Workflow") - def run(self): """main running method, checks if submitter id for Node or Workflow""" if hasattr(self, "node"): @@ -39,17 +38,16 @@ def run(self): elif hasattr(self, "workflow"): self.run_workflow() - def run_node(self): """the main method to run a Node""" self.node.prepare_state_input() self._submit_node(self.node) while not self.node.is_complete: - logger.debug("Submitter, in while, to_finish: {}".format(self.node)) + logger.debug("Submitter, in while, to_finish: {}".format( + self.node)) time.sleep(3) self.node.get_output() - def _submit_node(self, node): """submitting nodes's interface for all states""" for (i, ind) in enumerate(node.state.index_generator): @@ -60,7 +58,6 @@ def _submit_node_el(self, node, i, ind): logger.debug("SUBMIT WORKER, node: {}, ind: {}".format(node, ind)) self.worker.run_el(node.run_interface_el, (i, ind)) - def run_workflow(self, workflow=None, ready=True): """the main function to run Workflow""" if not workflow: @@ -81,10 +78,12 @@ def run_workflow(self, workflow=None, ready=True): else: if ready: if workflow.print_val: - workflow.preparing(wf_inputs=workflow.inputs) + workflow.preparing(wf_inputs=workflow.inputs) else: - inputs_ind = dict((key, None) for (key, _) in workflow.inputs.items()) - workflow.preparing(wf_inputs=workflow.inputs, wf_inputs_ind=inputs_ind) + inputs_ind = dict( + (key, None) for (key, _) in workflow.inputs.items()) + workflow.preparing( + wf_inputs=workflow.inputs, wf_inputs_ind=inputs_ind) self._run_workflow_nd(workflow=workflow) else: self.node_line.append((workflow, 0, ())) @@ -92,19 +91,20 @@ def run_workflow(self, workflow=None, ready=True): # this parts submits nodes that are waiting to be run # it should stop when nothing is waiting while self._nodes_check(): - logger.debug("Submitter, in while, node_line: {}".format(self.node_line)) + logger.debug("Submitter, in while, node_line: {}".format( + self.node_line)) time.sleep(3) # this part simply waiting for all "last nodes" to finish while self._output_check(): - logger.debug("Submitter, in while, to_finish: {}".format(self._to_finish)) + logger.debug("Submitter, in while, to_finish: {}".format( + self._to_finish)) time.sleep(3) # calling only for the main wf (other wf will be called inside the function) if workflow is self.workflow: workflow.get_output() - def _run_workflow_el(self, workflow, i, ind, collect_inp=False): """running one internal workflow (if workflow has a mapper)""" # TODO: can I simplify and remove collect inp? where should it be? 
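# --- editor's aside (illustrative, not part of the patch) -----------------
# Hedged end-to-end sketch of the Submitter above driving a single mapped
# node: it takes a plugin name plus a Node or Workflow (anything else
# raises), prepares the state, submits one interface run per state element,
# and polls is_complete before collecting output. Module paths and the
# working-directory name are assumptions inferred from the file names and
# tests in this patch.
from pydra.engine.auxiliary import FunctionInterface
from pydra.engine.newengine import NewNode
from pydra.engine.submitter import Submitter

def fun_addtwo(a):
    return a + 2

interf_addtwo = FunctionInterface(fun_addtwo, ["out"])
nn = NewNode(name="NA", interface=interf_addtwo, inputs={"a": [3, 5]},
             mapper="a", workingdir="sketch_submitter")  # hypothetical dir name
sub = Submitter(plugin="serial", runnable=nn)   # tests also use "mp", "cf", "dask"
sub.run()      # run_node(): prepare_state_input, submit each state element
sub.close()    # release the worker
print(nn.result)   # e.g. {"out": [(state_dict, 5), (state_dict, 7)]}
# ---------------------------------------------------------------------------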
@@ -116,18 +116,18 @@ def _run_workflow_el(self, workflow, i, ind, collect_inp=False): workflow.preparing(wf_inputs=wf_inputs) else: wf_inputs_ind = workflow.state.state_ind(ind) - workflow.preparing(wf_inputs=wf_inputs, wf_inputs_ind=wf_inputs_ind) + workflow.preparing( + wf_inputs=wf_inputs, wf_inputs_ind=wf_inputs_ind) self._run_workflow_nd(workflow=workflow) - def _run_workflow_nd(self, workflow): """iterating over all nodes from a workflow and submitting them or adding to the node_line""" for (i_n, node) in enumerate(workflow.graph_sorted): - if workflow.parent_wf and workflow.parent_wf.mapper: # for now if parent_wf, parent_wf has to have mapper + if workflow.parent_wf and workflow.parent_wf.mapper: # for now if parent_wf, parent_wf has to have mapper workflow.parent_wf.inner_nodes[node.name].append(node) node.prepare_state_input() self._to_finish.append(node) - # submitting all the nodes who are self sufficient (self.workflow.graph is already sorted) + # submitting all the nodes who are self sufficient (self.workflow.graph is already sorted) if node.ready2run: if hasattr(node, 'interface'): self._submit_node(node) @@ -148,24 +148,25 @@ def _run_workflow_nd(self, workflow): for (i, ind) in enumerate(nn.state.index_generator): self._to_finish.append(nn) self.node_line.append((nn, i, ind)) - else: #wf + else: #wf self.run_workflow(workflow=nn, ready=False) - def _nodes_check(self): """checking which nodes-states are ready to run and running the ones that are ready""" _to_remove = [] for (to_node, i, ind) in self.node_line: if hasattr(to_node, 'interface'): - print("_NODES_CHECK INPUT", to_node.name, to_node.checking_input_el(ind)) + print("_NODES_CHECK INPUT", to_node.name, + to_node.checking_input_el(ind)) if to_node.checking_input_el(ind): self._submit_node_el(to_node, i, ind) _to_remove.append((to_node, i, ind)) else: pass - else: #wf + else: #wf if to_node.checking_input_el(ind): - self._run_workflow_el(workflow=to_node, i=i, ind=ind, collect_inp=True) + self._run_workflow_el( + workflow=to_node, i=i, ind=ind, collect_inp=True) _to_remove.append((to_node, i, ind)) else: pass @@ -174,7 +175,6 @@ def _nodes_check(self): self.node_line.remove(rn) return self.node_line - # this I believe can be done for entire node def _output_check(self): """"checking if all nodes are done""" @@ -187,6 +187,5 @@ def _output_check(self): self._to_finish.remove(rn) return self._to_finish - def close(self): self.worker.close() diff --git a/pydra/engine/tests/test_auxiliary.py b/pydra/engine/tests/test_auxiliary.py index 4c5df1df2f..3eaab01e16 100644 --- a/pydra/engine/tests/test_auxiliary.py +++ b/pydra/engine/tests/test_auxiliary.py @@ -3,54 +3,98 @@ import numpy as np import pytest -@pytest.mark.parametrize("mapper, rpn", - [ - ("a", ["a"]), - (("a", "b"), ["a", "b", "."]), - (["a", "b"], ["a", "b", "*"]), - (["a", ("b", "c")], ["a", "b", "c", ".", "*"]), - ([("a", "b"), "c"], ["a", "b", ".", "c", "*"]), - (["a", ("b", ["c", "d"])], ["a", "b", "c", "d", "*", ".", "*"]) - ]) + +@pytest.mark.parametrize( + "mapper, rpn", + [("a", ["a"]), (("a", "b"), ["a", "b", "."]), + (["a", "b"], ["a", "b", "*"]), + (["a", ("b", "c")], ["a", "b", "c", ".", "*"]), + ([("a", "b"), "c"], ["a", "b", ".", "c", "*"]), + (["a", ("b", ["c", "d"])], ["a", "b", "c", "d", "*", ".", "*"])]) def test_mapper2rpn(mapper, rpn): assert aux.mapper2rpn(mapper) == rpn @pytest.mark.parametrize("mapper, other_mappers, rpn", - [ - (["a", "_NA"], {"NA": ("b", "c")}, ["a", "NA.b", "NA.c", ".", "*"]), - (["_NA", "c"], {"NA": ("a", "b")}, 
["NA.a", "NA.b", ".", "c", "*"]), - (["a", ("b", "_NA")], {"NA": ["c", "d"]}, ["a", "b", "NA.c", "NA.d", "*", ".", "*"]) - ]) - + [(["a", "_NA"], { + "NA": ("b", "c") + }, ["a", "NA.b", "NA.c", ".", "*"]), + (["_NA", "c"], { + "NA": ("a", "b") + }, ["NA.a", "NA.b", ".", "c", "*"]), + (["a", ("b", "_NA")], { + "NA": ["c", "d"] + }, ["a", "b", "NA.c", "NA.d", "*", ".", "*"])]) def test_mapper2rpn_wf_mapper(mapper, other_mappers, rpn): assert aux.mapper2rpn(mapper, other_mappers=other_mappers) == rpn @pytest.mark.parametrize("mapper, mapper_changed", - [ - ("a", "Node.a"), - (["a", ("b", "c")], ["Node.a", ("Node.b", "Node.c")]), - (("a", ["b", "c"]), ("Node.a", ["Node.b", "Node.c"])) - ]) + [("a", "Node.a"), + (["a", ("b", "c")], ["Node.a", + ("Node.b", "Node.c")]), + (("a", ["b", "c"]), + ("Node.a", ["Node.b", "Node.c"]))]) def test_change_mapper(mapper, mapper_changed): assert aux.change_mapper(mapper, "Node") == mapper_changed @pytest.mark.parametrize("inputs, rpn, expected", - [ - ({"a": np.array([1, 2])}, ["a"], {"a": [0]}), - ({"a": np.array([1, 2]), "b": np.array([3, 4])}, ["a", "b", "."], {"a": [0], "b": [0]}), - ({"a": np.array([1, 2]), "b": np.array([3, 4, 1])}, ["a", "b", "*"], {"a": [0], "b": [1]}), - ({"a": np.array([1, 2]), "b": np.array([3, 4]), "c": np.array([1, 2, 3])}, ["a", "b", ".", "c", "*"], - {"a": [0], "b": [0], "c": [1]}), - ({"a": np.array([1, 2]), "b": np.array([3, 4]), "c": np.array([1, 2, 3])}, - ["c", "a", "b", ".", "*"], {"a": [1], "b": [1], "c": [0]}), - ({"a": np.array([[1, 2], [1, 2]]), "b": np.array([[3, 4], [3, 3]]), "c": np.array([1, 2, 3])}, - ["a", "b", ".", "c", "*"], {"a": [0, 1], "b": [0, 1], "c": [2]}), - ({"a": np.array([[1, 2], [1, 2]]), "b": np.array([[3, 4], [3, 3]]), - "c": np.array([1, 2, 3])}, ["c", "a", "b", ".", "*"], {"a": [1, 2], "b": [1, 2], "c": [0]}) - ]) + [({ + "a": np.array([1, 2]) + }, ["a"], { + "a": [0] + }), + ({ + "a": np.array([1, 2]), + "b": np.array([3, 4]) + }, ["a", "b", "."], { + "a": [0], + "b": [0] + }), + ({ + "a": np.array([1, 2]), + "b": np.array([3, 4, 1]) + }, ["a", "b", "*"], { + "a": [0], + "b": [1] + }), + ({ + "a": np.array([1, 2]), + "b": np.array([3, 4]), + "c": np.array([1, 2, 3]) + }, ["a", "b", ".", "c", "*"], { + "a": [0], + "b": [0], + "c": [1] + }), + ({ + "a": np.array([1, 2]), + "b": np.array([3, 4]), + "c": np.array([1, 2, 3]) + }, ["c", "a", "b", ".", "*"], { + "a": [1], + "b": [1], + "c": [0] + }), + ({ + "a": np.array([[1, 2], [1, 2]]), + "b": np.array([[3, 4], [3, 3]]), + "c": np.array([1, 2, 3]) + }, ["a", "b", ".", "c", "*"], { + "a": [0, 1], + "b": [0, 1], + "c": [2] + }), + ({ + "a": np.array([[1, 2], [1, 2]]), + "b": np.array([[3, 4], [3, 3]]), + "c": np.array([1, 2, 3]) + }, ["c", "a", "b", ".", "*"], { + "a": [1, 2], + "b": [1, 2], + "c": [0] + })]) def test_mapping_axis(inputs, rpn, expected): res = aux.mapping_axis(inputs, rpn)[0] print(res) @@ -60,25 +104,67 @@ def test_mapping_axis(inputs, rpn, expected): def test_mapping_axis_error(): with pytest.raises(Exception): - aux.mapping_axis({"a": np.array([1, 2]), "b": np.array([3, 4, 5])}, ["a", "b", "."]) + aux.mapping_axis({ + "a": np.array([1, 2]), + "b": np.array([3, 4, 5]) + }, ["a", "b", "."]) @pytest.mark.parametrize("inputs, axis_inputs, ndim, expected", - [ - ({"a": np.array([1, 2])}, {"a": [0]}, 1, [["a"]]), - ({"a": np.array([1, 2]), "b": np.array([3, 4])}, {"a": [0], "b": [0]}, 1, - [["a", "b"]]), - ({"a": np.array([1, 2]), "b": np.array([3, 4, 1])}, {"a": [0], "b": [1]}, 2, - [["a"], ["b"]]), - ({"a": np.array([1, 2]), "b": 
np.array([3, 4]), "c": np.array([1, 2, 3])},
-                              {"a": [0], "b": [0], "c": [1]}, 2, [["a", "b"]]),
-                             ({"a": np.array([1, 2]), "b": np.array([3, 4]), "c": np.array([1, 2, 3])},
-                              {"a": [1], "b": [1], "c": [0]}, 2, [["c"], ["a", "b"]]),
-                             ({"a": np.array([[1, 2], [1, 2]]), "b": np.array([[3, 4], [3, 3]]), "c": np.array([1, 2, 3])},
-                              {"a": [0, 1], "b": [0, 1], "c": [2]}, 3, [["a", "b"], ["a", "b"], ["c"]]),
-                             ({"a": np.array([[1, 2], [1, 2]]), "b": np.array([[3, 4], [3, 3]]),
-                              "c": np.array([1, 2, 3])}, {"a": [1, 2], "b": [1, 2], "c": [0]}, 3,
-                              [["c"], ["a", "b"], ["a", "b"]])
-                         ])
+                         [({
+                             "a": np.array([1, 2])
+                         }, {
+                             "a": [0]
+                         }, 1, [["a"]]),
+                          ({
+                              "a": np.array([1, 2]),
+                              "b": np.array([3, 4])
+                          }, {
+                              "a": [0],
+                              "b": [0]
+                          }, 1, [["a", "b"]]),
+                          ({
+                              "a": np.array([1, 2]),
+                              "b": np.array([3, 4, 1])
+                          }, {
+                              "a": [0],
+                              "b": [1]
+                          }, 2, [["a"], ["b"]]),
+                          ({
+                              "a": np.array([1, 2]),
+                              "b": np.array([3, 4]),
+                              "c": np.array([1, 2, 3])
+                          }, {
+                              "a": [0],
+                              "b": [0],
+                              "c": [1]
+                          }, 2, [["a", "b"]]),
+                          ({
+                              "a": np.array([1, 2]),
+                              "b": np.array([3, 4]),
+                              "c": np.array([1, 2, 3])
+                          }, {
+                              "a": [1],
+                              "b": [1],
+                              "c": [0]
+                          }, 2, [["c"], ["a", "b"]]),
+                          ({
+                              "a": np.array([[1, 2], [1, 2]]),
+                              "b": np.array([[3, 4], [3, 3]]),
+                              "c": np.array([1, 2, 3])
+                          }, {
+                              "a": [0, 1],
+                              "b": [0, 1],
+                              "c": [2]
+                          }, 3, [["a", "b"], ["a", "b"], ["c"]]),
+                          ({
+                              "a": np.array([[1, 2], [1, 2]]),
+                              "b": np.array([[3, 4], [3, 3]]),
+                              "c": np.array([1, 2, 3])
+                          }, {
+                              "a": [1, 2],
+                              "b": [1, 2],
+                              "c": [0]
+                          }, 3, [["c"], ["a", "b"], ["a", "b"]])])
 def test_converting_axis2input(inputs, axis_inputs, ndim, expected):
     assert aux.converting_axis2input(inputs, axis_inputs, ndim)[0] == expected
diff --git a/pydra/engine/tests/test_newnode.py b/pydra/engine/tests/test_newnode.py
index a795250281..495957ebd9 100644
--- a/pydra/engine/tests/test_newnode.py
+++ b/pydra/engine/tests/test_newnode.py
@@ -9,8 +9,9 @@
 import numpy as np
 import pytest, pdb
 
-python35_only = pytest.mark.skipif(sys.version_info < (3, 5),
-                                   reason="requires Python>3.4")
+python35_only = pytest.mark.skipif(
+    sys.version_info < (3, 5), reason="requires Python>3.4")
+
 
 @pytest.fixture(scope="module")
 def change_dir(request):
@@ -28,12 +29,14 @@ def move2orig():
 
 Plugins = ["serial"]
 Plugins = ["serial", "mp", "cf", "dask"]
 
+
 def fun_addtwo(a):
     time.sleep(1)
     if a == 3:
         time.sleep(2)
     return a + 2
 
+
 def fun_addvar(a, b):
     return a + b
 
@@ -60,7 +63,8 @@ def test_node_2():
 def test_node_3():
     """Node with interface, inputs and mapper"""
     interf_addtwo = FunctionInterface(fun_addtwo, ["out"])
-    nn = NewNode(name="NA", interface=interf_addtwo, inputs={"a": [3, 5]}, mapper="a")
+    nn = NewNode(
+        name="NA", interface=interf_addtwo, inputs={"a": [3, 5]}, mapper="a")
 
     assert nn.mapper == "NA.a"
     assert (nn.inputs["NA.a"] == np.array([3, 5])).all()
@@ -104,8 +108,11 @@ def test_node_4a():
 def test_node_5(plugin, change_dir):
     """Node with interface and inputs, no mapper, running interface"""
     interf_addtwo = FunctionInterface(fun_addtwo, ["out"])
-    nn = NewNode(name="NA", inputs={"a": 3}, interface=interf_addtwo,
-                 workingdir="test_nd5_{}".format(plugin))
+    nn = NewNode(
+        name="NA",
+        inputs={"a": 3},
+        interface=interf_addtwo,
+        workingdir="test_nd5_{}".format(plugin))
 
     assert (nn.inputs["NA.a"] == np.array([3])).all()
 
@@ -130,7 +137,10 @@ def test_node_5(plugin, change_dir):
 def test_node_6(plugin, change_dir):
     """Node with interface, inputs and the simplest mapper, running interface"""
     interf_addtwo = FunctionInterface(fun_addtwo, ["out"])
-    nn = NewNode(name="NA",
interface=interf_addtwo, workingdir="test_nd6_{}".format(plugin)) + nn = NewNode( + name="NA", + interface=interf_addtwo, + workingdir="test_nd6_{}".format(plugin)) nn.map(mapper="a", inputs={"a": [3, 5]}) assert nn.mapper == "NA.a" @@ -157,7 +167,10 @@ def test_node_6(plugin, change_dir): def test_node_7(plugin, change_dir): """Node with interface, inputs and scalar mapper, running interface""" interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nn = NewNode(name="NA", interface=interf_addvar, workingdir="test_nd7_{}".format(plugin)) + nn = NewNode( + name="NA", + interface=interf_addvar, + workingdir="test_nd7_{}".format(plugin)) # scalar mapper nn.map(mapper=("a", "b"), inputs={"a": [3, 5], "b": [2, 1]}) @@ -186,7 +199,10 @@ def test_node_7(plugin, change_dir): def test_node_8(plugin, change_dir): """Node with interface, inputs and vector mapper, running interface""" interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nn = NewNode(name="NA", interface=interf_addvar, workingdir="test_nd8_{}".format(plugin)) + nn = NewNode( + name="NA", + interface=interf_addvar, + workingdir="test_nd8_{}".format(plugin)) # [] for outer product nn.map(mapper=["a", "b"], inputs={"a": [3, 5], "b": [2, 1]}) @@ -199,8 +215,19 @@ def test_node_8(plugin, change_dir): sub.close() # checking teh results - expected = [({"NA.a": 3, "NA.b": 1}, 4), ({"NA.a": 3, "NA.b": 2}, 5), - ({"NA.a": 5, "NA.b": 1}, 6), ({"NA.a": 5, "NA.b": 2}, 7)] + expected = [({ + "NA.a": 3, + "NA.b": 1 + }, 4), ({ + "NA.a": 3, + "NA.b": 2 + }, 5), ({ + "NA.a": 5, + "NA.b": 1 + }, 6), ({ + "NA.a": 5, + "NA.b": 2 + }, 7)] # to be sure that there is the same order (not sure if node itself should keep the order) key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) @@ -212,6 +239,7 @@ def test_node_8(plugin, change_dir): # tests for workflows + @python35_only def test_workflow_0(plugin="serial"): """workflow (without run) with one node with a mapper""" @@ -226,6 +254,7 @@ def test_workflow_0(plugin="serial"): assert (wf.nodes[0].inputs['NA.a'] == np.array([3, 5])).all() assert len(wf.graph.nodes) == 1 + @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_workflow_1(plugin, change_dir): @@ -243,7 +272,8 @@ def test_workflow_1(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -260,7 +290,8 @@ def test_workflow_2(plugin, change_dir): # the second node does not have explicit mapper (but keeps the mapper from the NA node) interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nb = NewNode(name="NB", interface=interf_addvar, inputs={"b": 10}, workingdir="nb") + nb = NewNode( + name="NB", interface=interf_addvar, inputs={"b": 10}, workingdir="nb") # adding 2 nodes and create a connection (as it is now) wf.add_nodes([na, nb]) @@ -274,7 +305,8 @@ def test_workflow_2(plugin, change_dir): expected_A = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected_A[0][0].keys()) expected_A.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: 
[t[0][key] for key in key_sort]) for i, res in enumerate(expected_A): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -284,7 +316,8 @@ def test_workflow_2(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 10}, 15), ({"NA.a": 5, "NB.b": 10}, 17)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -317,7 +350,8 @@ def test_workflow_2a(plugin, change_dir): expected_A = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected_A[0][0].keys()) expected_A.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_A): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -326,7 +360,8 @@ def test_workflow_2a(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -359,17 +394,30 @@ def test_workflow_2b(plugin): expected_A = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected_A[0][0].keys()) expected_A.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_A): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] # four elements (outer product) - expected_B = [({"NA.a": 3, "NB.b": 1}, 6), ({"NA.a": 3, "NB.b": 2}, 7), - ({"NA.a": 5, "NB.b": 1}, 8), ({"NA.a": 5, "NB.b": 2}, 9)] + expected_B = [({ + "NA.a": 3, + "NB.b": 1 + }, 6), ({ + "NA.a": 3, + "NB.b": 2 + }, 7), ({ + "NA.a": 5, + "NB.b": 1 + }, 8), ({ + "NA.a": 5, + "NB.b": 2 + }, 9)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -377,6 +425,7 @@ def test_workflow_2b(plugin): # using add method to add nodes + @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_workflow_3(plugin, change_dir): @@ -397,7 +446,8 @@ def test_workflow_3(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for 
i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -411,7 +461,12 @@ def test_workflow_3a(plugin, change_dir): interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) # using the add method with an interface - wf.add(interf_addtwo, workingdir="na", mapper="a", inputs={"a": [3, 5]}, name="NA") + wf.add( + interf_addtwo, + workingdir="na", + mapper="a", + inputs={"a": [3, 5]}, + name="NA") assert wf.nodes[0].mapper == "NA.a" @@ -422,7 +477,8 @@ def test_workflow_3a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -434,7 +490,12 @@ def test_workflow_3b(plugin, change_dir): """using add (function) method""" wf = NewWorkflow(name="wf3b", workingdir="test_wf3b_{}".format(plugin)) # using the add method with a function - wf.add(fun_addtwo, workingdir="na", mapper="a", inputs={"a": [3, 5]}, name="NA") + wf.add( + fun_addtwo, + workingdir="na", + mapper="a", + inputs={"a": [3, 5]}, + name="NA") assert wf.nodes[0].mapper == "NA.a" @@ -445,13 +506,13 @@ def test_workflow_3b(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] - @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_workflow_4(plugin, change_dir): @@ -480,7 +541,8 @@ def test_workflow_4(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -488,7 +550,8 @@ def test_workflow_4(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -518,7 +581,8 @@ def test_workflow_4a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert 
wf.nodes[0].result["out"][i][1] == res[1] @@ -526,15 +590,16 @@ def test_workflow_4a(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] - # using map after add method + @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_workflow_5(plugin, change_dir): @@ -554,7 +619,8 @@ def test_workflow_5(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -577,7 +643,8 @@ def test_workflow_5a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -607,7 +674,8 @@ def test_workflow_6(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -615,7 +683,8 @@ def test_workflow_6(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -646,7 +715,8 @@ def test_workflow_6a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -654,7 +724,8 @@ def test_workflow_6a(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: 
[t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -683,7 +754,8 @@ def test_workflow_6b(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -691,7 +763,8 @@ def test_workflow_6b(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -699,12 +772,16 @@ def test_workflow_6b(plugin, change_dir): # tests for a workflow that have its own input + @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_workflow_7(plugin, change_dir): """using inputs for workflow and connect_workflow""" # adding inputs to the workflow directly - wf = NewWorkflow(name="wf7", inputs={"wfa": [3, 5]}, workingdir="test_wf7_{}".format(plugin)) + wf = NewWorkflow( + name="wf7", + inputs={"wfa": [3, 5]}, + workingdir="test_wf7_{}".format(plugin)) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") @@ -720,7 +797,8 @@ def test_workflow_7(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -730,7 +808,10 @@ def test_workflow_7(plugin, change_dir): @python35_only def test_workflow_7a(plugin, change_dir): """using inputs for workflow and kwarg arg in add (instead of connect)""" - wf = NewWorkflow(name="wf7a", inputs={"wfa": [3, 5]}, workingdir="test_wf7a_{}".format(plugin)) + wf = NewWorkflow( + name="wf7a", + inputs={"wfa": [3, 5]}, + workingdir="test_wf7a_{}".format(plugin)) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") # using kwrg argument in the add method (instead of connect or connect_wf_input @@ -744,7 +825,8 @@ def test_workflow_7a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -754,7 +836,8 @@ def 
test_workflow_7a(plugin, change_dir): @python35_only def test_workflow_8(plugin, change_dir): """using inputs for workflow and connect_wf_input for the second node""" - wf = NewWorkflow(name="wf8", workingdir="test_wf8_{}".format(plugin), inputs={"b": 10}) + wf = NewWorkflow( + name="wf8", workingdir="test_wf8_{}".format(plugin), inputs={"b": 10}) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") na.map(mapper="a", inputs={"a": [3, 5]}) @@ -774,16 +857,17 @@ def test_workflow_8(plugin, change_dir): expected_A = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected_A[0][0].keys()) expected_A.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_A): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] - expected_B = [({"NA.a": 3, "NB.b": 10}, 15), ({"NA.a": 5, "NB.b": 10}, 17)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -791,16 +875,22 @@ def test_workflow_8(plugin, change_dir): # testing if _NA in mapper works, using interfaces in add + @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_workflow_9(plugin, change_dir): """using add(interface) method and mapper from previous nodes""" wf = NewWorkflow(name="wf9", workingdir="test_wf9_{}".format(plugin)) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - wf.add(name="NA", runnable=interf_addtwo, workingdir="na").map_node(mapper="a", inputs={"a": [3, 5]}) + wf.add( + name="NA", runnable=interf_addtwo, workingdir="na").map_node( + mapper="a", inputs={"a": [3, 5]}) interf_addvar = FunctionInterface(fun_addvar, ["out"]) # _NA means that I'm using mapper from the NA node, it's the same as ("NA.a", "b") - wf.add(name="NB", runnable=interf_addvar, workingdir="nb", a="NA.out").map_node(mapper=("_NA", "b"), inputs={"b": [2, 1]}) + wf.add( + name="NB", runnable=interf_addvar, workingdir="nb", + a="NA.out").map_node( + mapper=("_NA", "b"), inputs={"b": [2, 1]}) sub = Submitter(runnable=wf, plugin=plugin) sub.run() @@ -809,7 +899,8 @@ def test_workflow_9(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -817,7 +908,8 @@ def test_workflow_9(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert 
wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -829,10 +921,18 @@ def test_workflow_10(plugin, change_dir): """using add(interface) method and scalar mapper from previous nodes""" wf = NewWorkflow(name="wf10", workingdir="test_wf10_{}".format(plugin)) interf_addvar1 = FunctionInterface(fun_addvar, ["out"]) - wf.add(name="NA", runnable=interf_addvar1, workingdir="na").map_node(mapper=("a", "b"), inputs={"a": [3, 5], "b": [0, 10]}) + wf.add( + name="NA", runnable=interf_addvar1, workingdir="na").map_node( + mapper=("a", "b"), inputs={ + "a": [3, 5], + "b": [0, 10] + }) interf_addvar2 = FunctionInterface(fun_addvar, ["out"]) # _NA means that I'm using mapper from the NA node, it's the same as (("NA.a", NA.b), "b") - wf.add(name="NB", runnable=interf_addvar2, workingdir="nb", a="NA.out").map_node(mapper=("_NA", "b"), inputs={"b": [2, 1]}) + wf.add( + name="NB", runnable=interf_addvar2, workingdir="nb", + a="NA.out").map_node( + mapper=("_NA", "b"), inputs={"b": [2, 1]}) sub = Submitter(runnable=wf, plugin=plugin) sub.run() @@ -841,15 +941,25 @@ def test_workflow_10(plugin, change_dir): expected = [({"NA.a": 3, "NA.b": 0}, 3), ({"NA.a": 5, "NA.b": 10}, 15)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] - expected_B = [({"NA.a": 3, "NA.b": 0, "NB.b": 2}, 5), ({"NA.a": 5, "NA.b": 10, "NB.b": 1}, 16)] + expected_B = [({ + "NA.a": 3, + "NA.b": 0, + "NB.b": 2 + }, 5), ({ + "NA.a": 5, + "NA.b": 10, + "NB.b": 1 + }, 16)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort( + key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -861,29 +971,65 @@ def test_workflow_10a(plugin, change_dir): """using add(interface) method and vector mapper from previous nodes""" wf = NewWorkflow(name="wf10a", workingdir="test_wf10a_{}".format(plugin)) interf_addvar1 = FunctionInterface(fun_addvar, ["out"]) - wf.add(name="NA", runnable=interf_addvar1, workingdir="na").map_node(mapper=["a", "b"], inputs={"a": [3, 5], "b": [0, 10]}) + wf.add( + name="NA", runnable=interf_addvar1, workingdir="na").map_node( + mapper=["a", "b"], inputs={ + "a": [3, 5], + "b": [0, 10] + }) interf_addvar2 = FunctionInterface(fun_addvar, ["out"]) # _NA means that I'm using mapper from the NA node, it's the same as (["NA.a", NA.b], "b") - wf.add(name="NB", runnable=interf_addvar2, workingdir="nb", a="NA.out").map_node(mapper=("_NA", "b"), inputs={"b": [[2, 1], [0, 0]]}) + wf.add( + name="NB", runnable=interf_addvar2, workingdir="nb", + a="NA.out").map_node( + mapper=("_NA", "b"), inputs={"b": [[2, 1], [0, 0]]}) sub = Submitter(runnable=wf, plugin=plugin) sub.run() sub.close() - expected = [({"NA.a": 3, "NA.b": 0}, 3), ({"NA.a": 3, "NA.b": 10}, 13), - ({"NA.a": 5, "NA.b": 0}, 5), ({"NA.a": 5, "NA.b": 10}, 15)] + expected = [({ + "NA.a": 3, + "NA.b": 0 + }, 3), ({ + "NA.a": 3, + "NA.b": 10 + }, 13), ({ + "NA.a": 5, + "NA.b": 0 + }, 5), ({ + "NA.a": 5, + "NA.b": 10 + }, 
15)]
     key_sort = list(expected[0][0].keys())
     expected.sort(key=lambda t: [t[0][key] for key in key_sort])
-    wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort])
+    wf.nodes[0].result["out"].sort(
+        key=lambda t: [t[0][key] for key in key_sort])
     for i, res in enumerate(expected):
         assert wf.nodes[0].result["out"][i][0] == res[0]
         assert wf.nodes[0].result["out"][i][1] == res[1]
 
-    expected_B = [({"NA.a": 3, "NA.b": 0, "NB.b": 2}, 5), ({"NA.a": 3, "NA.b": 10, "NB.b": 1}, 14),
-                  ({"NA.a": 5, "NA.b": 0, "NB.b": 0}, 5), ({"NA.a": 5, "NA.b": 10, "NB.b": 0}, 15)]
+    expected_B = [({
+        "NA.a": 3,
+        "NA.b": 0,
+        "NB.b": 2
+    }, 5), ({
+        "NA.a": 3,
+        "NA.b": 10,
+        "NB.b": 1
+    }, 14), ({
+        "NA.a": 5,
+        "NA.b": 0,
+        "NB.b": 0
+    }, 5), ({
+        "NA.a": 5,
+        "NA.b": 10,
+        "NB.b": 0
+    }, 15)]
     key_sort = list(expected_B[0][0].keys())
     expected_B.sort(key=lambda t: [t[0][key] for key in key_sort])
-    wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort])
+    wf.nodes[1].result["out"].sort(
+        key=lambda t: [t[0][key] for key in key_sort])
     for i, res in enumerate(expected_B):
         assert wf.nodes[1].result["out"][i][0] == res[0]
         assert wf.nodes[1].result["out"][i][1] == res[1]
 
@@ -895,12 +1041,25 @@
 def test_workflow_11(plugin, change_dir):
     """using add(interface) method and vector mapper from previous two nodes"""
     wf = NewWorkflow(name="wf11", workingdir="test_wf11_{}".format(plugin))
     interf_addvar1 = FunctionInterface(fun_addvar, ["out"])
-    wf.add(name="NA", runnable=interf_addvar1, workingdir="na").map_node(mapper=("a", "b"), inputs={"a": [3, 5], "b": [0, 10]})
+    wf.add(
+        name="NA", runnable=interf_addvar1, workingdir="na").map_node(
+            mapper=("a", "b"), inputs={
+                "a": [3, 5],
+                "b": [0, 10]
+            })
     interf_addtwo = FunctionInterface(fun_addtwo, ["out"])
-    wf.add(name="NB", runnable=interf_addtwo, workingdir="nb").map_node(mapper="a", inputs={"a": [2, 1]})
+    wf.add(
+        name="NB", runnable=interf_addtwo, workingdir="nb").map_node(
+            mapper="a", inputs={"a": [2, 1]})
     interf_addvar2 = FunctionInterface(fun_addvar, ["out"])
     # _NA, _NB means that I'm using mappers from the NA/NB nodes, it's the same as [("NA.a", NA.b), "NB.a"]
-    wf.add(name="NC", runnable=interf_addvar2, workingdir="nc", a="NA.out", b="NB.out").map_node(mapper=["_NA", "_NB"]) # TODO: this should be default?
+    wf.add(
+        name="NC",
+        runnable=interf_addvar2,
+        workingdir="nc",
+        a="NA.out",
+        b="NB.out").map_node(mapper=["_NA",
+                                     "_NB"])  # TODO: this should be default?
 
     sub = Submitter(runnable=wf, plugin=plugin)
     sub.run()
@@ -909,17 +1068,33 @@
     expected = [({"NA.a": 3, "NA.b": 0}, 3), ({"NA.a": 5, "NA.b": 10}, 15)]
     key_sort = list(expected[0][0].keys())
     expected.sort(key=lambda t: [t[0][key] for key in key_sort])
-    wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort])
+    wf.nodes[0].result["out"].sort(
+        key=lambda t: [t[0][key] for key in key_sort])
     for i, res in enumerate(expected):
         assert wf.nodes[0].result["out"][i][0] == res[0]
         assert wf.nodes[0].result["out"][i][1] == res[1]
 
-
-    expected_C = [({"NA.a": 3, "NA.b": 0, "NB.a": 1}, 6), ({"NA.a": 3, "NA.b": 0, "NB.a": 2}, 7),
-                  ({"NA.a": 5, "NA.b": 10, "NB.a": 1}, 18), ({"NA.a": 5, "NA.b": 10, "NB.a": 2}, 19)]
+    expected_C = [({
+        "NA.a": 3,
+        "NA.b": 0,
+        "NB.a": 1
+    }, 6), ({
+        "NA.a": 3,
+        "NA.b": 0,
+        "NB.a": 2
+    }, 7), ({
+        "NA.a": 5,
+        "NA.b": 10,
+        "NB.a": 1
+    }, 18), ({
+        "NA.a": 5,
+        "NA.b": 10,
+        "NB.a": 2
+    }, 19)]
     key_sort = list(expected_C[0][0].keys())
     expected_C.sort(key=lambda t: [t[0][key] for key in key_sort])
-    wf.nodes[2].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort])
+    wf.nodes[2].result["out"].sort(
+        key=lambda t: [t[0][key] for key in key_sort])
     for i, res in enumerate(expected_C):
         assert wf.nodes[2].result["out"][i][0] == res[0]
         assert wf.nodes[2].result["out"][i][1] == res[1]
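The mapper shorthands exercised by test_workflow_11 can be checked by hand: "_NA" reuses NA's mapper, and the list mapper ["_NA", "_NB"] combines the two upstream states as an outer product. The same arithmetic restated in plain Python (a check of the expected_C values above; not part of the test suite):

from itertools import product

na_out = [a + b for a, b in zip([3, 5], [0, 10])]  # scalar mapper ("a", "b") -> [3, 15]
nb_out = [a + 2 for a in [2, 1]]                   # mapper "a" with fun_addtwo -> [4, 3]
nc_out = sorted(x + y for x, y in product(na_out, nb_out))
assert nc_out == [6, 7, 18, 19]                    # the four results in expected_C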
@@ -927,12 +1102,15 @@
 
 # checking workflow.result
 
+
 @pytest.mark.parametrize("plugin", Plugins)
 @python35_only
 def test_workflow_12(plugin, change_dir):
     """testing if wf.result works (the same workflow as in test_workflow_6)"""
-    wf = NewWorkflow(name="wf12", workingdir="test_wf12_{}".format(plugin),
-                     wf_output_names=[("NA", "out", "NA_out"), ("NB", "out")])
+    wf = NewWorkflow(
+        name="wf12",
+        workingdir="test_wf12_{}".format(plugin),
+        wf_output_names=[("NA", "out", "NA_out"), ("NB", "out")])
 
     interf_addtwo = FunctionInterface(fun_addtwo, ["out"])
     na = NewNode(name="NA", interface=interf_addtwo, workingdir="na")
@@ -977,8 +1155,10 @@
 @python35_only
 def test_workflow_12a(plugin, change_dir):
     """testing if wf.result raises an exception (the same workflow as in test_workflow_6)"""
-    wf = NewWorkflow(name="wf12a", workingdir="test_wf12a_{}".format(plugin),
-                     wf_output_names=[("NA", "out", "wf_out"), ("NB", "out", "wf_out")])
+    wf = NewWorkflow(
+        name="wf12a",
+        workingdir="test_wf12a_{}".format(plugin),
+        wf_output_names=[("NA", "out", "wf_out"), ("NB", "out", "wf_out")])
 
     interf_addtwo = FunctionInterface(fun_addtwo, ["out"])
     na = NewNode(name="NA", interface=interf_addtwo, workingdir="na")
@@ -995,7 +1175,9 @@ def test_workflow_12a(plugin, change_dir):
     # wf_out can't be used twice
     with pytest.raises(Exception) as exinfo:
         sub.run()
-    assert str(exinfo.value) == "the key wf_out is already used in workflow.result"
+    assert str(
+        exinfo.value) == "the key wf_out is already used in workflow.result"
+
 
 # tests for a workflow that has its own input and mapper
 
@@ -1004,8 +1186,12 @@
 @python35_only
 def test_workflow_13(plugin, change_dir):
     """using inputs for workflow and connect_wf_input"""
-    wf = NewWorkflow(name="wf13", inputs={"wfa": [3, 5]}, mapper="wfa", workingdir="test_wf13_{}".format(plugin),
-                     wf_output_names=[("NA", "out", "NA_out")])
+    wf = NewWorkflow(
+        name="wf13",
+        inputs={"wfa": [3, 5]},
+        mapper="wfa",
+        workingdir="test_wf13_{}".format(plugin),
+        wf_output_names=[("NA", "out", "NA_out")])
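Judging from test_workflow_12 and test_workflow_12a above, wf_output_names entries appear to come in two shapes; a hypothetical annotated example (not part of the patch):

wf_output_names = [
    ("NA", "out", "NA_out"),  # expose node NA's "out" as wf.result["NA_out"]
    ("NB", "out"),            # no alias: exposed under the node's own output name
]

The workflow-level aliases must be unique; as test_workflow_12a checks, reusing one raises the "already used in workflow.result" exception.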
interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") wf.add(na) @@ -1016,8 +1202,15 @@ def test_workflow_13(plugin, change_dir): sub.close() assert wf.is_complete - expected = [({"wf13.wfa": 3}, [({"NA.a": 3}, 5)]), - ({'wf13.wfa': 5}, [({"NA.a": 5}, 7)])] + expected = [({ + "wf13.wfa": 3 + }, [({ + "NA.a": 3 + }, 5)]), ({ + 'wf13.wfa': 5 + }, [({ + "NA.a": 5 + }, 7)])] for i, res in enumerate(expected): assert wf.result["NA_out"][i][0] == res[0] assert wf.result["NA_out"][i][1][0][0] == res[1][0][0] @@ -1028,10 +1221,19 @@ def test_workflow_13(plugin, change_dir): @python35_only def test_workflow_13a(plugin, change_dir): """using inputs for workflow and connect_wf_input (the node has 2 inputs)""" - wf = NewWorkflow(name="wf13a", inputs={"wfa": [3, 5]}, mapper="wfa", workingdir="test_wf13a_{}".format(plugin), - wf_output_names=[("NA", "out", "NA_out")]) + wf = NewWorkflow( + name="wf13a", + inputs={"wfa": [3, 5]}, + mapper="wfa", + workingdir="test_wf13a_{}".format(plugin), + wf_output_names=[("NA", "out", "NA_out")]) interf_addvar = FunctionInterface(fun_addvar, ["out"]) - na = NewNode(name="NA", interface=interf_addvar, workingdir="na", mapper="b", inputs={"b": [10, 20]}) + na = NewNode( + name="NA", + interface=interf_addvar, + workingdir="na", + mapper="b", + inputs={"b": [10, 20]}) wf.add(na) wf.connect_wf_input("wfa", "NA", "a") @@ -1040,8 +1242,24 @@ def test_workflow_13a(plugin, change_dir): sub.close() assert wf.is_complete - expected = [({"wf13a.wfa": 3}, [({"NA.a": 3, "NA.b": 10}, 13), ({"NA.a": 3, "NA.b": 20}, 23)]), - ({'wf13a.wfa': 5}, [({"NA.a": 5, "NA.b": 10}, 15), ({"NA.a": 5, "NA.b": 20}, 25)])] + expected = [({ + "wf13a.wfa": 3 + }, [({ + "NA.a": 3, + "NA.b": 10 + }, 13), ({ + "NA.a": 3, + "NA.b": 20 + }, 23)]), + ({ + 'wf13a.wfa': 5 + }, [({ + "NA.a": 5, + "NA.b": 10 + }, 15), ({ + "NA.a": 5, + "NA.b": 20 + }, 25)])] for i, res in enumerate(expected): assert wf.result["NA_out"][i][0] == res[0] for j in range(len(res[1])): @@ -1053,8 +1271,10 @@ def test_workflow_13a(plugin, change_dir): @python35_only def test_workflow_13c(plugin, change_dir): """using inputs for workflow and connect_wf_input, using wf.map(mapper, inputs)""" - wf = NewWorkflow(name="wf13c", workingdir="test_wf13c_{}".format(plugin), - wf_output_names=[("NA", "out", "NA_out")]) + wf = NewWorkflow( + name="wf13c", + workingdir="test_wf13c_{}".format(plugin), + wf_output_names=[("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") wf.add(na).map(mapper="wfa", inputs={"wfa": [3, 5]}) @@ -1065,8 +1285,15 @@ def test_workflow_13c(plugin, change_dir): sub.close() assert wf.is_complete - expected = [({"wf13c.wfa": 3}, [({"NA.a": 3}, 5)]), - ({'wf13c.wfa': 5}, [({"NA.a": 5}, 7)])] + expected = [({ + "wf13c.wfa": 3 + }, [({ + "NA.a": 3 + }, 5)]), ({ + 'wf13c.wfa': 5 + }, [({ + "NA.a": 5 + }, 7)])] for i, res in enumerate(expected): assert wf.result["NA_out"][i][0] == res[0] assert wf.result["NA_out"][i][1][0][0] == res[1][0][0] @@ -1076,9 +1303,11 @@ def test_workflow_13c(plugin, change_dir): @python35_only def test_workflow_13b(plugin, change_dir): """using inputs for workflow and connect_wf_input, using wf.map(mapper)""" - wf = NewWorkflow(name="wf13b", inputs={"wfa": [3, 5]}, - workingdir="test_wf13b_{}".format(plugin), - wf_output_names=[("NA", "out", "NA_out")]) + wf = NewWorkflow( + name="wf13b", + inputs={"wfa": [3, 5]}, + 
workingdir="test_wf13b_{}".format(plugin), + wf_output_names=[("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") wf.add(na).map(mapper="wfa") @@ -1089,8 +1318,15 @@ def test_workflow_13b(plugin, change_dir): sub.close() assert wf.is_complete - expected = [({"wf13b.wfa": 3}, [({"NA.a": 3}, 5)]), - ({'wf13b.wfa': 5}, [({"NA.a": 5}, 7)])] + expected = [({ + "wf13b.wfa": 3 + }, [({ + "NA.a": 3 + }, 5)]), ({ + 'wf13b.wfa': 5 + }, [({ + "NA.a": 5 + }, 7)])] for i, res in enumerate(expected): assert wf.result["NA_out"][i][0] == res[0] assert wf.result["NA_out"][i][1][0][0] == res[1][0][0] @@ -1099,18 +1335,24 @@ def test_workflow_13b(plugin, change_dir): # workflow as a node + @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_workflow_14(plugin, change_dir): """workflow with a workflow as a node (no mapper)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - na = NewNode(name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) - wfa = NewWorkflow(name="wfa", workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + na = NewNode( + name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) + wfa = NewWorkflow( + name="wfa", + workingdir="test_wfa", + wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) - wf = NewWorkflow(name="wf14", workingdir="test_wf14_{}".format(plugin), - wf_output_names=[("wfa", "NA_out", "wfa_out")]) + wf = NewWorkflow( + name="wf14", + workingdir="test_wf14_{}".format(plugin), + wf_output_names=[("wfa", "NA_out", "wfa_out")]) wf.add(wfa) sub = Submitter(runnable=wf, plugin=plugin) @@ -1130,13 +1372,18 @@ def test_workflow_14a(plugin, change_dir): """workflow with a workflow as a node (no mapper, using connect_wf_input in wfa)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") - wfa = NewWorkflow(name="wfa", workingdir="test_wfa", inputs={"a": 3}, - wf_output_names=[("NA", "out", "NA_out")]) + wfa = NewWorkflow( + name="wfa", + workingdir="test_wfa", + inputs={"a": 3}, + wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) wfa.connect_wf_input("a", "NA", "a") - wf = NewWorkflow(name="wf14a", workingdir="test_wf14a_{}".format(plugin), - wf_output_names=[("wfa", "NA_out", "wfa_out")]) + wf = NewWorkflow( + name="wf14a", + workingdir="test_wf14a_{}".format(plugin), + wf_output_names=[("wfa", "NA_out", "wfa_out")]) wf.add(wfa) sub = Submitter(runnable=wf, plugin=plugin) @@ -1156,13 +1403,18 @@ def test_workflow_14b(plugin, change_dir): """workflow with a workflow as a node (no mapper, using connect_wf_input in wfa and wf)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") - wfa = NewWorkflow(name="wfa", workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + wfa = NewWorkflow( + name="wfa", + workingdir="test_wfa", + wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) wfa.connect_wf_input("a", "NA", "a") - wf = NewWorkflow(name="wf14b", workingdir="test_wf14b_{}".format(plugin), - wf_output_names=[("wfa", "NA_out", "wfa_out")], inputs={"a": 3}) + wf = NewWorkflow( + name="wf14b", + workingdir="test_wf14b_{}".format(plugin), + wf_output_names=[("wfa", "NA_out", "wfa_out")], + inputs={"a": 3}) wf.add(wfa) wf.connect_wf_input("a", "wfa", "a") @@ -1182,14 +1434,22 @@ def test_workflow_14b(plugin, change_dir): def test_workflow_15(plugin, change_dir): """workflow 
with a workflow as a node with mapper (like 14 but with a mapper)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - na = NewNode(name="NA", interface=interf_addtwo, workingdir="na", - inputs={"a": [3, 5]}, mapper="a") - wfa = NewWorkflow(name="wfa", workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + na = NewNode( + name="NA", + interface=interf_addtwo, + workingdir="na", + inputs={"a": [3, 5]}, + mapper="a") + wfa = NewWorkflow( + name="wfa", + workingdir="test_wfa", + wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) - wf = NewWorkflow(name="wf15", workingdir="test_wf15_{}".format(plugin), - wf_output_names=[("wfa", "NA_out", "wfa_out")]) + wf = NewWorkflow( + name="wf15", + workingdir="test_wf15_{}".format(plugin), + wf_output_names=[("wfa", "NA_out", "wfa_out")]) wf.add(wfa) sub = Submitter(runnable=wf, plugin=plugin) @@ -1207,17 +1467,23 @@ def test_workflow_15(plugin, change_dir): @python35_only def test_workflow_16(plugin, change_dir): """workflow with two nodes, and one is a workflow (no mapper)""" - wf = NewWorkflow(name="wf16", workingdir="test_wf16_{}".format(plugin), - wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) + wf = NewWorkflow( + name="wf16", + workingdir="test_wf16_{}".format(plugin), + wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - na = NewNode(name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) + na = NewNode( + name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) wf.add(na) # the second node does not have explicit mapper (but keeps the mapper from the NA node) interf_addvar = FunctionInterface(fun_addvar, ["out"]) nb = NewNode(name="NB", interface=interf_addvar, workingdir="nb") - wfb = NewWorkflow(name="wfb", workingdir="test_wfb", inputs={"b": 10}, - wf_output_names=[("NB", "out", "NB_out")]) + wfb = NewWorkflow( + name="wfb", + workingdir="test_wfb", + inputs={"b": 10}, + wf_output_names=[("NB", "out", "NB_out")]) wfb.add(nb) wfb.connect_wf_input("b", "NB", "b") wfb.connect_wf_input("a", "NB", "a") @@ -1247,8 +1513,10 @@ def test_workflow_16(plugin, change_dir): @python35_only def test_workflow_16a(plugin, change_dir): """workflow with two nodes, and one is a workflow (with mapper)""" - wf = NewWorkflow(name="wf16a", workingdir="test_wf16a_{}".format(plugin), - wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) + wf = NewWorkflow( + name="wf16a", + workingdir="test_wf16a_{}".format(plugin), + wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = NewNode(name="NA", interface=interf_addtwo, workingdir="na") na.map(mapper="a", inputs={"a": [3, 5]}) @@ -1257,8 +1525,11 @@ def test_workflow_16a(plugin, change_dir): # the second node does not have explicit mapper (but keeps the mapper from the NA node) interf_addvar = FunctionInterface(fun_addvar, ["out"]) nb = NewNode(name="NB", interface=interf_addvar, workingdir="nb") - wfb = NewWorkflow(name="wfb", workingdir="test_wfb", inputs={"b": 10}, - wf_output_names=[("NB", "out", "NB_out")]) + wfb = NewWorkflow( + name="wfb", + workingdir="test_wfb", + inputs={"b": 10}, + wf_output_names=[("NB", "out", "NB_out")]) wfb.add(nb) wfb.connect_wf_input("b", "NB", "b") wfb.connect_wf_input("a", "NB", "a") @@ -1292,15 +1563,25 @@ def test_workflow_16a(plugin, change_dir): # testing CurrentInterface that is a temporary wrapper for current interfaces -@pytest.mark.skipif(not 
os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") + +@pytest.mark.skipif( + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), + reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_node_1(change_dir, plugin): """Node with a current interface and inputs, no mapper, running interface""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - nn = NewNode(name="NA", inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}, interface=interf_bet, - workingdir="test_cnd1_{}".format(plugin), output_names=["out_file"]) + nn = NewNode( + name="NA", + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }, + interface=interf_bet, + workingdir="test_cnd1_{}".format(plugin), + output_names=["out_file"]) sub = Submitter(plugin=plugin, runnable=nn) sub.run() @@ -1309,17 +1590,27 @@ def test_current_node_1(change_dir, plugin): assert "out_file" in nn.output.keys() -@pytest.mark.skipif(not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") +@pytest.mark.skipif( + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), + reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_node_2(change_dir, plugin): """Node with a current interface and mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - in_file_l = ["/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", - "/Users/dorota/nipype_workshop/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz"] - nn = NewNode(name="NA", inputs={"in_file": in_file_l}, mapper="in_file", interface=interf_bet, print_val=False, - workingdir="test_cnd2_{}".format(plugin), output_names=["out_file"]) + in_file_l = [ + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", + "/Users/dorota/nipype_workshop/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz" + ] + nn = NewNode( + name="NA", + inputs={"in_file": in_file_l}, + mapper="in_file", + interface=interf_bet, + print_val=False, + workingdir="test_cnd2_{}".format(plugin), + output_names=["out_file"]) sub = Submitter(plugin=plugin, runnable=nn) sub.run() @@ -1330,17 +1621,31 @@ def test_current_node_2(change_dir, plugin): assert "NA.in_file:1" in nn.output["out_file"].keys() -@pytest.mark.skipif(not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") +@pytest.mark.skipif( + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), + reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_1(change_dir, plugin): """Wf with a current interface, no mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - nn = NewNode(name="fsl", inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}, interface=interf_bet, - workingdir="nn", output_names=["out_file"], print_val=False) - - wf = NewWorkflow( workingdir="test_cwf_1_{}".format(plugin), name="cw1", wf_output_names=[("fsl", "out_file", "fsl_out")], print_val=False) + nn = NewNode( + name="fsl", + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }, + interface=interf_bet, + workingdir="nn", + 
output_names=["out_file"], + print_val=False) + + wf = NewWorkflow( + workingdir="test_cwf_1_{}".format(plugin), + name="cw1", + wf_output_names=[("fsl", "out_file", "fsl_out")], + print_val=False) wf.add_nodes([nn]) sub = Submitter(plugin=plugin, runnable=wf) @@ -1350,17 +1655,31 @@ def test_current_wf_1(change_dir, plugin): assert "fsl_out" in wf.output.keys() -@pytest.mark.skipif(not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") +@pytest.mark.skipif( + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), + reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_1a(change_dir, plugin): """Wf with a current interface, no mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - nn = NewNode(name="fsl", inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}, interface=interf_bet, - workingdir="nn", output_names=["out_file"], print_val=False) - - wf = NewWorkflow(workingdir="test_cwf_1a_{}".format(plugin), name="cw1", wf_output_names=[("fsl", "out_file", "fsl_out")], print_val=False) + nn = NewNode( + name="fsl", + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }, + interface=interf_bet, + workingdir="nn", + output_names=["out_file"], + print_val=False) + + wf = NewWorkflow( + workingdir="test_cwf_1a_{}".format(plugin), + name="cw1", + wf_output_names=[("fsl", "out_file", "fsl_out")], + print_val=False) wf.add(runnable=nn) sub = Submitter(plugin=plugin, runnable=wf) @@ -1370,16 +1689,30 @@ def test_current_wf_1a(change_dir, plugin): assert "fsl_out" in wf.output.keys() -@pytest.mark.skipif(not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") +@pytest.mark.skipif( + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), + reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_1b(change_dir, plugin): """Wf with a current interface, no mapper; using wf.add(nipype CurrentInterface)""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - wf = NewWorkflow(workingdir="test_cwf_1b_{}".format(plugin), name="cw1", wf_output_names=[("fsl", "out_file", "fsl_out")], print_val=False) - wf.add(runnable=interf_bet, name="fsl", workingdir="nn", output_names=["out_file"], print_val=False, - inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}) + wf = NewWorkflow( + workingdir="test_cwf_1b_{}".format(plugin), + name="cw1", + wf_output_names=[("fsl", "out_file", "fsl_out")], + print_val=False) + wf.add( + runnable=interf_bet, + name="fsl", + workingdir="nn", + output_names=["out_file"], + print_val=False, + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }) sub = Submitter(plugin=plugin, runnable=wf) sub.run() @@ -1388,15 +1721,29 @@ def test_current_wf_1b(change_dir, plugin): assert "fsl_out" in wf.output.keys() -@pytest.mark.skipif(not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") +@pytest.mark.skipif( + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), + reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_1c(change_dir, plugin): """Wf with a current interface, no mapper; using wf.add(nipype interface) """ - 
wf = NewWorkflow(workingdir="test_cwf_1c_{}".format(plugin), name="cw1", wf_output_names=[("fsl", "out_file", "fsl_out")], print_val=False) - wf.add(runnable=fsl.BET(), name="fsl", workingdir="nn", output_names=["out_file"], print_val=False, - inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}) + wf = NewWorkflow( + workingdir="test_cwf_1c_{}".format(plugin), + name="cw1", + wf_output_names=[("fsl", "out_file", "fsl_out")], + print_val=False) + wf.add( + runnable=fsl.BET(), + name="fsl", + workingdir="nn", + output_names=["out_file"], + print_val=False, + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }) sub = Submitter(plugin=plugin, runnable=wf) sub.run() @@ -1405,21 +1752,34 @@ def test_current_wf_1c(change_dir, plugin): assert "fsl_out" in wf.output.keys() -@pytest.mark.skipif(not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") +@pytest.mark.skipif( + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), + reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_2(change_dir, plugin): """Wf with a current interface and mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - in_file_l = ["/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", - "/Users/dorota/nipype_workshop/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz"] - - nn = NewNode(name="fsl", interface=interf_bet, print_val=False, - workingdir="nn", output_names=["out_file"]) - - wf = NewWorkflow( workingdir="test_cwf_2_{}".format(plugin), name="cw2", wf_output_names=[("fsl", "out_file", "fsl_out")], - inputs={"in_file": in_file_l}, mapper="in_file", print_val=False) + in_file_l = [ + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", + "/Users/dorota/nipype_workshop/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz" + ] + + nn = NewNode( + name="fsl", + interface=interf_bet, + print_val=False, + workingdir="nn", + output_names=["out_file"]) + + wf = NewWorkflow( + workingdir="test_cwf_2_{}".format(plugin), + name="cw2", + wf_output_names=[("fsl", "out_file", "fsl_out")], + inputs={"in_file": in_file_l}, + mapper="in_file", + print_val=False) wf.add_nodes([nn]) wf.connect_wf_input("in_file", "fsl", "in_file") @@ -1432,24 +1792,36 @@ def test_current_wf_2(change_dir, plugin): assert 'cw2.in_file:1' in wf.output["fsl_out"].keys() -@pytest.mark.skipif(not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") +@pytest.mark.skipif( + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), + reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_2a(change_dir, plugin): """Wf with a current interface and mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - in_file_l = ["/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", - "/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz"] - - nn = NewNode(name="fsl", interface=interf_bet, print_val=False, - workingdir="nn", output_names=["out_file"], - inputs={"in_file": in_file_l}, mapper="in_file") - - wf = NewWorkflow( workingdir="test_cwf_2a_{}".format(plugin), name="cw2a", wf_output_names=[("fsl", "out_file", "fsl_out")], - print_val=False) + in_file_l = [ + 
"/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", + "/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz" + ] + + nn = NewNode( + name="fsl", + interface=interf_bet, + print_val=False, + workingdir="nn", + output_names=["out_file"], + inputs={"in_file": in_file_l}, + mapper="in_file") + + wf = NewWorkflow( + workingdir="test_cwf_2a_{}".format(plugin), + name="cw2a", + wf_output_names=[("fsl", "out_file", "fsl_out")], + print_val=False) wf.add_nodes([nn]) - # wf.connect_wf_input("in_file", "fsl", "in_file") + # wf.connect_wf_input("in_file", "fsl", "in_file") sub = Submitter(plugin=plugin, runnable=wf) sub.run() diff --git a/pydra/engine/tests/test_newnode_neuro.py b/pydra/engine/tests/test_newnode_neuro.py index a7b6acab85..32ca4bcb88 100644 --- a/pydra/engine/tests/test_newnode_neuro.py +++ b/pydra/engine/tests/test_newnode_neuro.py @@ -14,6 +14,7 @@ except ImportError: no_fmriprep = True + @pytest.fixture() def change_dir(request): orig_dir = os.getcwd() @@ -26,26 +27,35 @@ def move2orig(): request.addfinalizer(move2orig) + import pdb Name = "example" DEFAULT_MEMORY_MIN_GB = None # TODO, adding fields to Inputs (subject_id) -Inputs = {"subject_id": "sub-01", - "output_spaces": ["fsaverage", "fsaverage5"], - "source_file": "/fmriprep_test/workdir1/fmriprep_wf/single_subject_01_wf/func_preproc_ses_test_task_fingerfootlips_wf/bold_t1_trans_wf/merge/vol0000_xform-00000_merged.nii", - "t1_preproc": "/fmriprep_test/output1/fmriprep/sub-01/anat/sub-01_T1w_preproc.nii.gz", - "t1_2_fsnative_forward_transform": "/fmriprep_test/workdir1/fmriprep_wf/single_subject_01_wf/anat_preproc_wf/surface_recon_wf/t1_2_fsnative_xfm/out.lta", - "subjects_dir": "/fmriprep_test/output1/freesurfer/" +Inputs = { + "subject_id": + "sub-01", + "output_spaces": ["fsaverage", "fsaverage5"], + "source_file": + "/fmriprep_test/workdir1/fmriprep_wf/single_subject_01_wf/func_preproc_ses_test_task_fingerfootlips_wf/bold_t1_trans_wf/merge/vol0000_xform-00000_merged.nii", + "t1_preproc": + "/fmriprep_test/output1/fmriprep/sub-01/anat/sub-01_T1w_preproc.nii.gz", + "t1_2_fsnative_forward_transform": + "/fmriprep_test/workdir1/fmriprep_wf/single_subject_01_wf/anat_preproc_wf/surface_recon_wf/t1_2_fsnative_xfm/out.lta", + "subjects_dir": + "/fmriprep_test/output1/freesurfer/" } Plugins = ["serial"] Plugins = ["serial", "mp", "cf", "dask"] + def select_target(subject_id, space): """ Given a source subject ID and a target space, get the target subject ID """ return subject_id if space == 'fsnative' else space + @pytest.mark.skipif(no_fmriprep, reason="No fmriprep") @pytest.mark.parametrize("plugin", Plugins) def test_neuro(change_dir, plugin): @@ -58,17 +68,19 @@ def test_neuro(change_dir, plugin): # #dj: why do I need outputs? 
- - wf = NewWorkflow(name=Name, inputs=Inputs, workingdir="test_neuro_{}".format(plugin), print_val=False, - wf_output_names=[("sampler", "out_file", "sampler_out"), ("targets", "out", "target_out")]) + wf = NewWorkflow( + name=Name, + inputs=Inputs, + workingdir="test_neuro_{}".format(plugin), + print_val=False, + wf_output_names=[("sampler", "out_file", "sampler_out"), + ("targets", "out", "target_out")]) # @interface # def select_target(subject_id, space): # """ Given a source subject ID and a target space, get the target subject ID """ # return subject_id if space == 'fsnative' else space - - # wf.add('targets', select_target(subject_id=wf.inputs.subject_id)) # .map('space', space=[space for space in wf.inputs.output_spaces # if space.startswith('fs')]) @@ -93,7 +105,6 @@ def test_neuro(change_dir, plugin): .map_node('subject', inputs={"subject": [space for space in Inputs["output_spaces"] if space.startswith("fs")]}) #TODO: now it's only one subject - # wf.add('resampling_xfm', # fs.utils.LTAConvert(in_lta='identity.nofile', # out_lta=True, @@ -113,8 +124,6 @@ def test_neuro(change_dir, plugin): in_lta2="t1_2_fsnative_forward_transform", in_lta1="resampling_xfm.out_lta", output_names=["out_file"], print_val=False) - - # wf.add('sampler', # fs.SampleToSurface(sampling_method='average', sampling_range=(0, 1, 0.2), # sampling_units='frac', interp_method='trilinear', @@ -138,7 +147,6 @@ def test_neuro(change_dir, plugin): target_subject="targets.out", source_file="rename_src.out_file", output_names=["out_file"])\ .map_node(mapper=[('_targets', "_rename_src"), 'hemi'], inputs={"hemi": ['lh', 'rh']}) - sub = Submitter(plugin=plugin, runnable=wf) sub.run() sub.close() diff --git a/pydra/engine/workers.py b/pydra/engine/workers.py index 1014ca712c..d8e7716d11 100644 --- a/pydra/engine/workers.py +++ b/pydra/engine/workers.py @@ -25,7 +25,7 @@ def close(self): class MpWorker(Worker): - def __init__(self, nr_proc=4): #should be none + def __init__(self, nr_proc=4): #should be none self.nr_proc = nr_proc self.pool = mp.Pool(processes=self.nr_proc) logger.debug('Initialize MpWorker') @@ -72,10 +72,9 @@ def __init__(self): from distributed.deploy.local import LocalCluster logger.debug("Initialize Dask Worker") #self.cluster = LocalCluster() - self.client = Client()#self.cluster) + self.client = Client() #self.cluster) #print("BOKEH", self.client.scheduler_info()["address"] + ":" + str(self.client.scheduler_info()["services"]["bokeh"])) - def run_el(self, interface, inp): print("DASK, run_el: ", interface, inp, time.time()) # dask doesn't copy the node second time, so it doesn't see that I change input in the meantime (??) 
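The DaskWorker above drives everything through distributed's futures interface. Below is a minimal, self-contained sketch of that submit/callback/result pattern outside pydra; the Client import path and the toy job function are illustrative assumptions, not code from this repository:

    from distributed import Client

    def job(x):
        # stand-in for the interface.run(inp) call made in run_el above
        return x + 2

    client = Client()            # with no address, starts a LocalCluster
    fut = client.submit(job, 3)  # schedules the call, returns a Future immediately
    fut.add_done_callback(lambda f: print("DONE", f))  # runs once the task finishes
    print("res", fut.result())   # blocks until the value arrives; prints "res 5"
    client.close()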
@@ -85,8 +84,6 @@ def run_el(self, interface, inp): x.add_done_callback(lambda x: print("DONE ", interface, inp)) print("res", x.result()) - def close(self): #self.cluster.close() self.client.close() - From 5079ef3f178e47d99bd259952ae3f616a95d7a25 Mon Sep 17 00:00:00 2001 From: Jakub Kaczmarzyk Date: Wed, 10 Oct 2018 10:09:29 -0400 Subject: [PATCH 02/12] add files --- .gitignore | 5 + .style.yapf | 260 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 265 insertions(+) create mode 100644 .gitignore create mode 100644 .style.yapf diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..377937d520 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +__pycache__ +*.pyc + +.ipynb_checkpoints +*.ipynb diff --git a/.style.yapf b/.style.yapf new file mode 100644 index 0000000000..be7f612300 --- /dev/null +++ b/.style.yapf @@ -0,0 +1,260 @@ +[style] +# Align closing bracket with visual indentation. +align_closing_bracket_with_visual_indent=True + +# Allow dictionary keys to exist on multiple lines. For example: +# +# x = { +# ('this is the first element of a tuple', +# 'this is the second element of a tuple'): +# value, +# } +allow_multiline_dictionary_keys=False + +# Allow lambdas to be formatted on more than one line. +allow_multiline_lambdas=False + +# Allow splits before the dictionary value. +allow_split_before_dict_value=True + +# Number of blank lines surrounding top-level function and class +# definitions. +blank_lines_around_top_level_definition=2 + +# Insert a blank line before a class-level docstring. +blank_line_before_class_docstring=False + +# Insert a blank line before a module docstring. +blank_line_before_module_docstring=False + +# Insert a blank line before a 'def' or 'class' immediately nested +# within another 'def' or 'class'. For example: +# +# class Foo: +# # <------ this blank line +# def method(): +# ... +blank_line_before_nested_class_or_def=False + +# Do not split consecutive brackets. Only relevant when +# dedent_closing_brackets is set. For example: +# +# call_func_that_takes_a_dict( +# { +# 'key1': 'value1', +# 'key2': 'value2', +# } +# ) +# +# would reformat to: +# +# call_func_that_takes_a_dict({ +# 'key1': 'value1', +# 'key2': 'value2', +# }) +coalesce_brackets=False + +# The column limit. +column_limit=79 + +# The style for continuation alignment. Possible values are: +# +# - SPACE: Use spaces for continuation alignment. This is default behavior. +# - FIXED: Use fixed number (CONTINUATION_INDENT_WIDTH) of columns +# (ie: CONTINUATION_INDENT_WIDTH/INDENT_WIDTH tabs) for continuation +# alignment. +# - LESS: Slightly left if cannot vertically align continuation lines with +# indent characters. +# - VALIGN-RIGHT: Vertically align continuation lines with indent +# characters. Slightly right (one more indent character) if cannot +# vertically align continuation lines with indent characters. +# +# For options FIXED, and VALIGN-RIGHT are only available when USE_TABS is +# enabled. +continuation_align_style=SPACE + +# Indent width used for line continuations. +continuation_indent_width=4 + +# Put closing brackets on a separate line, dedented, if the bracketed +# expression can't fit in a single line. Applies to all kinds of brackets, +# including function definitions and calls. 
For example: +# +# config = { +# 'key1': 'value1', +# 'key2': 'value2', +# } # <--- this bracket is dedented and on a separate line +# +# time_series = self.remote_client.query_entity_counters( +# entity='dev3246.region1', +# key='dns.query_latency_tcp', +# transform=Transformation.AVERAGE(window=timedelta(seconds=60)), +# start_ts=now()-timedelta(days=3), +# end_ts=now(), +# ) # <--- this bracket is dedented and on a separate line +dedent_closing_brackets=False + +# Disable the heuristic which places each list element on a separate line +# if the list is comma-terminated. +disable_ending_comma_heuristic=False + +# Place each dictionary entry onto its own line. +each_dict_entry_on_separate_line=True + +# The regex for an i18n comment. The presence of this comment stops +# reformatting of that line, because the comments are required to be +# next to the string they translate. +i18n_comment= + +# The i18n function call names. The presence of this function stops +# reformattting on that line, because the string it has cannot be moved +# away from the i18n comment. +i18n_function_call= + +# Indent the dictionary value if it cannot fit on the same line as the +# dictionary key. For example: +# +# config = { +# 'key1': +# 'value1', +# 'key2': value1 + +# value2, +# } +indent_dictionary_value=False + +# The number of columns to use for indentation. +indent_width=4 + +# Join short lines into one line. E.g., single line 'if' statements. +join_multiple_lines=True + +# Do not include spaces around selected binary operators. For example: +# +# 1 + 2 * 3 - 4 / 5 +# +# will be formatted as follows when configured with "*,/": +# +# 1 + 2*3 - 4/5 +# +no_spaces_around_selected_binary_operators= + +# Use spaces around default or named assigns. +spaces_around_default_or_named_assign=False + +# Use spaces around the power operator. +spaces_around_power_operator=False + +# The number of spaces required before a trailing comment. +spaces_before_comment=2 + +# Insert a space between the ending comma and closing bracket of a list, +# etc. +space_between_ending_comma_and_closing_bracket=True + +# Split before arguments +split_all_comma_separated_values=False + +# Split before arguments if the argument list is terminated by a +# comma. +split_arguments_when_comma_terminated=False + +# Set to True to prefer splitting before '&', '|' or '^' rather than +# after. +split_before_bitwise_operator=True + +# Split before the closing bracket if a list or dict literal doesn't fit on +# a single line. +split_before_closing_bracket=True + +# Split before a dictionary or set generator (comp_for). For example, note +# the split before the 'for': +# +# foo = { +# variable: 'Hello world, have a nice day!' +# for variable in bar if variable != 42 +# } +split_before_dict_set_generator=True + +# Split before the '.' if we need to split a longer expression: +# +# foo = ('This is a really long string: {}, {}, {}, {}'.format(a, b, c, d)) +# +# would reformat to something like: +# +# foo = ('This is a really long string: {}, {}, {}, {}' +# .format(a, b, c, d)) +split_before_dot=False + +# Split after the opening paren which surrounds an expression if it doesn't +# fit on a single line. +split_before_expression_after_opening_paren=False + +# If an argument / parameter list is going to be split, then split before +# the first argument. +split_before_first_argument=False + +# Set to True to prefer splitting before 'and' or 'or' rather than +# after. +split_before_logical_operator=True + +# Split named assignments onto individual lines. 
+split_before_named_assigns=True + +# Set to True to split list comprehensions and generators that have +# non-trivial expressions and multiple clauses before each of these +# clauses. For example: +# +# result = [ +# a_long_var + 100 for a_long_var in xrange(1000) +# if a_long_var % 10] +# +# would reformat to something like: +# +# result = [ +# a_long_var + 100 +# for a_long_var in xrange(1000) +# if a_long_var % 10] +split_complex_comprehension=False + +# The penalty for splitting right after the opening bracket. +split_penalty_after_opening_bracket=30 + +# The penalty for splitting the line after a unary operator. +split_penalty_after_unary_operator=10000 + +# The penalty for splitting right before an if expression. +split_penalty_before_if_expr=0 + +# The penalty of splitting the line around the '&', '|', and '^' +# operators. +split_penalty_bitwise_operator=300 + +# The penalty for splitting a list comprehension or generator +# expression. +split_penalty_comprehension=80 + +# The penalty for characters over the column limit. +split_penalty_excess_character=7000 + +# The penalty incurred by adding a line split to the unwrapped line. The +# more line splits added the higher the penalty. +split_penalty_for_added_line_split=30 + +# The penalty of splitting a list of "import as" names. For example: +# +# from a_very_long_or_indented_module_name_yada_yad import (long_argument_1, +# long_argument_2, +# long_argument_3) +# +# would reformat to something like: +# +# from a_very_long_or_indented_module_name_yada_yad import ( +# long_argument_1, long_argument_2, long_argument_3) +split_penalty_import_names=0 + +# The penalty of splitting the line around the 'and' and 'or' +# operators. +split_penalty_logical_operator=300 + +# Use the Tab character for indentation. +use_tabs=False From 8430dbf0d287f77be0f3b9eac4aad51bd0c30d63 Mon Sep 17 00:00:00 2001 From: Jakub Kaczmarzyk Date: Wed, 10 Oct 2018 10:26:52 -0400 Subject: [PATCH 03/12] add coverage files + egg-info --- .gitignore | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index 377937d520..4db61f2d20 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,10 @@ +pydra.egg-info + __pycache__ *.pyc .ipynb_checkpoints *.ipynb + +.coverage* +cov.xml From 355fd74ddaad7dc4f3966752c2ee2be846cd3806 Mon Sep 17 00:00:00 2001 From: Jakub Kaczmarzyk Date: Wed, 10 Oct 2018 10:28:57 -0400 Subject: [PATCH 04/12] more yapf style updates --- pydra/engine/auxiliary.py | 13 +- pydra/engine/node.py | 126 +++++++-- pydra/engine/submitter.py | 2 +- pydra/engine/tests/test_newnode.py | 342 +++++++++++++++++------ pydra/engine/tests/test_newnode_neuro.py | 10 +- 5 files changed, 361 insertions(+), 132 deletions(-) diff --git a/pydra/engine/auxiliary.py b/pydra/engine/auxiliary.py index c85b91aa83..e29614082b 100644 --- a/pydra/engine/auxiliary.py +++ b/pydra/engine/auxiliary.py @@ -86,7 +86,8 @@ def mapping_axis(state_inputs, mapper_rpn): right = stack.pop() left = stack.pop() if left == "OUT": - if state_inputs[right].shape == current_shape: #todo:should we allow for one-element array? + if state_inputs[ + right].shape == current_shape: #todo:should we allow for one-element array? 
axis_for_input[right] = current_axis else: raise Exception( @@ -108,7 +109,9 @@ def mapping_axis(state_inputs, mapper_rpn): axis_for_input[left] = current_axis axis_for_input[right] = current_axis else: - raise Exception("arrays for scalar operations should have the same size") + raise Exception( + "arrays for scalar operations should have the same size" + ) stack.append("OUT") @@ -147,8 +150,10 @@ def mapping_axis(state_inputs, mapper_rpn): for i in range(state_inputs[right].ndim) ] current_axis = axis_for_input[left] + axis_for_input[right] - current_shape = tuple([i for i in - state_inputs[left].shape + state_inputs[right].shape]) + current_shape = tuple([ + i for i in state_inputs[left].shape + + state_inputs[right].shape + ]) stack.append("OUT") else: diff --git a/pydra/engine/node.py b/pydra/engine/node.py index dcd5425944..a8a8a55e8f 100644 --- a/pydra/engine/node.py +++ b/pydra/engine/node.py @@ -5,7 +5,6 @@ import networkx as nx import numpy as np - from nipype.utils.filemanip import loadpkl from nipype import logging @@ -15,8 +14,14 @@ class NodeBase(object): - def __init__(self, name, mapper=None, inputs=None, other_mappers=None, - write_state=True, *args, **kwargs): + def __init__(self, + name, + mapper=None, + inputs=None, + other_mappers=None, + write_state=True, + *args, + **kwargs): """A base structure for nodes in the computational graph (i.e. both ``Node`` and ``Workflow``). @@ -94,9 +99,11 @@ def inputs(self): @inputs.setter def inputs(self, inputs): # Massage inputs dict - inputs = {".".join((self.name, key)): value - if not isinstance(value, list) else np.array(value) - for key, value in inputs.items()} + inputs = { + ".".join((self.name, key)): + value if not isinstance(value, list) else np.array(value) + for key, value in inputs.items() + } self._inputs.update(inputs) self._state_inputs.update(inputs) @@ -234,12 +241,26 @@ def _dict_tuple2list(self, container): class Node(NodeBase): - def __init__(self, name, interface, inputs=None, mapper=None, join_by=None, - workingdir=None, other_mappers=None, - output_names=None, write_state=True, *args, **kwargs): - super(Node, self).__init__(name=name, mapper=mapper, inputs=inputs, - other_mappers=other_mappers, write_state=write_state, - *args, **kwargs) + def __init__(self, + name, + interface, + inputs=None, + mapper=None, + join_by=None, + workingdir=None, + other_mappers=None, + output_names=None, + write_state=True, + *args, + **kwargs): + super(Node, self).__init__( + name=name, + mapper=mapper, + inputs=inputs, + other_mappers=other_mappers, + write_state=write_state, + *args, + **kwargs) # working directory for node, will be change if node is a part of a wf self.workingdir = workingdir @@ -328,7 +349,8 @@ def get_output(self): """collecting all outputs and updating self._output""" for key_out in self.output_names: self._output[key_out] = {} - for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate( + itertools.product(*self.state.all_elements)): if self.write_state: state_dict = self.state.state_values(ind) else: @@ -423,10 +445,24 @@ def _reading_results(self): class Workflow(NodeBase): - def __init__(self, name, inputs=None, wf_output_names=None, mapper=None, #join_by=None, - nodes=None, workingdir=None, write_state=True, *args, **kwargs): - super(Workflow, self).__init__(name=name, mapper=mapper, inputs=inputs, - write_state=write_state, *args, **kwargs) + def __init__( + self, + name, + inputs=None, + wf_output_names=None, + mapper=None, #join_by=None, + nodes=None, + 
workingdir=None, + write_state=True, + *args, + **kwargs): + super(Workflow, self).__init__( + name=name, + mapper=mapper, + inputs=inputs, + write_state=write_state, + *args, + **kwargs) self.graph = nx.DiGraph() # all nodes in the workflow (probably will be removed) @@ -465,7 +501,6 @@ def __init__(self, name, inputs=None, wf_output_names=None, mapper=None, #join_b # self.add(name, value) - @property def nodes(self): return self._nodes @@ -506,7 +541,8 @@ def get_output(self): if out_wf_nm not in self._output.keys(): if self.mapper: self._output[out_wf_nm] = {} - for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate( + itertools.product(*self.state.all_elements)): if self.write_state: wf_inputs_dict = self.state.state_values(ind) dir_nm_el = "_".join([ @@ -556,7 +592,8 @@ def _reading_results(self): key_out = out[2] if len(out) == 3 else out[1] self._result[key_out] = [] if self.mapper: - for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate( + itertools.product(*self.state.all_elements)): if self.write_state: wf_inputs_dict = self.state.state_values(ind) else: @@ -602,8 +639,16 @@ def add_nodes(self, nodes): self._node_mappers[nn.name] = nn.mapper # TODO: workingir shouldn't have None - def add(self, runnable, name=None, workingdir=None, inputs=None, output_names=None, mapper=None, - write_state=True, out_read=False, **kwargs): + def add(self, + runnable, + name=None, + workingdir=None, + inputs=None, + output_names=None, + mapper=None, + write_state=True, + out_read=False, + **kwargs): if is_function(runnable): if not output_names: output_names = ["out"] @@ -613,25 +658,43 @@ def add(self, runnable, name=None, workingdir=None, inputs=None, output_names=No raise Exception("you have to specify name for the node") if not workingdir: workingdir = name - node = Node(interface=interface, workingdir=workingdir, name=name, inputs=inputs, mapper=mapper, - other_mappers=self._node_mappers, write_state=write_state) + node = Node( + interface=interface, + workingdir=workingdir, + name=name, + inputs=inputs, + mapper=mapper, + other_mappers=self._node_mappers, + write_state=write_state) elif is_function_interface(runnable) or is_current_interface(runnable): if not name: raise Exception("you have to specify name for the node") if not workingdir: workingdir = name - node = Node(interface=runnable, workingdir=workingdir, name=name, inputs=inputs, mapper=mapper, - other_mappers=self._node_mappers, output_names=output_names, - write_state=write_state) + node = Node( + interface=runnable, + workingdir=workingdir, + name=name, + inputs=inputs, + mapper=mapper, + other_mappers=self._node_mappers, + output_names=output_names, + write_state=write_state) elif is_nipype_interface(runnable): ci = aux.CurrentInterface(interface=runnable, name=name) if not name: raise Exception("you have to specify name for the node") if not workingdir: workingdir = name - node = Node(interface=ci, workingdir=workingdir, name=name, inputs=inputs, mapper=mapper, - other_mappers=self._node_mappers, output_names=output_names, - write_state=write_state) + node = Node( + interface=ci, + workingdir=workingdir, + name=name, + inputs=inputs, + mapper=mapper, + other_mappers=self._node_mappers, + output_names=output_names, + write_state=write_state) elif is_node(runnable): node = runnable elif is_workflow(runnable): @@ -683,7 +746,8 @@ def preparing(self, wf_inputs=None, wf_inputs_ind=None): self.name, inp_wf)) for nn in self.graph_sorted: if 
self.write_state: - dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(wf_inputs.items())]) + dir_nm_el = "_".join( + ["{}:{}".format(i, j) for i, j in list(wf_inputs.items())]) else: dir_nm_el = "_".join([ "{}:{}".format(i, j) diff --git a/pydra/engine/submitter.py b/pydra/engine/submitter.py index 37a58c848b..2f519a868d 100644 --- a/pydra/engine/submitter.py +++ b/pydra/engine/submitter.py @@ -78,7 +78,7 @@ def run_workflow(self, workflow=None, ready=True): else: if ready: if workflow.write_state: - workflow.preparing(wf_inputs=workflow.inputs) + workflow.preparing(wf_inputs=workflow.inputs) else: inputs_ind = dict( (key, None) for (key, _) in workflow.inputs.items()) diff --git a/pydra/engine/tests/test_newnode.py b/pydra/engine/tests/test_newnode.py index 9788ae255e..36cf3b6886 100644 --- a/pydra/engine/tests/test_newnode.py +++ b/pydra/engine/tests/test_newnode.py @@ -63,7 +63,8 @@ def test_node_2(): def test_node_3(): """Node with interface, inputs and mapper""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - nn = Node(name="NA", interface=interf_addtwo, inputs={"a": [3, 5]}, mapper="a") + nn = Node( + name="NA", interface=interf_addtwo, inputs={"a": [3, 5]}, mapper="a") assert nn.mapper == "NA.a" assert (nn.inputs["NA.a"] == np.array([3, 5])).all() @@ -107,8 +108,11 @@ def test_node_4a(): def test_node_5(plugin, change_dir): """Node with interface and inputs, no mapper, running interface""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - nn = Node(name="NA", inputs={"a": 3}, interface=interf_addtwo, - workingdir="test_nd5_{}".format(plugin)) + nn = Node( + name="NA", + inputs={"a": 3}, + interface=interf_addtwo, + workingdir="test_nd5_{}".format(plugin)) assert (nn.inputs["NA.a"] == np.array([3])).all() @@ -133,7 +137,10 @@ def test_node_5(plugin, change_dir): def test_node_6(plugin, change_dir): """Node with interface, inputs and the simplest mapper, running interface""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - nn = Node(name="NA", interface=interf_addtwo, workingdir="test_nd6_{}".format(plugin)) + nn = Node( + name="NA", + interface=interf_addtwo, + workingdir="test_nd6_{}".format(plugin)) nn.map(mapper="a", inputs={"a": [3, 5]}) assert nn.mapper == "NA.a" @@ -160,7 +167,10 @@ def test_node_6(plugin, change_dir): def test_node_7(plugin, change_dir): """Node with interface, inputs and scalar mapper, running interface""" interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nn = Node(name="NA", interface=interf_addvar, workingdir="test_nd7_{}".format(plugin)) + nn = Node( + name="NA", + interface=interf_addvar, + workingdir="test_nd7_{}".format(plugin)) # scalar mapper nn.map(mapper=("a", "b"), inputs={"a": [3, 5], "b": [2, 1]}) @@ -189,7 +199,10 @@ def test_node_7(plugin, change_dir): def test_node_8(plugin, change_dir): """Node with interface, inputs and vector mapper, running interface""" interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nn = Node(name="NA", interface=interf_addvar, workingdir="test_nd8_{}".format(plugin)) + nn = Node( + name="NA", + interface=interf_addvar, + workingdir="test_nd8_{}".format(plugin)) # [] for outer product nn.map(mapper=["a", "b"], inputs={"a": [3, 5], "b": [2, 1]}) @@ -277,7 +290,8 @@ def test_workflow_2(plugin, change_dir): # the second node does not have explicit mapper (but keeps the mapper from the NA node) interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nb = Node(name="NB", interface=interf_addvar, inputs={"b": 10}, workingdir="nb") + nb = Node( + name="NB", 
interface=interf_addvar, inputs={"b": 10}, workingdir="nb") # adding 2 nodes and create a connection (as it is now) wf.add_nodes([na, nb]) @@ -764,7 +778,10 @@ def test_workflow_6b(plugin, change_dir): def test_workflow_7(plugin, change_dir): """using inputs for workflow and connect_workflow""" # adding inputs to the workflow directly - wf = Workflow(name="wf7", inputs={"wfa": [3, 5]}, workingdir="test_wf7_{}".format(plugin)) + wf = Workflow( + name="wf7", + inputs={"wfa": [3, 5]}, + workingdir="test_wf7_{}".format(plugin)) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") @@ -791,7 +808,10 @@ def test_workflow_7(plugin, change_dir): @python35_only def test_workflow_7a(plugin, change_dir): """using inputs for workflow and kwarg arg in add (instead of connect)""" - wf = Workflow(name="wf7a", inputs={"wfa": [3, 5]}, workingdir="test_wf7a_{}".format(plugin)) + wf = Workflow( + name="wf7a", + inputs={"wfa": [3, 5]}, + workingdir="test_wf7a_{}".format(plugin)) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") # using kwrg argument in the add method (instead of connect or connect_wf_input @@ -816,7 +836,8 @@ def test_workflow_7a(plugin, change_dir): @python35_only def test_workflow_8(plugin, change_dir): """using inputs for workflow and connect_wf_input for the second node""" - wf = Workflow(name="wf8", workingdir="test_wf8_{}".format(plugin), inputs={"b": 10}) + wf = Workflow( + name="wf8", workingdir="test_wf8_{}".format(plugin), inputs={"b": 10}) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") na.map(mapper="a", inputs={"a": [3, 5]}) @@ -1086,8 +1107,10 @@ def test_workflow_11(plugin, change_dir): @python35_only def test_workflow_12(plugin, change_dir): """testing if wf.result works (the same workflow as in test_workflow_6)""" - wf = Workflow(name="wf12", workingdir="test_wf12_{}".format(plugin), - wf_output_names=[("NA", "out", "NA_out"), ("NB", "out")]) + wf = Workflow( + name="wf12", + workingdir="test_wf12_{}".format(plugin), + wf_output_names=[("NA", "out", "NA_out"), ("NB", "out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") @@ -1132,8 +1155,10 @@ def test_workflow_12(plugin, change_dir): @python35_only def test_workflow_12a(plugin, change_dir): """testing if wf.result raises exceptione (the same workflow as in test_workflow_6)""" - wf = Workflow(name="wf12a", workingdir="test_wf12a_{}".format(plugin), - wf_output_names=[("NA", "out", "wf_out"), ("NB", "out", "wf_out")]) + wf = Workflow( + name="wf12a", + workingdir="test_wf12a_{}".format(plugin), + wf_output_names=[("NA", "out", "wf_out"), ("NB", "out", "wf_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") @@ -1161,8 +1186,12 @@ def test_workflow_12a(plugin, change_dir): @python35_only def test_workflow_13(plugin, change_dir): """using inputs for workflow and connect_wf_input""" - wf = Workflow(name="wf13", inputs={"wfa": [3, 5]}, mapper="wfa", workingdir="test_wf13_{}".format(plugin), - wf_output_names=[("NA", "out", "NA_out")]) + wf = Workflow( + name="wf13", + inputs={"wfa": [3, 5]}, + mapper="wfa", + workingdir="test_wf13_{}".format(plugin), + wf_output_names=[("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", 
interface=interf_addtwo, workingdir="na") wf.add(na) @@ -1192,10 +1221,19 @@ def test_workflow_13(plugin, change_dir): @python35_only def test_workflow_13a(plugin, change_dir): """using inputs for workflow and connect_wf_input (the node has 2 inputs)""" - wf = Workflow(name="wf13a", inputs={"wfa": [3, 5]}, mapper="wfa", workingdir="test_wf13a_{}".format(plugin), - wf_output_names=[("NA", "out", "NA_out")]) + wf = Workflow( + name="wf13a", + inputs={"wfa": [3, 5]}, + mapper="wfa", + workingdir="test_wf13a_{}".format(plugin), + wf_output_names=[("NA", "out", "NA_out")]) interf_addvar = FunctionInterface(fun_addvar, ["out"]) - na = Node(name="NA", interface=interf_addvar, workingdir="na", mapper="b", inputs={"b": [10, 20]}) + na = Node( + name="NA", + interface=interf_addvar, + workingdir="na", + mapper="b", + inputs={"b": [10, 20]}) wf.add(na) wf.connect_wf_input("wfa", "NA", "a") @@ -1233,8 +1271,10 @@ def test_workflow_13a(plugin, change_dir): @python35_only def test_workflow_13c(plugin, change_dir): """using inputs for workflow and connect_wf_input, using wf.map(mapper, inputs)""" - wf = Workflow(name="wf13c", workingdir="test_wf13c_{}".format(plugin), - wf_output_names=[("NA", "out", "NA_out")]) + wf = Workflow( + name="wf13c", + workingdir="test_wf13c_{}".format(plugin), + wf_output_names=[("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") wf.add(na).map(mapper="wfa", inputs={"wfa": [3, 5]}) @@ -1263,9 +1303,11 @@ def test_workflow_13c(plugin, change_dir): @python35_only def test_workflow_13b(plugin, change_dir): """using inputs for workflow and connect_wf_input, using wf.map(mapper)""" - wf = Workflow(name="wf13b", inputs={"wfa": [3, 5]}, - workingdir="test_wf13b_{}".format(plugin), - wf_output_names=[("NA", "out", "NA_out")]) + wf = Workflow( + name="wf13b", + inputs={"wfa": [3, 5]}, + workingdir="test_wf13b_{}".format(plugin), + wf_output_names=[("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") wf.add(na).map(mapper="wfa") @@ -1299,13 +1341,18 @@ def test_workflow_13b(plugin, change_dir): def test_workflow_14(plugin, change_dir): """workflow with a workflow as a node (no mapper)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - na = Node(name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) - wfa = Workflow(name="wfa", workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + na = Node( + name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) + wfa = Workflow( + name="wfa", + workingdir="test_wfa", + wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) - wf = Workflow(name="wf14", workingdir="test_wf14_{}".format(plugin), - wf_output_names=[("wfa", "NA_out", "wfa_out")]) + wf = Workflow( + name="wf14", + workingdir="test_wf14_{}".format(plugin), + wf_output_names=[("wfa", "NA_out", "wfa_out")]) wf.add(wfa) sub = Submitter(runnable=wf, plugin=plugin) @@ -1325,13 +1372,18 @@ def test_workflow_14a(plugin, change_dir): """workflow with a workflow as a node (no mapper, using connect_wf_input in wfa)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") - wfa = Workflow(name="wfa", workingdir="test_wfa", inputs={"a": 3}, - wf_output_names=[("NA", "out", "NA_out")]) + wfa = Workflow( + name="wfa", + workingdir="test_wfa", + inputs={"a": 3}, + wf_output_names=[("NA", "out", "NA_out")]) 
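    # Roughly how the inner workflow's result becomes visible here -- an
    # inference from wf_output_names plus node.py's
    # key_out = out[2] if len(out) == 3 else out[1], not a stated contract:
    #
    #   ("NA", "out", "NA_out")      aliases node output NA.out as wfa's "NA_out"
    #   ("wfa", "NA_out", "wfa_out") re-exports that value from wf as "wfa_out"
    #
    # so after sub.run(), wf.output["wfa_out"] should carry NA's "out".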
wfa.add(na) wfa.connect_wf_input("a", "NA", "a") - wf = Workflow(name="wf14a", workingdir="test_wf14a_{}".format(plugin), - wf_output_names=[("wfa", "NA_out", "wfa_out")]) + wf = Workflow( + name="wf14a", + workingdir="test_wf14a_{}".format(plugin), + wf_output_names=[("wfa", "NA_out", "wfa_out")]) wf.add(wfa) sub = Submitter(runnable=wf, plugin=plugin) @@ -1351,13 +1403,18 @@ def test_workflow_14b(plugin, change_dir): """workflow with a workflow as a node (no mapper, using connect_wf_input in wfa and wf)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") - wfa = Workflow(name="wfa", workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + wfa = Workflow( + name="wfa", + workingdir="test_wfa", + wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) wfa.connect_wf_input("a", "NA", "a") - wf = Workflow(name="wf14b", workingdir="test_wf14b_{}".format(plugin), - wf_output_names=[("wfa", "NA_out", "wfa_out")], inputs={"a": 3}) + wf = Workflow( + name="wf14b", + workingdir="test_wf14b_{}".format(plugin), + wf_output_names=[("wfa", "NA_out", "wfa_out")], + inputs={"a": 3}) wf.add(wfa) wf.connect_wf_input("a", "wfa", "a") @@ -1377,14 +1434,22 @@ def test_workflow_14b(plugin, change_dir): def test_workflow_15(plugin, change_dir): """workflow with a workflow as a node with mapper (like 14 but with a mapper)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - na = Node(name="NA", interface=interf_addtwo, workingdir="na", - inputs={"a": [3, 5]}, mapper="a") - wfa = Workflow(name="wfa", workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + na = Node( + name="NA", + interface=interf_addtwo, + workingdir="na", + inputs={"a": [3, 5]}, + mapper="a") + wfa = Workflow( + name="wfa", + workingdir="test_wfa", + wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) - wf = Workflow(name="wf15", workingdir="test_wf15_{}".format(plugin), - wf_output_names=[("wfa", "NA_out", "wfa_out")]) + wf = Workflow( + name="wf15", + workingdir="test_wf15_{}".format(plugin), + wf_output_names=[("wfa", "NA_out", "wfa_out")]) wf.add(wfa) sub = Submitter(runnable=wf, plugin=plugin) @@ -1402,17 +1467,23 @@ def test_workflow_15(plugin, change_dir): @python35_only def test_workflow_16(plugin, change_dir): """workflow with two nodes, and one is a workflow (no mapper)""" - wf = Workflow(name="wf16", workingdir="test_wf16_{}".format(plugin), - wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) + wf = Workflow( + name="wf16", + workingdir="test_wf16_{}".format(plugin), + wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - na = Node(name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) + na = Node( + name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) wf.add(na) # the second node does not have explicit mapper (but keeps the mapper from the NA node) interf_addvar = FunctionInterface(fun_addvar, ["out"]) nb = Node(name="NB", interface=interf_addvar, workingdir="nb") - wfb = Workflow(name="wfb", workingdir="test_wfb", inputs={"b": 10}, - wf_output_names=[("NB", "out", "NB_out")]) + wfb = Workflow( + name="wfb", + workingdir="test_wfb", + inputs={"b": 10}, + wf_output_names=[("NB", "out", "NB_out")]) wfb.add(nb) wfb.connect_wf_input("b", "NB", "b") wfb.connect_wf_input("a", "NB", "a") @@ -1442,8 +1513,10 @@ def test_workflow_16(plugin, change_dir): @python35_only def test_workflow_16a(plugin, change_dir): """workflow with two 
nodes, and one is a workflow (with mapper)""" - wf = Workflow(name="wf16a", workingdir="test_wf16a_{}".format(plugin), - wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) + wf = Workflow( + name="wf16a", + workingdir="test_wf16a_{}".format(plugin), + wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") na.map(mapper="a", inputs={"a": [3, 5]}) @@ -1452,8 +1525,11 @@ def test_workflow_16a(plugin, change_dir): # the second node does not have explicit mapper (but keeps the mapper from the NA node) interf_addvar = FunctionInterface(fun_addvar, ["out"]) nb = Node(name="NB", interface=interf_addvar, workingdir="nb") - wfb = Workflow(name="wfb", workingdir="test_wfb", inputs={"b": 10}, - wf_output_names=[("NB", "out", "NB_out")]) + wfb = Workflow( + name="wfb", + workingdir="test_wfb", + inputs={"b": 10}, + wf_output_names=[("NB", "out", "NB_out")]) wfb.add(nb) wfb.connect_wf_input("b", "NB", "b") wfb.connect_wf_input("a", "NB", "a") @@ -1497,8 +1573,15 @@ def test_current_node_1(change_dir, plugin): """Node with a current interface and inputs, no mapper, running interface""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - nn = Node(name="NA", inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}, interface=interf_bet, - workingdir="test_cnd1_{}".format(plugin), output_names=["out_file"]) + nn = Node( + name="NA", + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }, + interface=interf_bet, + workingdir="test_cnd1_{}".format(plugin), + output_names=["out_file"]) sub = Submitter(plugin=plugin, runnable=nn) sub.run() @@ -1516,10 +1599,18 @@ def test_current_node_2(change_dir, plugin): """Node with a current interface and mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - in_file_l = ["/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", - "/Users/dorota/nipype_workshop/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz"] - nn = Node(name="NA", inputs={"in_file": in_file_l}, mapper="in_file", interface=interf_bet, write_state=False, - workingdir="test_cnd2_{}".format(plugin), output_names=["out_file"]) + in_file_l = [ + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", + "/Users/dorota/nipype_workshop/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz" + ] + nn = Node( + name="NA", + inputs={"in_file": in_file_l}, + mapper="in_file", + interface=interf_bet, + write_state=False, + workingdir="test_cnd2_{}".format(plugin), + output_names=["out_file"]) sub = Submitter(plugin=plugin, runnable=nn) sub.run() @@ -1539,11 +1630,22 @@ def test_current_wf_1(change_dir, plugin): """Wf with a current interface, no mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - nn = Node(name="fsl", inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}, interface=interf_bet, - workingdir="nn", output_names=["out_file"], write_state=False) - - wf = Workflow( workingdir="test_cwf_1_{}".format(plugin), name="cw1", wf_output_names=[("fsl", "out_file", "fsl_out")], - write_state=False) + nn = Node( + name="fsl", + inputs={ + "in_file": + 
"/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }, + interface=interf_bet, + workingdir="nn", + output_names=["out_file"], + write_state=False) + + wf = Workflow( + workingdir="test_cwf_1_{}".format(plugin), + name="cw1", + wf_output_names=[("fsl", "out_file", "fsl_out")], + write_state=False) wf.add_nodes([nn]) sub = Submitter(plugin=plugin, runnable=wf) @@ -1562,11 +1664,22 @@ def test_current_wf_1a(change_dir, plugin): """Wf with a current interface, no mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - nn = Node(name="fsl", inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}, interface=interf_bet, - workingdir="nn", output_names=["out_file"], write_state=False) - - wf = Workflow(workingdir="test_cwf_1a_{}".format(plugin), name="cw1", wf_output_names=[("fsl", "out_file", "fsl_out")], - write_state=False) + nn = Node( + name="fsl", + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }, + interface=interf_bet, + workingdir="nn", + output_names=["out_file"], + write_state=False) + + wf = Workflow( + workingdir="test_cwf_1a_{}".format(plugin), + name="cw1", + wf_output_names=[("fsl", "out_file", "fsl_out")], + write_state=False) wf.add(runnable=nn) sub = Submitter(plugin=plugin, runnable=wf) @@ -1585,10 +1698,21 @@ def test_current_wf_1b(change_dir, plugin): """Wf with a current interface, no mapper; using wf.add(nipype CurrentInterface)""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - wf = Workflow(workingdir="test_cwf_1b_{}".format(plugin), name="cw1", wf_output_names=[("fsl", "out_file", "fsl_out")], - write_state=False) - wf.add(runnable=interf_bet, name="fsl", workingdir="nn", output_names=["out_file"], write_state=False, - inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}) + wf = Workflow( + workingdir="test_cwf_1b_{}".format(plugin), + name="cw1", + wf_output_names=[("fsl", "out_file", "fsl_out")], + write_state=False) + wf.add( + runnable=interf_bet, + name="fsl", + workingdir="nn", + output_names=["out_file"], + write_state=False, + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }) sub = Submitter(plugin=plugin, runnable=wf) sub.run() @@ -1605,10 +1729,21 @@ def test_current_wf_1b(change_dir, plugin): def test_current_wf_1c(change_dir, plugin): """Wf with a current interface, no mapper; using wf.add(nipype interface) """ - wf = Workflow(workingdir="test_cwf_1c_{}".format(plugin), name="cw1", wf_output_names=[("fsl", "out_file", "fsl_out")], - write_state=False) - wf.add(runnable=fsl.BET(), name="fsl", workingdir="nn", output_names=["out_file"], write_state=False, - inputs={"in_file": "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz"}) + wf = Workflow( + workingdir="test_cwf_1c_{}".format(plugin), + name="cw1", + wf_output_names=[("fsl", "out_file", "fsl_out")], + write_state=False) + wf.add( + runnable=fsl.BET(), + name="fsl", + workingdir="nn", + output_names=["out_file"], + write_state=False, + inputs={ + "in_file": + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" + }) sub = Submitter(plugin=plugin, runnable=wf) sub.run() @@ -1626,14 +1761,25 @@ def test_current_wf_2(change_dir, plugin): """Wf with a current interface 
and mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - in_file_l = ["/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", - "/Users/dorota/nipype_workshop/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz"] - - nn = Node(name="fsl", interface=interf_bet, write_state=False, - workingdir="nn", output_names=["out_file"]) - - wf = Workflow( workingdir="test_cwf_2_{}".format(plugin), name="cw2", wf_output_names=[("fsl", "out_file", "fsl_out")], - inputs={"in_file": in_file_l}, mapper="in_file", write_state=False) + in_file_l = [ + "/Users/dorota/nipype_workshop/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", + "/Users/dorota/nipype_workshop/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz" + ] + + nn = Node( + name="fsl", + interface=interf_bet, + write_state=False, + workingdir="nn", + output_names=["out_file"]) + + wf = Workflow( + workingdir="test_cwf_2_{}".format(plugin), + name="cw2", + wf_output_names=[("fsl", "out_file", "fsl_out")], + inputs={"in_file": in_file_l}, + mapper="in_file", + write_state=False) wf.add_nodes([nn]) wf.connect_wf_input("in_file", "fsl", "in_file") @@ -1655,15 +1801,25 @@ def test_current_wf_2a(change_dir, plugin): """Wf with a current interface and mapper""" interf_bet = CurrentInterface(interface=fsl.BET(), name="fsl_interface") - in_file_l = ["/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", - "/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz"] - - nn = Node(name="fsl", interface=interf_bet, write_state=False, - workingdir="nn", output_names=["out_file"], - inputs={"in_file": in_file_l}, mapper="in_file") - - wf = Workflow( workingdir="test_cwf_2a_{}".format(plugin), name="cw2a", wf_output_names=[("fsl", "out_file", "fsl_out")], - write_state=False) + in_file_l = [ + "/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", + "/data/ds000114/sub-02/ses-test/anat/sub-02_ses-test_T1w.nii.gz" + ] + + nn = Node( + name="fsl", + interface=interf_bet, + write_state=False, + workingdir="nn", + output_names=["out_file"], + inputs={"in_file": in_file_l}, + mapper="in_file") + + wf = Workflow( + workingdir="test_cwf_2a_{}".format(plugin), + name="cw2a", + wf_output_names=[("fsl", "out_file", "fsl_out")], + write_state=False) wf.add_nodes([nn]) # wf.connect_wf_input("in_file", "fsl", "in_file") diff --git a/pydra/engine/tests/test_newnode_neuro.py b/pydra/engine/tests/test_newnode_neuro.py index 71ca9e993d..6f9e5e5543 100644 --- a/pydra/engine/tests/test_newnode_neuro.py +++ b/pydra/engine/tests/test_newnode_neuro.py @@ -68,9 +68,13 @@ def test_neuro(change_dir, plugin): # #dj: why do I need outputs? 
- - wf = Workflow(name=Name, inputs=Inputs, workingdir="test_neuro_{}".format(plugin), write_state=False, - wf_output_names=[("sampler", "out_file", "sampler_out"), ("targets", "out", "target_out")]) + wf = Workflow( + name=Name, + inputs=Inputs, + workingdir="test_neuro_{}".format(plugin), + write_state=False, + wf_output_names=[("sampler", "out_file", "sampler_out"), + ("targets", "out", "target_out")]) # @interface # def select_target(subject_id, space): From 111681947a4b5fed0b8e04ee676f7e7eee2a79ec Mon Sep 17 00:00:00 2001 From: Jakub Kaczmarzyk Date: Wed, 10 Oct 2018 10:40:50 -0400 Subject: [PATCH 05/12] do not ignore .coveragerc --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 4db61f2d20..290e53fd68 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ __pycache__ *.ipynb .coverage* +!.coveragerc cov.xml From 143825b1e4436f3580f13c13bf57695ec961486a Mon Sep 17 00:00:00 2001 From: Jakub Kaczmarzyk Date: Wed, 10 Oct 2018 11:37:50 -0400 Subject: [PATCH 06/12] use line length 99 --- .style.yapf | 2 +- pydra/__about__.py | 10 +- pydra/__init__.py | 5 +- pydra/_version.py | 36 +-- pydra/engine/auxiliary.py | 70 ++---- pydra/engine/node.py | 159 ++++--------- pydra/engine/state.py | 20 +- pydra/engine/submitter.py | 24 +- pydra/engine/tests/test_auxiliary.py | 131 +++++----- pydra/engine/tests/test_newnode.py | 289 +++++++---------------- pydra/engine/tests/test_newnode_neuro.py | 3 +- 11 files changed, 247 insertions(+), 502 deletions(-) diff --git a/.style.yapf b/.style.yapf index be7f612300..9d209b37a8 100644 --- a/.style.yapf +++ b/.style.yapf @@ -55,7 +55,7 @@ blank_line_before_nested_class_or_def=False coalesce_brackets=False # The column limit. -column_limit=79 +column_limit=99 # The style for continuation alignment. 
Possible values are: # diff --git a/pydra/__about__.py b/pydra/__about__.py index 0e2b671ab3..e8a8230245 100644 --- a/pydra/__about__.py +++ b/pydra/__about__.py @@ -9,12 +9,10 @@ CLASSIFIERS = [ 'Development Status :: 2 - Pre-Alpha', 'Environment :: Console', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering' + 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', + 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX :: Linux', + 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Topic :: Scientific/Engineering' ] description = 'Pydra dataflow engine' diff --git a/pydra/__init__.py b/pydra/__init__.py index 9f20279cc9..38d0e8dd75 100644 --- a/pydra/__init__.py +++ b/pydra/__init__.py @@ -1,3 +1,2 @@ -from .__about__ import (__version__, __author__, __license__, __maintainer__, - __email__, __status__, __url__, __packagename__, - __description__, __longdesc__) +from .__about__ import (__version__, __author__, __license__, __maintainer__, __email__, + __status__, __url__, __packagename__, __description__, __longdesc__) diff --git a/pydra/_version.py b/pydra/_version.py index ff023ee1bc..46d5511949 100644 --- a/pydra/_version.py +++ b/pydra/_version.py @@ -67,12 +67,7 @@ def decorate(f): return decorate -def run_command(commands, - args, - cwd=None, - verbose=False, - hide_stderr=False, - env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -134,8 +129,8 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print("Tried directories %s but none started with prefix %s" % (str(rootdirs), + parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -242,8 +237,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command( - GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -252,10 +246,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command( - GITS, [ - "describe", "--tags", "--dirty", "--always", "--long", "--match", - "%s*" % tag_prefix - ], + GITS, + ["describe", "--tags", "--dirty", "--always", "--long", "--match", + "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: @@ -288,8 +281,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
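    # For reference, what the regex above pulls out of a well-formed describe
    # string once the optional -dirty suffix has been dealt with (example
    # tag made up; doctest-style):
    #
    #   >>> import re
    #   >>> mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', "v0.1.0-14-g2414721")
    #   >>> mo.group(1), int(mo.group(2)), mo.group(3)
    #   ('v0.1.0', 14, '2414721')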
- pieces["error"] = ( - "unable to parse git-describe output: '%s'" % describe_out) + pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag @@ -298,8 +290,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % - (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] @@ -312,13 +303,11 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command( - GITS, ["rev-list", "HEAD", "--count"], cwd=root) + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command( - GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -509,8 +498,7 @@ def get_versions(): verbose = cfg.verbose try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass diff --git a/pydra/engine/auxiliary.py b/pydra/engine/auxiliary.py index e29614082b..09e2e6fed1 100644 --- a/pydra/engine/auxiliary.py +++ b/pydra/engine/auxiliary.py @@ -11,8 +11,7 @@ def mapper2rpn(mapper, other_mappers=None): """ Functions that translate mapper to "reverse polish notation.""" output_mapper = [] - _ordering( - mapper, i=0, output_mapper=output_mapper, other_mappers=other_mappers) + _ordering(mapper, i=0, output_mapper=output_mapper, other_mappers=other_mappers) return output_mapper @@ -24,15 +23,13 @@ def _ordering(el, i, output_mapper, current_sign=None, other_mappers=None): node_nm = el[0][1:] if node_nm not in other_mappers: raise Exception("can't ask for mapper from {}".format(node_nm)) - mapper_mod = change_mapper( - mapper=other_mappers[node_nm], name=node_nm) + mapper_mod = change_mapper(mapper=other_mappers[node_nm], name=node_nm) el = (mapper_mod, el[1]) if type(el[1]) is str and el[1].startswith("_"): node_nm = el[1][1:] if node_nm not in other_mappers: raise Exception("can't ask for mapper from {}".format(node_nm)) - mapper_mod = change_mapper( - mapper=other_mappers[node_nm], name=node_nm) + mapper_mod = change_mapper(mapper=other_mappers[node_nm], name=node_nm) el = (el[0], mapper_mod) _iterate_list(el, ".", other_mappers, output_mapper=output_mapper) elif type(el) is list: @@ -40,15 +37,13 @@ def _ordering(el, i, output_mapper, current_sign=None, other_mappers=None): node_nm = el[0][1:] if node_nm not in other_mappers: raise Exception("can't ask for mapper from {}".format(node_nm)) - mapper_mod = change_mapper( - mapper=other_mappers[node_nm], name=node_nm) + mapper_mod = change_mapper(mapper=other_mappers[node_nm], name=node_nm) el[0] = mapper_mod if type(el[1]) is str and el[1].startswith("_"): node_nm = el[1][1:] if node_nm not in other_mappers: raise Exception("can't ask for mapper from {}".format(node_nm)) - mapper_mod = change_mapper( - mapper=other_mappers[node_nm], name=node_nm) + mapper_mod = 
change_mapper(mapper=other_mappers[node_nm], name=node_nm) el[1] = mapper_mod _iterate_list(el, "*", other_mappers, output_mapper=output_mapper) elif type(el) is str: @@ -64,11 +59,7 @@ def _iterate_list(element, sign, other_mappers, output_mapper): """ Used in the mapper2rpn to get recursion. """ for i, el in enumerate(element): _ordering( - el, - i, - current_sign=sign, - other_mappers=other_mappers, - output_mapper=output_mapper) + el, i, current_sign=sign, other_mappers=other_mappers, output_mapper=output_mapper) # functions used in State to know which element should be used for a specific axis @@ -90,17 +81,13 @@ def mapping_axis(state_inputs, mapper_rpn): right].shape == current_shape: #todo:should we allow for one-element array? axis_for_input[right] = current_axis else: - raise Exception( - "arrays for scalar operations should have the same size" - ) + raise Exception("arrays for scalar operations should have the same size") elif right == "OUT": if state_inputs[left].shape == current_shape: axis_for_input[left] = current_axis else: - raise Exception( - "arrays for scalar operations should have the same size" - ) + raise Exception("arrays for scalar operations should have the same size") else: if state_inputs[right].shape == state_inputs[left].shape: @@ -109,9 +96,7 @@ def mapping_axis(state_inputs, mapper_rpn): axis_for_input[left] = current_axis axis_for_input[right] = current_axis else: - raise Exception( - "arrays for scalar operations should have the same size" - ) + raise Exception("arrays for scalar operations should have the same size") stack.append("OUT") @@ -120,17 +105,14 @@ def mapping_axis(state_inputs, mapper_rpn): left = stack.pop() if left == "OUT": axis_for_input[right] = [ - i + 1 + current_axis[-1] - for i in range(state_inputs[right].ndim) + i + 1 + current_axis[-1] for i in range(state_inputs[right].ndim) ] current_axis = current_axis + axis_for_input[right] - current_shape = tuple( - [i for i in current_shape + state_inputs[right].shape]) + current_shape = tuple([i for i in current_shape + state_inputs[right].shape]) elif right == "OUT": for key in axis_for_input: axis_for_input[key] = [ - i + state_inputs[left].ndim - for i in axis_for_input[key] + i + state_inputs[left].ndim for i in axis_for_input[key] ] axis_for_input[left] = [ @@ -138,22 +120,17 @@ def mapping_axis(state_inputs, mapper_rpn): for i in range(state_inputs[left].ndim) ] current_axis = current_axis + [ - i + 1 + current_axis[-1] - for i in range(state_inputs[left].ndim) + i + 1 + current_axis[-1] for i in range(state_inputs[left].ndim) ] - current_shape = tuple( - [i for i in state_inputs[left].shape + current_shape]) + current_shape = tuple([i for i in state_inputs[left].shape + current_shape]) else: axis_for_input[left] = list(range(state_inputs[left].ndim)) axis_for_input[right] = [ - i + state_inputs[left].ndim - for i in range(state_inputs[right].ndim) + i + state_inputs[left].ndim for i in range(state_inputs[right].ndim) ] current_axis = axis_for_input[left] + axis_for_input[right] - current_shape = tuple([ - i for i in state_inputs[left].shape + - state_inputs[right].shape - ]) + current_shape = tuple( + [i for i in state_inputs[left].shape + state_inputs[right].shape]) stack.append("OUT") else: @@ -251,24 +228,19 @@ def run(self, input): try: input[key_fun] = input.pop(key_inp) except KeyError: - raise Exception( - "no {} in the input dictionary".format(key_inp)) + raise Exception("no {} in the input dictionary".format(key_inp)) fun_output = self.function(**input) - 
logger.debug("Function Interf, input={}, fun_out={}".format( - input, fun_output)) + logger.debug("Function Interf, input={}, fun_out={}".format(input, fun_output)) if type(fun_output) is tuple: if len(self._output_nm) == len(fun_output): for i, out in enumerate(fun_output): self.output[self._output_nm[i]] = out else: - raise Exception( - "length of output_nm doesnt match length of the function output" - ) + raise Exception("length of output_nm doesnt match length of the function output") elif len(self._output_nm) == 1: self.output[self._output_nm[0]] = fun_output else: - raise Exception( - "output_nm doesnt match length of the function output") + raise Exception("output_nm doesnt match length of the function output") return fun_output diff --git a/pydra/engine/node.py b/pydra/engine/node.py index a8a8a55e8f..bede18ac63 100644 --- a/pydra/engine/node.py +++ b/pydra/engine/node.py @@ -58,9 +58,7 @@ def __init__(self, self._other_mappers = other_mappers # create state (takes care of mapper, connects inputs with axes, so we can ask for specifc element) self._state = state.State( - mapper=self._mapper, - node_name=self.name, - other_mappers=self._other_mappers) + mapper=self._mapper, node_name=self.name, other_mappers=self._other_mappers) self._output = {} self._result = {} # flag that says if the node/wf is ready to run (has all input) @@ -88,9 +86,7 @@ def mapper(self, mapper): self._mapper = mapper # updating state self._state = state.State( - mapper=self._mapper, - node_name=self.name, - other_mappers=self._other_mappers) + mapper=self._mapper, node_name=self.name, other_mappers=self._other_mappers) @property def inputs(self): @@ -100,8 +96,7 @@ def inputs(self): def inputs(self, inputs): # Massage inputs dict inputs = { - ".".join((self.name, key)): - value if not isinstance(value, list) else np.array(value) + ".".join((self.name, key)): value if not isinstance(value, list) else np.array(value) for key, value in inputs.items() } self._inputs.update(inputs) @@ -140,9 +135,7 @@ def map(self, mapper, inputs=None): if mapper: # updating state if we have a new mapper self._state = state.State( - mapper=self._mapper, - node_name=self.name, - other_mappers=self._other_mappers) + mapper=self._mapper, node_name=self.name, other_mappers=self._other_mappers) def join(self, field, node=None): # TBD @@ -172,15 +165,11 @@ def get_input_el(self, ind): if not from_node.mapper: dir_nm_el_from = "" - if is_node(from_node) and is_current_interface( - from_node.interface): + if is_node(from_node) and is_current_interface(from_node.interface): file_from = self._reading_ci_output( - node=from_node, - dir_nm_el=dir_nm_el_from, - out_nm=from_socket) + node=from_node, dir_nm_el=dir_nm_el_from, out_nm=from_socket) if file_from and os.path.exists(file_from): - inputs_dict["{}.{}".format(self.name, - to_socket)] = file_from + inputs_dict["{}.{}".format(self.name, to_socket)] = file_from else: raise Exception("{} doesnt exist".format(file_from)) else: # assuming here that I want to read the file (will not be used with the current interfaces) @@ -189,11 +178,9 @@ def get_input_el(self, ind): with open(file_from) as f: content = f.readline() try: - inputs_dict["{}.{}".format(self.name, - to_socket)] = eval(content) + inputs_dict["{}.{}".format(self.name, to_socket)] = eval(content) except NameError: - inputs_dict["{}.{}".format(self.name, - to_socket)] = content + inputs_dict["{}.{}".format(self.name, to_socket)] = content return state_dict, inputs_dict @@ -201,9 +188,9 @@ def _reading_ci_output(self, dir_nm_el, 
out_nm, node=None): """used for current interfaces: checking if the output exists and returns the path if it does""" if not node: node = self - result_pklfile = os.path.join( - os.getcwd(), node.workingdir, dir_nm_el, node.interface.nn.name, - "result_{}.pklz".format(node.interface.nn.name)) + result_pklfile = os.path.join(os.getcwd(), node.workingdir, dir_nm_el, + node.interface.nn.name, "result_{}.pklz".format( + node.interface.nn.name)) if os.path.exists(result_pklfile): out_file = getattr(loadpkl(result_pklfile).outputs, out_nm) if os.path.exists(out_file): @@ -268,9 +255,8 @@ def __init__(self, if is_function_interface(self.interface): # adding node name to the interface's name mapping - self.interface.input_map = dict( - (key, "{}.{}".format(self.name, value)) - for (key, value) in self.interface.input_map.items()) + self.interface.input_map = dict((key, "{}.{}".format(self.name, value)) + for (key, value) in self.interface.input_map.items()) # list of output names taken from interface output name self.output_names = self.interface._output_nm elif is_current_interface(self.interface): @@ -299,17 +285,14 @@ def __init__(self, def run_interface_el(self, i, ind): """ running interface one element generated from node_state.""" - logger.debug("Run interface el, name={}, i={}, ind={}".format( - self.name, i, ind)) + logger.debug("Run interface el, name={}, i={}, ind={}".format(self.name, i, ind)) state_dict, inputs_dict = self.get_input_el(ind) if not self.write_state: state_dict = self.state.state_ind(ind) - dir_nm_el = "_".join( - ["{}:{}".format(i, j) for i, j in list(state_dict.items())]) + dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(state_dict.items())]) print("Run interface el, dict={}".format(state_dict)) - logger.debug( - "Run interface el, name={}, inputs_dict={}, state_dict={}".format( - self.name, inputs_dict, state_dict)) + logger.debug("Run interface el, name={}, inputs_dict={}, state_dict={}".format( + self.name, inputs_dict, state_dict)) if is_function_interface(self.interface): res = self.interface.run(inputs_dict) output = self.interface.output @@ -340,28 +323,22 @@ def _writting_results_tmp(self, state_dict, dir_nm_el, output): dir_nm_el = '' os.makedirs(os.path.join(self.workingdir, dir_nm_el), exist_ok=True) for key_out, val_out in output.items(): - with open( - os.path.join(self.workingdir, dir_nm_el, key_out + ".txt"), - "w") as fout: + with open(os.path.join(self.workingdir, dir_nm_el, key_out + ".txt"), "w") as fout: fout.write(str(val_out)) def get_output(self): """collecting all outputs and updating self._output""" for key_out in self.output_names: self._output[key_out] = {} - for (i, ind) in enumerate( - itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): if self.write_state: state_dict = self.state.state_values(ind) else: state_dict = self.state.state_ind(ind) - dir_nm_el = "_".join([ - "{}:{}".format(i, j) for i, j in list(state_dict.items()) - ]) + dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(state_dict.items())]) if self.mapper: if is_function_interface(self.interface): - output = os.path.join(self.workingdir, dir_nm_el, - key_out + ".txt") + output = os.path.join(self.workingdir, dir_nm_el, key_out + ".txt") if self.interface.out_read: with open(output) as fout: content = fout.readline() @@ -375,8 +352,7 @@ def get_output(self): (state_dict, (state_dict, self._reading_ci_output(dir_nm_el=dir_nm_el, out_nm=key_out))) else: if 
is_function_interface(self.interface): - output = os.path.join(self.workingdir, - key_out + ".txt") + output = os.path.join(self.workingdir, key_out + ".txt") if self.interface.out_read: with open(output) as fout: try: @@ -397,16 +373,14 @@ def _check_all_results(self): state_dict = self.state.state_values(ind) else: state_dict = self.state.state_ind(ind) - dir_nm_el = "_".join( - ["{}:{}".format(i, j) for i, j in list(state_dict.items())]) + dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(state_dict.items())]) if not self.mapper: dir_nm_el = "" for key_out in self.output_names: if is_function_interface(self.interface): if not os.path.isfile( - os.path.join(self.workingdir, dir_nm_el, - key_out + ".txt")): + os.path.join(self.workingdir, dir_nm_el, key_out + ".txt")): return False elif is_current_interface(self.interface): if not self._reading_ci_output(dir_nm_el, key_out): @@ -425,8 +399,7 @@ def _reading_results(self): val_l = self._dict_tuple2list(self._output[key_out]) for (st_dict, filename) in val_l: with open(filename) as fout: - self._result[key_out].append((st_dict, - eval(fout.readline()))) + self._result[key_out].append((st_dict, eval(fout.readline()))) else: # st_dict should be {} # not sure if this is used (not tested) @@ -457,12 +430,7 @@ def __init__( *args, **kwargs): super(Workflow, self).__init__( - name=name, - mapper=mapper, - inputs=inputs, - write_state=write_state, - *args, - **kwargs) + name=name, mapper=mapper, inputs=inputs, write_state=write_state, *args, **kwargs) self.graph = nx.DiGraph() # all nodes in the workflow (probably will be removed) @@ -524,9 +492,7 @@ def get_output(self): self.node_outputs = {} for nn in self.graph: if self.mapper: - self.node_outputs[nn.name] = [ - ni.get_output() for ni in self.inner_nodes[nn.name] - ] + self.node_outputs[nn.name] = [ni.get_output() for ni in self.inner_nodes[nn.name]] else: self.node_outputs[nn.name] = nn.get_output() if self.wf_output_names: @@ -536,35 +502,27 @@ def get_output(self): elif len(out) == 3: node_nm, out_nd_nm, out_wf_nm = out else: - raise Exception( - "wf_output_names should have 2 or 3 elements") + raise Exception("wf_output_names should have 2 or 3 elements") if out_wf_nm not in self._output.keys(): if self.mapper: self._output[out_wf_nm] = {} - for (i, ind) in enumerate( - itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): if self.write_state: wf_inputs_dict = self.state.state_values(ind) dir_nm_el = "_".join([ - "{}:{}".format(i, j) - for i, j in list(wf_inputs_dict.items()) + "{}:{}".format(i, j) for i, j in list(wf_inputs_dict.items()) ]) else: wf_ind_dict = self.state.state_ind(ind) - dir_nm_el = "_".join([ - "{}:{}".format(i, j) - for i, j in list(wf_ind_dict.items()) - ]) - self._output[out_wf_nm][ - dir_nm_el] = self.node_outputs[node_nm][i][ - out_nd_nm] + dir_nm_el = "_".join( + ["{}:{}".format(i, j) for i, j in list(wf_ind_dict.items())]) + self._output[out_wf_nm][dir_nm_el] = self.node_outputs[node_nm][i][ + out_nd_nm] else: - self._output[out_wf_nm] = self.node_outputs[node_nm][ - out_nd_nm] + self._output[out_wf_nm] = self.node_outputs[node_nm][out_nd_nm] else: raise Exception( - "the key {} is already used in workflow.result".format( - out_wf_nm)) + "the key {} is already used in workflow.result".format(out_wf_nm)) return self._output # dj: version without join @@ -573,8 +531,7 @@ def _check_all_results(self): """checking if all files that should be created are present""" for nn in self.graph_sorted: 
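# A small sketch of the on-disk convention used by get_output() and
# _reading_results() above: each element of a node's state gets a directory
# named "<input>:<value>" (joined with "_"), and each function output is a
# one-line text file read back with eval(). Paths are illustrative, and an
# insertion-ordered dict (Python >= 3.6) is assumed.
import os

state_dict = {"NA.a": 3, "NA.b": 0}
dir_nm_el = "_".join("{}:{}".format(i, j) for i, j in list(state_dict.items()))
assert dir_nm_el == "NA.a:3_NA.b:0"
out_file = os.path.join("na", dir_nm_el, "out.txt")  # <workingdir>/<dir_nm_el>/<out>.txt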
if nn.name in self.inner_nodes.keys(): - if not all( - [ni.is_complete for ni in self.inner_nodes[nn.name]]): + if not all([ni.is_complete for ni in self.inner_nodes[nn.name]]): return False else: if not nn.is_complete: @@ -592,24 +549,19 @@ def _reading_results(self): key_out = out[2] if len(out) == 3 else out[1] self._result[key_out] = [] if self.mapper: - for (i, ind) in enumerate( - itertools.product(*self.state.all_elements)): + for (i, ind) in enumerate(itertools.product(*self.state.all_elements)): if self.write_state: wf_inputs_dict = self.state.state_values(ind) else: wf_inputs_dict = self.state.state_ind(ind) - dir_nm_el = "_".join([ - "{}:{}".format(i, j) - for i, j in list(wf_inputs_dict.items()) - ]) + dir_nm_el = "_".join( + ["{}:{}".format(i, j) for i, j in list(wf_inputs_dict.items())]) res_l = [] - val_l = self._dict_tuple2list( - self.output[key_out][dir_nm_el]) + val_l = self._dict_tuple2list(self.output[key_out][dir_nm_el]) for val in val_l: with open(val[1]) as fout: - logger.debug( - 'Reading Results: file={}, st_dict={}'. - format(val[1], val[0])) + logger.debug('Reading Results: file={}, st_dict={}'.format( + val[1], val[0])) res_l.append((val[0], eval(fout.readline()))) self._result[key_out].append((wf_inputs_dict, res_l)) else: @@ -620,11 +572,9 @@ def _reading_results(self): if type(val) is dict: val = [v for k, v in val.items()][0] with open(val[1]) as fout: - logger.debug( - 'Reading Results: file={}, st_dict={}'.format( - val[1], val[0])) - self._result[key_out].append( - (val[0], eval(fout.readline()))) + logger.debug('Reading Results: file={}, st_dict={}'.format( + val[1], val[0])) + self._result[key_out].append((val[0], eval(fout.readline()))) def add_nodes(self, nodes): """adding nodes without defining connections @@ -742,17 +692,13 @@ def preparing(self, wf_inputs=None, wf_inputs_ind=None): wf_inputs["{}.{}".format(self.name, inp_wf)] }) else: - raise Exception("{}.{} not in the workflow inputs".format( - self.name, inp_wf)) + raise Exception("{}.{} not in the workflow inputs".format(self.name, inp_wf)) for nn in self.graph_sorted: if self.write_state: - dir_nm_el = "_".join( - ["{}:{}".format(i, j) for i, j in list(wf_inputs.items())]) + dir_nm_el = "_".join(["{}:{}".format(i, j) for i, j in list(wf_inputs.items())]) else: - dir_nm_el = "_".join([ - "{}:{}".format(i, j) - for i, j in list(wf_inputs_ind.items()) - ]) + dir_nm_el = "_".join( + ["{}:{}".format(i, j) for i, j in list(wf_inputs_ind.items())]) if not self.mapper: dir_nm_el = "" nn.workingdir = os.path.join(self.workingdir, dir_nm_el, nn.name) @@ -763,8 +709,7 @@ def preparing(self, wf_inputs=None, wf_inputs_ind=None): nn.state_inputs.update(out_node.state_inputs) nn.needed_outputs.append((out_node, out_var, inp)) #if there is no mapper provided, i'm assuming that mapper is taken from the previous node - if (not nn.mapper or - nn.mapper == out_node.mapper) and out_node.mapper: + if (not nn.mapper or nn.mapper == out_node.mapper) and out_node.mapper: nn.mapper = out_node.mapper else: pass diff --git a/pydra/engine/state.py b/pydra/engine/state.py index 57ccc8ea07..534cad23a1 100644 --- a/pydra/engine/state.py +++ b/pydra/engine/state.py @@ -12,11 +12,8 @@ def __init__(self, node_name, mapper=None, other_mappers=None): if self._mapper: # changing mapper (as in rpn), so I can read from left to right # e.g. 
if mapper=('d', ['e', 'r']), _mapper_rpn=['d', 'e', 'r', '*', '.'] - self._mapper_rpn = aux.mapper2rpn( - self._mapper, other_mappers=other_mappers) - self._input_names_mapper = [ - i for i in self._mapper_rpn if i not in ["*", "."] - ] + self._mapper_rpn = aux.mapper2rpn(self._mapper, other_mappers=other_mappers) + self._input_names_mapper = [i for i in self._mapper_rpn if i not in ["*", "."]] else: self._mapper_rpn = [] self._input_names_mapper = [] @@ -34,8 +31,7 @@ def prepare_state_input(self, state_inputs): # dictionary[key=input names] = list of axes related to # e.g. {'r': [1], 'e': [0], 'd': [0, 1]} # ndim - int, number of dimension for the "final array" (that is not created) - self._axis_for_input, self._ndim = aux.mapping_axis( - self.state_inputs, self._mapper_rpn) + self._axis_for_input, self._ndim = aux.mapping_axis(self.state_inputs, self._mapper_rpn) # list of inputs variable for each axis # e.g. [['e', 'd'], ['r', 'd']] @@ -73,9 +69,8 @@ def state_values(self, ind): for ii, index in enumerate(ind): if index > self._shape[ii] - 1: - raise IndexError( - "index {} is out of bounds for axis {} with size {}". - format(index, ii, self._shape[ii])) + raise IndexError("index {} is out of bounds for axis {} with size {}".format( + index, ii, self._shape[ii])) state_dict = {} for input, ax in self._axis_for_input.items(): @@ -99,9 +94,8 @@ def state_ind(self, ind): for ii, index in enumerate(ind): if index > self._shape[ii] - 1: - raise IndexError( - "index {} is out of bounds for axis {} with size {}". - format(index, ii, self._shape[ii])) + raise IndexError("index {} is out of bounds for axis {} with size {}".format( + index, ii, self._shape[ii])) state_dict = {} for input, ax in self._axis_for_input.items(): diff --git a/pydra/engine/submitter.py b/pydra/engine/submitter.py index 2f519a868d..e7758ecd2c 100644 --- a/pydra/engine/submitter.py +++ b/pydra/engine/submitter.py @@ -43,8 +43,7 @@ def run_node(self): self.node.prepare_state_input() self._submit_node(self.node) while not self.node.is_complete: - logger.debug("Submitter, in while, to_finish: {}".format( - self.node)) + logger.debug("Submitter, in while, to_finish: {}".format(self.node)) time.sleep(3) self.node.get_output() @@ -80,10 +79,8 @@ def run_workflow(self, workflow=None, ready=True): if workflow.write_state: workflow.preparing(wf_inputs=workflow.inputs) else: - inputs_ind = dict( - (key, None) for (key, _) in workflow.inputs.items()) - workflow.preparing( - wf_inputs=workflow.inputs, wf_inputs_ind=inputs_ind) + inputs_ind = dict((key, None) for (key, _) in workflow.inputs.items()) + workflow.preparing(wf_inputs=workflow.inputs, wf_inputs_ind=inputs_ind) self._run_workflow_nd(workflow=workflow) else: self.node_line.append((workflow, 0, ())) @@ -91,14 +88,12 @@ def run_workflow(self, workflow=None, ready=True): # this parts submits nodes that are waiting to be run # it should stop when nothing is waiting while self._nodes_check(): - logger.debug("Submitter, in while, node_line: {}".format( - self.node_line)) + logger.debug("Submitter, in while, node_line: {}".format(self.node_line)) time.sleep(3) # this part simply waiting for all "last nodes" to finish while self._output_check(): - logger.debug("Submitter, in while, to_finish: {}".format( - self._to_finish)) + logger.debug("Submitter, in while, to_finish: {}".format(self._to_finish)) time.sleep(3) # calling only for the main wf (other wf will be called inside the function) @@ -116,8 +111,7 @@ def _run_workflow_el(self, workflow, i, ind, collect_inp=False): 
workflow.preparing(wf_inputs=wf_inputs) else: wf_inputs_ind = workflow.state.state_ind(ind) - workflow.preparing( - wf_inputs=wf_inputs, wf_inputs_ind=wf_inputs_ind) + workflow.preparing(wf_inputs=wf_inputs, wf_inputs_ind=wf_inputs_ind) self._run_workflow_nd(workflow=workflow) def _run_workflow_nd(self, workflow): @@ -156,8 +150,7 @@ def _nodes_check(self): _to_remove = [] for (to_node, i, ind) in self.node_line: if hasattr(to_node, 'interface'): - print("_NODES_CHECK INPUT", to_node.name, - to_node.checking_input_el(ind)) + print("_NODES_CHECK INPUT", to_node.name, to_node.checking_input_el(ind)) if to_node.checking_input_el(ind): self._submit_node_el(to_node, i, ind) _to_remove.append((to_node, i, ind)) @@ -165,8 +158,7 @@ def _nodes_check(self): pass else: #wf if to_node.checking_input_el(ind): - self._run_workflow_el( - workflow=to_node, i=i, ind=ind, collect_inp=True) + self._run_workflow_el(workflow=to_node, i=i, ind=ind, collect_inp=True) _to_remove.append((to_node, i, ind)) else: pass diff --git a/pydra/engine/tests/test_auxiliary.py b/pydra/engine/tests/test_auxiliary.py index 3eaab01e16..3ece8cb674 100644 --- a/pydra/engine/tests/test_auxiliary.py +++ b/pydra/engine/tests/test_auxiliary.py @@ -5,12 +5,10 @@ @pytest.mark.parametrize( - "mapper, rpn", - [("a", ["a"]), (("a", "b"), ["a", "b", "."]), - (["a", "b"], ["a", "b", "*"]), - (["a", ("b", "c")], ["a", "b", "c", ".", "*"]), - ([("a", "b"), "c"], ["a", "b", ".", "c", "*"]), - (["a", ("b", ["c", "d"])], ["a", "b", "c", "d", "*", ".", "*"])]) + "mapper, rpn", [("a", ["a"]), (("a", "b"), ["a", "b", "."]), (["a", "b"], ["a", "b", "*"]), + (["a", ("b", "c")], ["a", "b", "c", ".", "*"]), + ([("a", "b"), "c"], ["a", "b", ".", "c", "*"]), + (["a", ("b", ["c", "d"])], ["a", "b", "c", "d", "*", ".", "*"])]) def test_mapper2rpn(mapper, rpn): assert aux.mapper2rpn(mapper) == rpn @@ -30,71 +28,65 @@ def test_mapper2rpn_wf_mapper(mapper, other_mappers, rpn): @pytest.mark.parametrize("mapper, mapper_changed", - [("a", "Node.a"), - (["a", ("b", "c")], ["Node.a", - ("Node.b", "Node.c")]), - (("a", ["b", "c"]), - ("Node.a", ["Node.b", "Node.c"]))]) + [("a", "Node.a"), (["a", ("b", "c")], ["Node.a", ("Node.b", "Node.c")]), + (("a", ["b", "c"]), ("Node.a", ["Node.b", "Node.c"]))]) def test_change_mapper(mapper, mapper_changed): assert aux.change_mapper(mapper, "Node") == mapper_changed -@pytest.mark.parametrize("inputs, rpn, expected", - [({ - "a": np.array([1, 2]) - }, ["a"], { - "a": [0] - }), - ({ - "a": np.array([1, 2]), - "b": np.array([3, 4]) - }, ["a", "b", "."], { - "a": [0], - "b": [0] - }), - ({ - "a": np.array([1, 2]), - "b": np.array([3, 4, 1]) - }, ["a", "b", "*"], { - "a": [0], - "b": [1] - }), - ({ - "a": np.array([1, 2]), - "b": np.array([3, 4]), - "c": np.array([1, 2, 3]) - }, ["a", "b", ".", "c", "*"], { - "a": [0], - "b": [0], - "c": [1] - }), - ({ - "a": np.array([1, 2]), - "b": np.array([3, 4]), - "c": np.array([1, 2, 3]) - }, ["c", "a", "b", ".", "*"], { - "a": [1], - "b": [1], - "c": [0] - }), - ({ - "a": np.array([[1, 2], [1, 2]]), - "b": np.array([[3, 4], [3, 3]]), - "c": np.array([1, 2, 3]) - }, ["a", "b", ".", "c", "*"], { - "a": [0, 1], - "b": [0, 1], - "c": [2] - }), - ({ - "a": np.array([[1, 2], [1, 2]]), - "b": np.array([[3, 4], [3, 3]]), - "c": np.array([1, 2, 3]) - }, ["c", "a", "b", ".", "*"], { - "a": [1, 2], - "b": [1, 2], - "c": [0] - })]) +@pytest.mark.parametrize("inputs, rpn, expected", [({ + "a": np.array([1, 2]) +}, ["a"], { + "a": [0] +}), ({ + "a": np.array([1, 2]), + "b": np.array([3, 4]) +}, ["a", 
"b", "."], { + "a": [0], + "b": [0] +}), ({ + "a": np.array([1, 2]), + "b": np.array([3, 4, 1]) +}, ["a", "b", "*"], { + "a": [0], + "b": [1] +}), + ({ + "a": np.array([1, 2]), + "b": np.array([3, 4]), + "c": np.array([1, 2, 3]) + }, ["a", "b", ".", "c", "*"], { + "a": [0], + "b": [0], + "c": [1] + }), + ({ + "a": np.array([1, 2]), + "b": np.array([3, 4]), + "c": np.array([1, 2, 3]) + }, ["c", "a", "b", ".", "*"], { + "a": [1], + "b": [1], + "c": [0] + }), + ({ + "a": np.array([[1, 2], [1, 2]]), + "b": np.array([[3, 4], [3, 3]]), + "c": np.array([1, 2, 3]) + }, ["a", "b", ".", "c", "*"], { + "a": [0, 1], + "b": [0, 1], + "c": [2] + }), + ({ + "a": np.array([[1, 2], [1, 2]]), + "b": np.array([[3, 4], [3, 3]]), + "c": np.array([1, 2, 3]) + }, ["c", "a", "b", ".", "*"], { + "a": [1, 2], + "b": [1, 2], + "c": [0] + })]) def test_mapping_axis(inputs, rpn, expected): res = aux.mapping_axis(inputs, rpn)[0] print(res) @@ -104,10 +96,7 @@ def test_mapping_axis(inputs, rpn, expected): def test_mapping_axis_error(): with pytest.raises(Exception): - aux.mapping_axis({ - "a": np.array([1, 2]), - "b": np.array([3, 4, 5]) - }, ["a", "b", "."]) + aux.mapping_axis({"a": np.array([1, 2]), "b": np.array([3, 4, 5])}, ["a", "b", "."]) @pytest.mark.parametrize("inputs, axis_inputs, ndim, expected", diff --git a/pydra/engine/tests/test_newnode.py b/pydra/engine/tests/test_newnode.py index 36cf3b6886..ca36acdb91 100644 --- a/pydra/engine/tests/test_newnode.py +++ b/pydra/engine/tests/test_newnode.py @@ -9,8 +9,7 @@ import numpy as np import pytest, pdb -python35_only = pytest.mark.skipif( - sys.version_info < (3, 5), reason="requires Python>3.4") +python35_only = pytest.mark.skipif(sys.version_info < (3, 5), reason="requires Python>3.4") @pytest.fixture(scope="module") @@ -63,8 +62,7 @@ def test_node_2(): def test_node_3(): """Node with interface, inputs and mapper""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - nn = Node( - name="NA", interface=interf_addtwo, inputs={"a": [3, 5]}, mapper="a") + nn = Node(name="NA", interface=interf_addtwo, inputs={"a": [3, 5]}, mapper="a") assert nn.mapper == "NA.a" assert (nn.inputs["NA.a"] == np.array([3, 5])).all() @@ -137,10 +135,7 @@ def test_node_5(plugin, change_dir): def test_node_6(plugin, change_dir): """Node with interface, inputs and the simplest mapper, running interface""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - nn = Node( - name="NA", - interface=interf_addtwo, - workingdir="test_nd6_{}".format(plugin)) + nn = Node(name="NA", interface=interf_addtwo, workingdir="test_nd6_{}".format(plugin)) nn.map(mapper="a", inputs={"a": [3, 5]}) assert nn.mapper == "NA.a" @@ -167,10 +162,7 @@ def test_node_6(plugin, change_dir): def test_node_7(plugin, change_dir): """Node with interface, inputs and scalar mapper, running interface""" interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nn = Node( - name="NA", - interface=interf_addvar, - workingdir="test_nd7_{}".format(plugin)) + nn = Node(name="NA", interface=interf_addvar, workingdir="test_nd7_{}".format(plugin)) # scalar mapper nn.map(mapper=("a", "b"), inputs={"a": [3, 5], "b": [2, 1]}) @@ -199,10 +191,7 @@ def test_node_7(plugin, change_dir): def test_node_8(plugin, change_dir): """Node with interface, inputs and vector mapper, running interface""" interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nn = Node( - name="NA", - interface=interf_addvar, - workingdir="test_nd8_{}".format(plugin)) + nn = Node(name="NA", interface=interf_addvar, workingdir="test_nd8_{}".format(plugin)) # [] 
for outer product nn.map(mapper=["a", "b"], inputs={"a": [3, 5], "b": [2, 1]}) @@ -272,8 +261,7 @@ def test_workflow_1(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -290,8 +278,7 @@ def test_workflow_2(plugin, change_dir): # the second node does not have explicit mapper (but keeps the mapper from the NA node) interf_addvar = FunctionInterface(fun_addvar, ["out"]) - nb = Node( - name="NB", interface=interf_addvar, inputs={"b": 10}, workingdir="nb") + nb = Node(name="NB", interface=interf_addvar, inputs={"b": 10}, workingdir="nb") # adding 2 nodes and create a connection (as it is now) wf.add_nodes([na, nb]) @@ -305,8 +292,7 @@ def test_workflow_2(plugin, change_dir): expected_A = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected_A[0][0].keys()) expected_A.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_A): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -316,8 +302,7 @@ def test_workflow_2(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 10}, 15), ({"NA.a": 5, "NB.b": 10}, 17)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -350,8 +335,7 @@ def test_workflow_2a(plugin, change_dir): expected_A = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected_A[0][0].keys()) expected_A.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_A): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -360,8 +344,7 @@ def test_workflow_2a(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -394,8 +377,7 @@ def test_workflow_2b(plugin): expected_A = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected_A[0][0].keys()) expected_A.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_A): assert 
wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -416,8 +398,7 @@ def test_workflow_2b(plugin): }, 9)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -446,8 +427,7 @@ def test_workflow_3(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -461,12 +441,7 @@ def test_workflow_3a(plugin, change_dir): interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) # using the add method with an interface - wf.add( - interf_addtwo, - workingdir="na", - mapper="a", - inputs={"a": [3, 5]}, - name="NA") + wf.add(interf_addtwo, workingdir="na", mapper="a", inputs={"a": [3, 5]}, name="NA") assert wf.nodes[0].mapper == "NA.a" @@ -477,8 +452,7 @@ def test_workflow_3a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -490,12 +464,7 @@ def test_workflow_3b(plugin, change_dir): """using add (function) method""" wf = Workflow(name="wf3b", workingdir="test_wf3b_{}".format(plugin)) # using the add method with a function - wf.add( - fun_addtwo, - workingdir="na", - mapper="a", - inputs={"a": [3, 5]}, - name="NA") + wf.add(fun_addtwo, workingdir="na", mapper="a", inputs={"a": [3, 5]}, name="NA") assert wf.nodes[0].mapper == "NA.a" @@ -506,8 +475,7 @@ def test_workflow_3b(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -541,8 +509,7 @@ def test_workflow_4(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -550,8 +517,7 @@ def test_workflow_4(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) 
expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -581,8 +547,7 @@ def test_workflow_4a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -590,8 +555,7 @@ def test_workflow_4a(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -619,8 +583,7 @@ def test_workflow_5(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -643,8 +606,7 @@ def test_workflow_5a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -674,8 +636,7 @@ def test_workflow_6(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -683,8 +644,7 @@ def test_workflow_6(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -715,8 +675,7 @@ def test_workflow_6a(plugin, change_dir): expected = [({"NA.a": 3}, 5), 
({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -724,8 +683,7 @@ def test_workflow_6a(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -754,8 +712,7 @@ def test_workflow_6b(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -763,8 +720,7 @@ def test_workflow_6b(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -778,10 +734,7 @@ def test_workflow_6b(plugin, change_dir): def test_workflow_7(plugin, change_dir): """using inputs for workflow and connect_workflow""" # adding inputs to the workflow directly - wf = Workflow( - name="wf7", - inputs={"wfa": [3, 5]}, - workingdir="test_wf7_{}".format(plugin)) + wf = Workflow(name="wf7", inputs={"wfa": [3, 5]}, workingdir="test_wf7_{}".format(plugin)) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") @@ -797,8 +750,7 @@ def test_workflow_7(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -808,10 +760,7 @@ def test_workflow_7(plugin, change_dir): @python35_only def test_workflow_7a(plugin, change_dir): """using inputs for workflow and kwarg arg in add (instead of connect)""" - wf = Workflow( - name="wf7a", - inputs={"wfa": [3, 5]}, - workingdir="test_wf7a_{}".format(plugin)) + wf = Workflow(name="wf7a", inputs={"wfa": [3, 5]}, workingdir="test_wf7a_{}".format(plugin)) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") # using kwrg argument in the add method 
(instead of connect or connect_wf_input @@ -825,8 +774,7 @@ def test_workflow_7a(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -836,8 +784,7 @@ def test_workflow_7a(plugin, change_dir): @python35_only def test_workflow_8(plugin, change_dir): """using inputs for workflow and connect_wf_input for the second node""" - wf = Workflow( - name="wf8", workingdir="test_wf8_{}".format(plugin), inputs={"b": 10}) + wf = Workflow(name="wf8", workingdir="test_wf8_{}".format(plugin), inputs={"b": 10}) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") na.map(mapper="a", inputs={"a": [3, 5]}) @@ -857,8 +804,7 @@ def test_workflow_8(plugin, change_dir): expected_A = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected_A[0][0].keys()) expected_A.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_A): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -866,8 +812,7 @@ def test_workflow_8(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 10}, 15), ({"NA.a": 5, "NB.b": 10}, 17)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -888,8 +833,7 @@ def test_workflow_9(plugin, change_dir): interf_addvar = FunctionInterface(fun_addvar, ["out"]) # _NA means that I'm using mapper from the NA node, it's the same as ("NA.a", "b") wf.add( - name="NB", runnable=interf_addvar, workingdir="nb", - a="NA.out").map_node( + name="NB", runnable=interf_addvar, workingdir="nb", a="NA.out").map_node( mapper=("_NA", "b"), inputs={"b": [2, 1]}) sub = Submitter(runnable=wf, plugin=plugin) @@ -899,8 +843,7 @@ def test_workflow_9(plugin, change_dir): expected = [({"NA.a": 3}, 5), ({"NA.a": 5}, 7)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -908,8 +851,7 @@ def test_workflow_9(plugin, change_dir): expected_B = [({"NA.a": 3, "NB.b": 2}, 7), ({"NA.a": 5, "NB.b": 1}, 8)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] 
== res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -930,8 +872,7 @@ def test_workflow_10(plugin, change_dir): interf_addvar2 = FunctionInterface(fun_addvar, ["out"]) # _NA means that I'm using mapper from the NA node, it's the same as (("NA.a", NA.b), "b") wf.add( - name="NB", runnable=interf_addvar2, workingdir="nb", - a="NA.out").map_node( + name="NB", runnable=interf_addvar2, workingdir="nb", a="NA.out").map_node( mapper=("_NA", "b"), inputs={"b": [2, 1]}) sub = Submitter(runnable=wf, plugin=plugin) @@ -941,25 +882,15 @@ def test_workflow_10(plugin, change_dir): expected = [({"NA.a": 3, "NA.b": 0}, 3), ({"NA.a": 5, "NA.b": 10}, 15)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] - expected_B = [({ - "NA.a": 3, - "NA.b": 0, - "NB.b": 2 - }, 5), ({ - "NA.a": 5, - "NA.b": 10, - "NB.b": 1 - }, 16)] + expected_B = [({"NA.a": 3, "NA.b": 0, "NB.b": 2}, 5), ({"NA.a": 5, "NA.b": 10, "NB.b": 1}, 16)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -980,8 +911,7 @@ def test_workflow_10a(plugin, change_dir): interf_addvar2 = FunctionInterface(fun_addvar, ["out"]) # _NA means that I'm using mapper from the NA node, it's the same as (["NA.a", NA.b], "b") wf.add( - name="NB", runnable=interf_addvar2, workingdir="nb", - a="NA.out").map_node( + name="NB", runnable=interf_addvar2, workingdir="nb", a="NA.out").map_node( mapper=("_NA", "b"), inputs={"b": [[2, 1], [0, 0]]}) sub = Submitter(runnable=wf, plugin=plugin) @@ -1003,8 +933,7 @@ def test_workflow_10a(plugin, change_dir): }, 15)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -1028,8 +957,7 @@ def test_workflow_10a(plugin, change_dir): }, 15)] key_sort = list(expected_B[0][0].keys()) expected_B.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[1].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[1].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_B): assert wf.nodes[1].result["out"][i][0] == res[0] assert wf.nodes[1].result["out"][i][1] == res[1] @@ -1054,12 +982,8 @@ def test_workflow_11(plugin, change_dir): interf_addvar2 = FunctionInterface(fun_addvar, ["out"]) # _NA, _NB means that I'm using mappers from the NA/NB nodes, it's the same as [("NA.a", NA.b), "NB.a"] wf.add( - name="NC", - runnable=interf_addvar2, - workingdir="nc", - a="NA.out", - b="NB.out").map_node(mapper=["_NA", - "_NB"]) # TODO: this should eb default? 
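# How the "_NA" shorthand in the map_node calls above expands: mapper2rpn
# looks the referenced node's mapper up in other_mappers, and change_mapper
# prefixes its fields with the node name. A sketch only, assuming NA was
# mapped with the scalar mapper ("a", "b"); the expected RPN is inferred
# from the _ordering logic shown earlier in this patch.
from pydra.engine import auxiliary as aux

rpn = aux.mapper2rpn(("_NA", "b"), other_mappers={"NA": ("a", "b")})
# ("_NA", "b") behaves like (("NA.a", "NA.b"), "b"):
assert rpn == ["NA.a", "NA.b", ".", "b", "."]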
+ name="NC", runnable=interf_addvar2, workingdir="nc", a="NA.out", b="NB.out").map_node( + mapper=["_NA", "_NB"]) # TODO: this should eb default? sub = Submitter(runnable=wf, plugin=plugin) sub.run() @@ -1068,8 +992,7 @@ def test_workflow_11(plugin, change_dir): expected = [({"NA.a": 3, "NA.b": 0}, 3), ({"NA.a": 5, "NA.b": 10}, 15)] key_sort = list(expected[0][0].keys()) expected.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[0].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[0].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected): assert wf.nodes[0].result["out"][i][0] == res[0] assert wf.nodes[0].result["out"][i][1] == res[1] @@ -1093,8 +1016,7 @@ def test_workflow_11(plugin, change_dir): }, 19)] key_sort = list(expected_C[0][0].keys()) expected_C.sort(key=lambda t: [t[0][key] for key in key_sort]) - wf.nodes[2].result["out"].sort( - key=lambda t: [t[0][key] for key in key_sort]) + wf.nodes[2].result["out"].sort(key=lambda t: [t[0][key] for key in key_sort]) for i, res in enumerate(expected_C): assert wf.nodes[2].result["out"][i][0] == res[0] assert wf.nodes[2].result["out"][i][1] == res[1] @@ -1175,8 +1097,7 @@ def test_workflow_12a(plugin, change_dir): # wf_out can't be used twice with pytest.raises(Exception) as exinfo: sub.run() - assert str( - exinfo.value) == "the key wf_out is already used in workflow.result" + assert str(exinfo.value) == "the key wf_out is already used in workflow.result" # tests for a workflow that have its own input and mapper @@ -1202,15 +1123,7 @@ def test_workflow_13(plugin, change_dir): sub.close() assert wf.is_complete - expected = [({ - "wf13.wfa": 3 - }, [({ - "NA.a": 3 - }, 5)]), ({ - 'wf13.wfa': 5 - }, [({ - "NA.a": 5 - }, 7)])] + expected = [({"wf13.wfa": 3}, [({"NA.a": 3}, 5)]), ({'wf13.wfa': 5}, [({"NA.a": 5}, 7)])] for i, res in enumerate(expected): assert wf.result["NA_out"][i][0] == res[0] assert wf.result["NA_out"][i][1][0][0] == res[1][0][0] @@ -1229,11 +1142,7 @@ def test_workflow_13a(plugin, change_dir): wf_output_names=[("NA", "out", "NA_out")]) interf_addvar = FunctionInterface(fun_addvar, ["out"]) na = Node( - name="NA", - interface=interf_addvar, - workingdir="na", - mapper="b", - inputs={"b": [10, 20]}) + name="NA", interface=interf_addvar, workingdir="na", mapper="b", inputs={"b": [10, 20]}) wf.add(na) wf.connect_wf_input("wfa", "NA", "a") @@ -1250,16 +1159,15 @@ def test_workflow_13a(plugin, change_dir): }, 13), ({ "NA.a": 3, "NA.b": 20 - }, 23)]), - ({ - 'wf13a.wfa': 5 - }, [({ - "NA.a": 5, - "NA.b": 10 - }, 15), ({ - "NA.a": 5, - "NA.b": 20 - }, 25)])] + }, 23)]), ({ + 'wf13a.wfa': 5 + }, [({ + "NA.a": 5, + "NA.b": 10 + }, 15), ({ + "NA.a": 5, + "NA.b": 20 + }, 25)])] for i, res in enumerate(expected): assert wf.result["NA_out"][i][0] == res[0] for j in range(len(res[1])): @@ -1285,15 +1193,7 @@ def test_workflow_13c(plugin, change_dir): sub.close() assert wf.is_complete - expected = [({ - "wf13c.wfa": 3 - }, [({ - "NA.a": 3 - }, 5)]), ({ - 'wf13c.wfa': 5 - }, [({ - "NA.a": 5 - }, 7)])] + expected = [({"wf13c.wfa": 3}, [({"NA.a": 3}, 5)]), ({'wf13c.wfa': 5}, [({"NA.a": 5}, 7)])] for i, res in enumerate(expected): assert wf.result["NA_out"][i][0] == res[0] assert wf.result["NA_out"][i][1][0][0] == res[1][0][0] @@ -1318,15 +1218,7 @@ def test_workflow_13b(plugin, change_dir): sub.close() assert wf.is_complete - expected = [({ - "wf13b.wfa": 3 - }, [({ - "NA.a": 3 - }, 5)]), ({ - 'wf13b.wfa': 5 - }, [({ - "NA.a": 5 - }, 7)])] + expected = 
[({"wf13b.wfa": 3}, [({"NA.a": 3}, 5)]), ({'wf13b.wfa': 5}, [({"NA.a": 5}, 7)])] for i, res in enumerate(expected): assert wf.result["NA_out"][i][0] == res[0] assert wf.result["NA_out"][i][1][0][0] == res[1][0][0] @@ -1341,12 +1233,8 @@ def test_workflow_13b(plugin, change_dir): def test_workflow_14(plugin, change_dir): """workflow with a workflow as a node (no mapper)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - na = Node( - name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) - wfa = Workflow( - name="wfa", - workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + na = Node(name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) + wfa = Workflow(name="wfa", workingdir="test_wfa", wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) wf = Workflow( @@ -1403,10 +1291,7 @@ def test_workflow_14b(plugin, change_dir): """workflow with a workflow as a node (no mapper, using connect_wf_input in wfa and wf)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node(name="NA", interface=interf_addtwo, workingdir="na") - wfa = Workflow( - name="wfa", - workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + wfa = Workflow(name="wfa", workingdir="test_wfa", wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) wfa.connect_wf_input("a", "NA", "a") @@ -1435,15 +1320,8 @@ def test_workflow_15(plugin, change_dir): """workflow with a workflow as a node with mapper (like 14 but with a mapper)""" interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) na = Node( - name="NA", - interface=interf_addtwo, - workingdir="na", - inputs={"a": [3, 5]}, - mapper="a") - wfa = Workflow( - name="wfa", - workingdir="test_wfa", - wf_output_names=[("NA", "out", "NA_out")]) + name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": [3, 5]}, mapper="a") + wfa = Workflow(name="wfa", workingdir="test_wfa", wf_output_names=[("NA", "out", "NA_out")]) wfa.add(na) wf = Workflow( @@ -1472,8 +1350,7 @@ def test_workflow_16(plugin, change_dir): workingdir="test_wf16_{}".format(plugin), wf_output_names=[("wfb", "NB_out"), ("NA", "out", "NA_out")]) interf_addtwo = FunctionInterface(fun_addtwo, ["out"]) - na = Node( - name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) + na = Node(name="NA", interface=interf_addtwo, workingdir="na", inputs={"a": 3}) wf.add(na) # the second node does not have explicit mapper (but keeps the mapper from the NA node) @@ -1565,8 +1442,7 @@ def test_workflow_16a(plugin, change_dir): @pytest.mark.skipif( - not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), - reason="adding data") + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_node_1(change_dir, plugin): @@ -1591,8 +1467,7 @@ def test_current_node_1(change_dir, plugin): @pytest.mark.skipif( - not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), - reason="adding data") + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_node_2(change_dir, plugin): @@ -1622,8 +1497,7 @@ def test_current_node_2(change_dir, plugin): @pytest.mark.skipif( - not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), - reason="adding data") + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def 
test_current_wf_1(change_dir, plugin): @@ -1656,8 +1530,7 @@ def test_current_wf_1(change_dir, plugin): @pytest.mark.skipif( - not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), - reason="adding data") + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_1a(change_dir, plugin): @@ -1690,8 +1563,7 @@ def test_current_wf_1a(change_dir, plugin): @pytest.mark.skipif( - not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), - reason="adding data") + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_1b(change_dir, plugin): @@ -1722,8 +1594,7 @@ def test_current_wf_1b(change_dir, plugin): @pytest.mark.skipif( - not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), - reason="adding data") + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_1c(change_dir, plugin): @@ -1753,8 +1624,7 @@ def test_current_wf_1c(change_dir, plugin): @pytest.mark.skipif( - not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), - reason="adding data") + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_2(change_dir, plugin): @@ -1793,8 +1663,7 @@ def test_current_wf_2(change_dir, plugin): @pytest.mark.skipif( - not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), - reason="adding data") + not os.path.exists("/Users/dorota/nipype_workshop/data/ds000114"), reason="adding data") @pytest.mark.parametrize("plugin", Plugins) @python35_only def test_current_wf_2a(change_dir, plugin): diff --git a/pydra/engine/tests/test_newnode_neuro.py b/pydra/engine/tests/test_newnode_neuro.py index 6f9e5e5543..2043bf5fb1 100644 --- a/pydra/engine/tests/test_newnode_neuro.py +++ b/pydra/engine/tests/test_newnode_neuro.py @@ -73,8 +73,7 @@ def test_neuro(change_dir, plugin): inputs=Inputs, workingdir="test_neuro_{}".format(plugin), write_state=False, - wf_output_names=[("sampler", "out_file", "sampler_out"), - ("targets", "out", "target_out")]) + wf_output_names=[("sampler", "out_file", "sampler_out"), ("targets", "out", "target_out")]) # @interface # def select_target(subject_id, space): From 1f3ccdedd867b23ad7d7645e40e7a405c35c042c Mon Sep 17 00:00:00 2001 From: Jakub Kaczmarzyk Date: Wed, 10 Oct 2018 14:07:38 -0400 Subject: [PATCH 07/12] revert to 4a81e61 (undo yapf-ification) --- pydra/_version.py | 127 ++++++++++++++++++++-------------------------- 1 file changed, 54 insertions(+), 73 deletions(-) diff --git a/pydra/_version.py b/pydra/_version.py index 46d5511949..081b10712d 100644 --- a/pydra/_version.py +++ b/pydra/_version.py @@ -1,3 +1,4 @@ + # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -6,6 +7,7 @@ # This file is released into the public domain. 
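# What the file below computes, in miniature: versioneer parses the output of
# "git describe --tags --dirty --always --long" (TAG-NUM-gHEX[-dirty]) into a
# "pieces" dict and renders it as a version string. A sketch of the parsing
# step only, reusing the regex from git_pieces_from_vcs; the input string is
# hypothetical.
import re

describe_out = "0.1-11-gdeadbee-dirty"
dirty = describe_out.endswith("-dirty")
if dirty:
    describe_out = describe_out[:describe_out.rindex("-dirty")]
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', describe_out)
tag, distance, short = mo.group(1), int(mo.group(2)), mo.group(3)
assert (tag, distance, short, dirty) == ("0.1", 11, "deadbee", True)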
Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) + """Git implementation of _version.py.""" import errno @@ -56,18 +58,17 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f - return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, + env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -75,12 +76,10 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env= try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen( - [c] + args, - cwd=cwd, - env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr else None)) + p = subprocess.Popen([c] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None)) break except EnvironmentError: e = sys.exc_info()[1] @@ -92,7 +91,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env= return None, None else: if verbose: - print("unable to find command, tried %s" % (commands, )) + print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: @@ -117,20 +116,16 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return { - "version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, - "error": None, - "date": None - } + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % (str(rootdirs), - parentdir_prefix)) + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -206,23 +201,16 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) - return { - "version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": None, - "date": date - } + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return { - "version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": "no suitable tags", - "date": None - } + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") @@ -237,7 +225,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], 
+                          hide_stderr=True)
     if rc != 0:
         if verbose:
             print("Directory %s not under git control" % root)
@@ -245,11 +234,10 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
 
     # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
     # if there isn't one, this yields HEX[-dirty] (no NUM)
-    describe_out, rc = run_command(
-        GITS,
-        ["describe", "--tags", "--dirty", "--always", "--long", "--match",
-         "%s*" % tag_prefix],
-        cwd=root)
+    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
+                                          "--always", "--long",
+                                          "--match", "%s*" % tag_prefix],
+                                   cwd=root)
     # --long was added in git-1.5.5
     if describe_out is None:
         raise NotThisMethod("'git describe' failed")
@@ -281,7 +269,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
     mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
     if not mo:
         # unparseable. Maybe git-describe is misbehaving?
-        pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out)
+        pieces["error"] = ("unable to parse git-describe output: '%s'"
+                           % describe_out)
         return pieces
 
     # tag
@@ -290,7 +279,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
         if verbose:
             fmt = "tag '%s' doesn't start with prefix '%s'"
             print(fmt % (full_tag, tag_prefix))
-        pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix))
+        pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
+                           % (full_tag, tag_prefix))
         return pieces
     pieces["closest-tag"] = full_tag[len(tag_prefix):]
 
@@ -303,11 +293,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
     else:
         # HEX: no tags
         pieces["closest-tag"] = None
-        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
+        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
+                                    cwd=root)
         pieces["distance"] = int(count_out)  # total number of commits
 
     # commit date: see ISO-8601 comment in git_versions_from_keywords()
-    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
+    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
+                       cwd=root)[0].strip()
     pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
 
     return pieces
@@ -338,7 +330,8 @@ def render_pep440(pieces):
             rendered += ".dirty"
     else:
         # exception #1
-        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
+        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
+                                          pieces["short"])
         if pieces["dirty"]:
             rendered += ".dirty"
     return rendered
@@ -452,13 +445,11 @@ def render_git_describe_long(pieces):
 def render(pieces, style):
     """Render the given version pieces into the requested style."""
     if pieces["error"]:
-        return {
-            "version": "unknown",
-            "full-revisionid": pieces.get("long"),
-            "dirty": None,
-            "error": pieces["error"],
-            "date": None
-        }
+        return {"version": "unknown",
+                "full-revisionid": pieces.get("long"),
+                "dirty": None,
+                "error": pieces["error"],
+                "date": None}
 
     if not style or style == "default":
         style = "pep440"  # the default
@@ -478,13 +469,9 @@ def render(pieces, style):
     else:
         raise ValueError("unknown style '%s'" % style)
 
-    return {
-        "version": rendered,
-        "full-revisionid": pieces["long"],
-        "dirty": pieces["dirty"],
-        "error": None,
-        "date": pieces.get("date")
-    }
+    return {"version": rendered, "full-revisionid": pieces["long"],
+            "dirty": pieces["dirty"], "error": None,
+            "date": pieces.get("date")}
 
 
 def get_versions():
@@ -498,7 +485,8 @@ def get_versions():
     verbose = cfg.verbose
 
     try:
-        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
+        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
+                                          verbose)
     except NotThisMethod:
         pass
 
@@ -510,13 +498,10 @@ def get_versions():
         for i in cfg.versionfile_source.split('/'):
             root = os.path.dirname(root)
     except NameError:
-        return {
-            "version": "0+unknown",
-            "full-revisionid": None,
-            "dirty": None,
-            "error": "unable to find root of source tree",
-            "date": None
-        }
+        return {"version": "0+unknown", "full-revisionid": None,
+                "dirty": None,
+                "error": "unable to find root of source tree",
+                "date": None}
 
     try:
         pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
@@ -530,10 +515,6 @@ def get_versions():
     except NotThisMethod:
         pass
 
-    return {
-        "version": "0+unknown",
-        "full-revisionid": None,
-        "dirty": None,
-        "error": "unable to compute version",
-        "date": None
-    }
+    return {"version": "0+unknown", "full-revisionid": None,
+            "dirty": None,
+            "error": "unable to compute version", "date": None}

From 4cdc244ecca5f55cfe640330e1e2042b563014ad Mon Sep 17 00:00:00 2001
From: Jakub Kaczmarzyk
Date: Wed, 10 Oct 2018 14:50:29 -0400
Subject: [PATCH 08/12] add flake8 config

---
 .flake8 | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 .flake8

diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000000..9230811575
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,4 @@
+[flake8]
+doctests = True
+exclude = **/__init__.py
+max-line-length=99

From 18a77d624754907ad21a14843f8d49e76fbece7c Mon Sep 17 00:00:00 2001
From: Jakub Kaczmarzyk
Date: Wed, 10 Oct 2018 14:52:30 -0400
Subject: [PATCH 09/12] ignore dist and build

---
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.gitignore b/.gitignore
index 290e53fd68..53d1f2b762 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
 pydra.egg-info
+build
+dist
 __pycache__
 *.pyc
 

From d9c3d7bf1965a883b9a0c1cbc63d592623fcd4aa Mon Sep 17 00:00:00 2001
From: Jakub Kaczmarzyk
Date: Wed, 10 Oct 2018 14:53:14 -0400
Subject: [PATCH 10/12] add makefile

---
 Makefile | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 Makefile

diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000..bbc1623961
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,25 @@
+install:
+	python setup.py install
+
+dist: clean
+	python setup.py sdist bdist_wheel
+
+clean-pyc:
+	find . -name '*.pyc' -type f -exec rm {} +
+	find . -name '*.pyo' -type f -exec rm {} +
+	find . -name '__pycache__' -type d -exec rm --recursive {} +
+
+clean-build:
+	rm --recursive --force build/
+	rm --recursive --force dist/
+
+clean: clean-pyc clean-build
+
+format:
+	yapf --parallel --in-place --recursive --exclude 'pydra/_version.py' pydra
+
+lint:
+	flake8
+
+test: clean-pyc
+	py.test -vs -n auto --cov pydra --cov-config .coveragerc --cov-report xml:cov.xml --doctest-modules pydra

From ddac536a7d9c5d68bc95268a40c0e999d0a53f34 Mon Sep 17 00:00:00 2001
From: Jakub Kaczmarzyk
Date: Wed, 10 Oct 2018 15:18:48 -0400
Subject: [PATCH 11/12] move flake8 config to setup.cfg

---
 .flake8   | 4 ----
 setup.cfg | 5 +++++
 2 files changed, 5 insertions(+), 4 deletions(-)
 delete mode 100644 .flake8

diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 9230811575..0000000000
--- a/.flake8
+++ /dev/null
@@ -1,4 +0,0 @@
-[flake8]
-doctests = True
-exclude = **/__init__.py
-max-line-length=99
diff --git a/setup.cfg b/setup.cfg
index c865f3e421..2924475c85 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,3 +8,8 @@ versionfile_source = pydra/_version.py
 versionfile_build = pydra/_version.py
 tag_prefix =
 parentdir_prefix =
+
+[flake8]
+doctests = True
+exclude = **/__init__.py
+max-line-length=99

From 70e189bb9ae661fae75d256083b85a7612a9bfb5 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Wed, 10 Oct 2018 15:20:25 -0400
Subject: [PATCH 12/12] MNT: Ignore editor temporary files

---
 .gitignore | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.gitignore b/.gitignore
index 53d1f2b762..3ed8cd175f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,6 @@ __pycache__
 .coverage*
 !.coveragerc
 cov.xml
+
+.*.swp
+*~