Refurbish eval_func() #3963

Merged: 7 commits, Dec 30, 2019
Changes from 6 commits
24 changes: 3 additions & 21 deletions datalad/cmdline/main.py
@@ -175,30 +175,12 @@ def setup_parser(
of the command; 'continue' works like 'ignore', but an error causes a
non-zero exit code; 'stop' halts on first failure and yields non-zero exit
code. A failure is any result with status 'impossible' or 'error'.""")
parser.add_argument(
'--proc-pre', dest='common_proc_pre',
nargs='+',
action='append',
metavar=('<PROCEDURE NAME>', 'ARGS'),
help="""Dataset procedure to run before the main command (see run-procedure
command for details). This option can be given more than once to run
multiple procedures in the order in which they were given.
It is important to specify the target dataset via the --dataset argument
of the main command."""),
parser.add_argument(
'--proc-post', dest='common_proc_post',
nargs='+',
action='append',
metavar=('<PROCEDURE NAME>', 'ARGS'),
help="""Like --proc-pre, but procedures are executed after the main command
has finished."""),
parser.add_argument(
'--cmd', dest='_', action='store_true',
help="""syntactical helper that can be used to end the list of global
command line options before the subcommand label. Options like
--proc-pre can take an arbitrary number of arguments and may require
to be followed by a single --cmd in order to enable identification
of the subcommand.""")
command line options before the subcommand label. Options taking
an arbitrary number of arguments may require to be followed by a single
--cmd in order to enable identification of the subcommand.""")

# yoh: atm we only dump to console. Might adopt the same separation later on
# and for consistency will call it --verbose-level as well for now
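For context, the --cmd helper that survives this hunk exists because greedy nargs='+' global options can swallow the subcommand label. A minimal standalone sketch with plain argparse (the --opts option is hypothetical, not DataLad's):

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('--opts', nargs='+', default=[])         # hypothetical greedy global option
    p.add_argument('--cmd', dest='_', action='store_true')  # no-op delimiter, as in the hunk above
    subs = p.add_subparsers(dest='subcmd')
    subs.add_parser('create')

    args = p.parse_args(['--opts', 'a', 'create'])
    print(args.opts, args.subcmd)   # ['a', 'create'] None -- 'create' got swallowed

    args = p.parse_args(['--opts', 'a', '--cmd', 'create'])
    print(args.opts, args.subcmd)   # ['a'] create -- the delimiter ends the option's argument list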
18 changes: 0 additions & 18 deletions datalad/core/local/tests/test_create.py
@@ -385,24 +385,6 @@ def test_saving_prior(topdir):
assert_in('ds2', ds1.subdatasets(result_xfm='relpaths'))


@with_tempfile(mkdir=True)
def test_create_withprocedure(path):
# first without
ds = create(path)
assert(not op.lexists(op.join(ds.path, 'README.rst')))
ds.remove()
assert(not op.lexists(ds.path))
# now for reals...
ds = create(
# needs to identify the dataset, otherwise post-proc
# procedure doesn't know what to run on
dataset=path,
proc_post=[['cfg_metadatatypes', 'xmp', 'datacite']])
assert_repo_status(path)
ds.config.reload()
eq_(ds.config['datalad.metadata.nativetype'], ('xmp', 'datacite'))


@with_tempfile(mkdir=True)
def test_create_withcfg(path):
ds = create(
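The behavior the removed test exercised remains achievable by running the procedure explicitly after creation; a hedged sketch using the Python API as it appears elsewhere in this diff (the list-form spec is assumed to be accepted, mirroring the removed proc_post value):

    from datalad.api import create

    ds = create(path)  # 'path' as in the removed test
    # run the procedure explicitly instead of via proc_post
    ds.run_procedure(['cfg_metadatatypes', 'xmp', 'datacite'])
    ds.config.reload()
    assert ds.config['datalad.metadata.nativetype'] == ('xmp', 'datacite')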
11 changes: 7 additions & 4 deletions datalad/core/local/tests/test_resulthooks.py
@@ -31,6 +31,8 @@ def test_basics(src, dst):
(ds.pathobj / 'file1').write_text('some')
ds.save()
sub = ds.create('subds')
# second one for a result_xfm test below
ds.create('subds2')
eq_(sub.config.get('datalad.metadata.nativetype'), None)

# now clone the super
@@ -39,8 +41,6 @@
# on install to have 'bids' listed as a metadata type
clone.config.set(
'datalad.result-hook.alwaysbids.call-json',
# the spec is like --proc-post/pre, but has the dataset to run on as
# the first element
# string substitutions based on the result record are supported
'run_procedure {{"dataset":"{path}","spec":"cfg_metadatatypes bids"}}',
where='local',
@@ -88,11 +88,14 @@ def test_basics(src, dst):
'{"type":["in", ["file"]],"action":"get","status":"notneeded"}',
where='local',
)
# TODO resetting of detached HEAD seems to come after the install result
# and wipes out the change
# setup done, now see if it works
clone.get('subds')
clone_sub = Dataset(clone.pathobj / 'subds')
eq_(clone_sub.config.get('datalad.metadata.nativetype'), 'bids')
# now the same thing with a result_xfm, should make no difference
clone.get('subds2')
clone_sub2 = Dataset(clone.pathobj / 'subds2')
eq_(clone_sub2.config.get('datalad.metadata.nativetype'), 'bids')

# hook auto-unlocks the file
if not on_windows:
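For reference, the hook wiring in this test boils down to two config items per hook, 'call-json' and 'match-json'. A condensed sketch of the pattern (the match condition for the 'alwaysbids' hook is not visible in this hunk, so the one below is an assumption modeled on the 'get' matcher shown above):

    from datalad.api import Dataset

    clone = Dataset('/tmp/clone')  # stand-in for the clone created in the test
    clone.config.set(
        'datalad.result-hook.alwaysbids.call-json',
        # string substitutions based on the result record are supported
        'run_procedure {{"dataset":"{path}","spec":"cfg_metadatatypes bids"}}',
        where='local')
    clone.config.set(
        'datalad.result-hook.alwaysbids.match-json',
        # assumed matcher: fire on any successfully installed dataset
        '{"type":["in", ["dataset"]],"action":"install","status":"ok"}',
        where='local')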
4 changes: 1 addition & 3 deletions datalad/interface/base.py
@@ -681,7 +681,7 @@ def call_from_parser(cls, args):
# XXX define or better get from elsewhere
common_opts = ('change_path', 'common_debug', 'common_idebug', 'func',
'help', 'log_level', 'logger', 'pbs_runner',
'result_renderer', 'proc_pre', 'proc_post', 'subparser')
'result_renderer', 'subparser')
argnames = [name for name in dir(args)
if not (name.startswith('_') or name in common_opts)]
kwargs = {k: getattr(args, k) for k in argnames if is_api_arg(k)}
@@ -712,8 +712,6 @@ def call_from_parser(cls, args):
# eval_results can't distinguish between --report-{status,type}
# not specified via the CLI and None passed via the Python API.
kwargs['result_filter'] = res_filter
kwargs['proc_pre'] = args.common_proc_pre
kwargs['proc_post'] = args.common_proc_post
try:
ret = cls.__call__(**kwargs)
if inspect.isgenerator(ret):
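The surviving part of call_from_parser() is a generic namespace-to-kwargs filter; a standalone sketch of the same pattern (is_api_arg() is DataLad-internal and approximated here by the underscore test; names are illustrative):

    import argparse

    COMMON_OPTS = ('func', 'help', 'log_level', 'subparser')  # abridged from the hunk

    def namespace_to_kwargs(args):
        # keep only attributes that look like API arguments, mirroring the
        # argnames/kwargs lines in the hunk above
        argnames = [name for name in dir(args)
                    if not (name.startswith('_') or name in COMMON_OPTS)]
        return {k: getattr(args, k) for k in argnames}

    ns = argparse.Namespace(path='.', log_level='info', _private=1)
    print(namespace_to_kwargs(ns))  # {'path': '.'}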
11 changes: 0 additions & 11 deletions datalad/interface/common_opts.py
@@ -308,15 +308,6 @@
that carries the result dictionaries of the failures in its `failed`
attribute.""",
constraints=EnsureChoice('ignore', 'continue', 'stop')),
proc_pre=Parameter(
doc="""DataLad procedure to run prior to the main command. The argument
a list of lists with procedure names and optional arguments.
Procedures are called in the order their are given in this list.
It is important to provide the respective target dataset to run a procedure
on as the `dataset` argument of the main command."""),
proc_post=Parameter(
doc="""Like `proc_pre`, but procedures are executed after the main command
has finished."""),
)

eval_defaults = dict(
@@ -325,6 +316,4 @@
result_renderer=None,
result_xfm=None,
on_failure='continue',
proc_pre=None,
proc_post=None,
)
20 changes: 0 additions & 20 deletions datalad/interface/run_procedure.py
@@ -275,26 +275,6 @@ class RunProcedure(Interface):
- 'datalad.procedures.<NAME>.help'
will be shown on `datalad run-procedure --help-proc NAME` to provide a
description and/or usage info for procedure NAME

*Customize other commands with procedures*

On execution of any commands, DataLad inspects two additional
configuration settings:

- 'datalad.<name>.proc-pre'

- 'datalad.<name>.proc-post'

where '<name>' is the name of a DataLad command. Using this mechanism
DataLad can be instructed to run one or more procedures before or
after the execution of a given command. For example, configuring
a set of metadata types in any newly created dataset can be achieved
via:

% datalad -c 'datalad.create.proc-post=cfg_metadatatypes xmp image' create -d myds

As procedures run on datasets, it is necessary to explicitly identify
the target dataset via the -d (--dataset) option.
"""
_params_ = dict(
spec=Parameter(
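With the proc-pre/proc-post configuration gone, the closest replacement in this changeset is the result-hook mechanism exercised in test_resulthooks.py. A hedged sketch of what the removed create example could look like as a hook (the hook name 'metatypes' and the match condition are assumptions):

    from datalad.api import Dataset

    ds = Dataset('myds')  # stand-in path
    ds.config.set(
        'datalad.result-hook.metatypes.call-json',
        'run_procedure {{"dataset":"{path}","spec":"cfg_metadatatypes xmp image"}}',
        where='local')
    ds.config.set(
        'datalad.result-hook.metatypes.match-json',
        # assumed: fire once the dataset was created successfully
        '{"action":"create","status":"ok"}',
        where='local')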
4 changes: 1 addition & 3 deletions datalad/interface/tests/test_base.py
@@ -43,8 +43,6 @@ def _new_args(**kwargs):
common_on_failure=None, # ['ignore', 'continue', 'stop']
common_report_status=None, # ['all', 'success', 'failure', 'ok', 'notneeded', 'impossible', 'error']
common_report_type=None, # ['dataset', 'file']
common_proc_pre=None,
common_proc_post=None,
),
kwargs
)
@@ -151,4 +149,4 @@ def test_nagen():
def test_nadict():
d = nadict({1: 2})
eq_(d[1], 2)
eq_(str(d[2]), NA_STRING)
eq_(str(d[2]), NA_STRING)
56 changes: 0 additions & 56 deletions datalad/interface/tests/test_run_procedure.py
@@ -70,57 +70,6 @@ def test_dirty(path):
assert_repo_status(ds.path)


@known_failure_windows #FIXME
@with_tree(tree={
'code': {'datalad_test_proc.py': """\
import sys
import os.path as op
from datalad.api import save, Dataset

with open(op.join(sys.argv[1], 'fromproc.txt'), 'w') as f:
f.write('hello\\n')
save(dataset=Dataset(sys.argv[1]), path='fromproc.txt')
"""}})
@with_tempfile
def test_basics(path, super_path):
ds = Dataset(path).create(force=True)
ds.run_procedure('cfg_yoda')
assert_false(ds.repo.is_under_annex("README.md"))
# save the procedure
ds.save('code')
# configure dataset to look for procedures in its code folder
ds.config.add(
'datalad.locations.dataset-procedures',
'code',
where='dataset')
# commit this procedure config for later use in a clone:
ds.save(op.join('.datalad', 'config'))
# configure dataset to run the demo procedure prior to the clean command
ds.config.add(
'datalad.clean.proc-pre',
'datalad_test_proc',
where='local')
# run command that should trigger the demo procedure
ds.clean()
# look for traces
ok_file_has_content(op.join(ds.path, 'fromproc.txt'), 'hello\n')

# make a fresh dataset:
super = Dataset(super_path).create()
# configure dataset to run the demo procedure prior to the clean command
super.config.add(
'datalad.clean.proc-pre',
'datalad_test_proc',
where='local')
# 'super' doesn't know any procedures but should get to know one by
# installing the above as a subdataset
super.install('sub', source=ds.path)
# run command that should trigger the demo procedure
super.clean()
# look for traces
ok_file_has_content(op.join(super.path, 'fromproc.txt'), 'hello\n')


@skip_if(cond=on_windows and cfg.obtain("datalad.repo.version") < 6)
@with_tree(tree={
'code': {'datalad_test_proc.py': """\
@@ -160,11 +109,6 @@ def test_procedure_discovery(path, super_path):
'datalad.locations.dataset-procedures',
'code',
where='dataset')
# configure dataset to run the demo procedure prior to the clean command
ds.config.add(
'datalad.clean.proc-pre',
'datalad_test_proc',
where='dataset')
ds.save(op.join('.datalad', 'config'))

# run discovery on the dataset:
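For context, the discovery setup the remaining test keeps needs only one config item, committed so that clones inherit it; a minimal sketch following the calls visible above (discover=True mirrors 'datalad run-procedure --discover'):

    from datalad.api import Dataset

    ds = Dataset('myds')  # stand-in path
    # look for procedures in the dataset's code/ directory and commit the
    # setting so installed clones inherit it, as in the test above
    ds.config.add('datalad.locations.dataset-procedures', 'code', where='dataset')
    ds.save('.datalad/config')
    for res in ds.run_procedure(discover=True):
        print(res)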