
Commit

Merge pull request #27 from yarikoptic/rf-changes-from-datalad
Rf changes from datalad
mih committed Aug 21, 2015
2 parents 8a4c942 + 24e25e8 commit 77060c5
Showing 6 changed files with 113 additions and 75 deletions.
14 changes: 5 additions & 9 deletions .travis.yml
@@ -11,16 +11,12 @@ env:
- PYTHON=python PYSUF=''
# - PYTHON=python3 PYSUF=3 : python3-numpy not currently available
install:
- sudo rmdir /dev/shm
- sudo ln -Tsf /{run,dev}/shm
- sudo apt-get update -qq
- sudo apt-get install lsb-release
- source /etc/lsb-release
- echo ${DISTRIB_CODENAME}
- wget -O- http://neuro.debian.net/lists/${DISTRIB_CODENAME}.de-md.full | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
- sudo apt-key adv --recv-keys --keyserver pgp.mit.edu 2649A5A9
- sudo apt-get update -qq
# to prevent IPv6 being used for APT
- sudo bash -c "echo 'Acquire::ForceIPv4 \"true\";' > /etc/apt/apt.conf.d/99force-ipv4"
# The ultimate one-liner setup for NeuroDebian repository
- bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh)
#- sudo apt-get install $PYTHON-dev
- sudo pip install six
- sudo apt-get install -qq $PYTHON-numpy
- sudo apt-get install -qq $PYTHON-scipy
- sudo apt-get install -qq $PYTHON-nibabel
16 changes: 5 additions & 11 deletions testkraut/__init__.py
@@ -28,17 +28,11 @@ def __call__(mcs, sid, instance, *args):
mcs._instances[sid] = instance
return mcs._instances[sid]

class __Singleton:
"""To ensure single instance of a class instantiation (object)
"""

__metaclass__ = _SingletonType
def __init__(self, *args):
pass
# Provided __call__ just to make silly pylint happy
def __call__(self):
raise NotImplementedError
# Awkward way to define Python2 and Python3 compatible use of metaclasses
# Ref: https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef#metaclasses
__Singleton = _SingletonType(str('__Singleton'), (), {
'__doc__': "To ensure single instance of a class instantiation (object)"
})

#
# As the very first step: Setup configuration registry instance and
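
The replacement above swaps the Python-2-only __metaclass__ attribute for a direct call to the metaclass, the bilingual pattern described in the referenced wiki page. A minimal, self-contained sketch of that pattern (class and variable names here are invented for illustration, not part of this commit):

# Hypothetical sketch: creating a class by calling the metaclass directly is
# valid syntax on both Python 2 and Python 3.
class Meta(type):
    def __call__(cls, *args, **kwargs):
        # intercept instantiation, as a singleton-enforcing metaclass would
        print("creating an instance of %s" % cls.__name__)
        return super(Meta, cls).__call__(*args, **kwargs)

# Py2-only spelling:  class Widget(object): __metaclass__ = Meta
# Py3-only spelling:  class Widget(object, metaclass=Meta): ...
# Bilingual spelling, as used in the diff above:
Widget = Meta(str('Widget'), (object,), {'__doc__': "demo class"})

assert isinstance(Widget, Meta)
w = Widget()   # routed through Meta.__call__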
26 changes: 14 additions & 12 deletions testkraut/config.py
@@ -10,7 +10,9 @@

__docformat__ = 'restructuredtext'

from ConfigParser import SafeConfigParser
from six.moves.configparser import SafeConfigParser
from six import iteritems

import os.path
from os.path import join as opj

@@ -92,9 +94,9 @@ def __init__(self, filenames=None):
self.__cfg_filenames = []

# set critical defaults
for sec, vars in ConfigManager._DEFAULTS.iteritems():
for sec, vars in iteritems(ConfigManager._DEFAULTS):
self.add_section(sec)
for key, value in vars.iteritems():
for key, value in iteritems(vars):
self.set(sec, key, value)

# now get the setting
@@ -191,11 +193,11 @@ def get(self, section, option, default=None, **kwargs):

try:
return SafeConfigParser.get(self, section, option, **kwargs)
except ValueError, e:
except ValueError as e:
# provide somewhat descriptive error
raise ValueError, \
"Failed to obtain value from configuration for %s.%s. " \
"Original exception was: %s" % (section, option, e)
raise ValueError(
"Failed to obtain value from configuration for %s.%s. "
"Original exception was: %s" % (section, option, e))


def getboolean(self, section, option, default=None):
@@ -215,7 +217,7 @@ def getboolean(self, section, option, default=None):
else:
boolean_states = self.BOOLEAN_STATES
if default.lower() not in boolean_states:
raise ValueError, 'Not a boolean: %s' % default
raise ValueError('Not a boolean: %s' % default)
return boolean_states[default.lower()]

return SafeConfigParser.getboolean(self, section, option)
@@ -235,8 +237,8 @@ def get_as_dtype(self, section, option, dtype, default=None):
return default
try:
return SafeConfigParser._get(self, section, dtype, option)
except ValueError, e:
except ValueError as e:
# provide somewhat descriptive error
raise ValueError, \
"Failed to obtain value from configuration for %s.%s. " \
"Original exception was: %s" % (section, option, e)
raise ValueError(
"Failed to obtain value from configuration for %s.%s. "
"Original exception was: %s" % (section, option, e))
24 changes: 17 additions & 7 deletions testkraut/spec.py
@@ -14,6 +14,9 @@
import difflib
from uuid import uuid1 as uuid

from six import string_types, iteritems
from six.moves import xrange

__allowed_spec_keys__ = [
'assertions',
'authors',
@@ -38,9 +41,14 @@ def _raise(exception, why, input=None):

def _verify_tags(struct, tags, name):
for tag in tags:
if not tag in struct:
_raise(ValueError,
"mandatory key '%s' is not in %s" % (tag, name))
if isinstance(tag, set):
if not tag.intersection(struct):
_raise(ValueError,
"at least one of the keys %s must be in %s" % (tag, name))
else:
if not tag in struct:
_raise(ValueError,
"mandatory key '%s' is not in %s" % (tag, name))

def _verify_spec_tags(specs, tags, name):
for i, os_id in enumerate(specs):
@@ -67,7 +75,7 @@ def __init__(self, src=None):
dict.__init__(self)
if isinstance(src, file):
self.update(json.load(src))
elif isinstance(src, basestring):
elif isinstance(src, string_types):
self.update(json.loads(src))
elif isinstance(src, dict):
self.update(src)
@@ -79,8 +87,10 @@ def __init__(self, src=None):
self._check()

def _check(self):
# Late import to prevent circular imports
from .testcase import __spec_matchers__
_verify_tags(self, ('id', 'version', 'tests'), 'SPEC')
_verify_spec_tags(self.get('outputs', {}), ('type', 'value'),
_verify_spec_tags(self.get('outputs', {}), ('type', set(__spec_matchers__.keys())),
'outputs')
_verify_spec_tags(self.get('inputs', {}), ('type', 'value'),
'inputs')
@@ -111,7 +121,7 @@ def save(self, filename, minimize=False):
spec_file = open(filename, 'w')
if minimize:
# don't write empty containers
towrite = dict([(k, v) for k, v in self.iteritems()
towrite = dict([(k, v) for k, v in iteritems(self)
if not (isSequenceType(v) or isMappingType(v)) \
or len(v)])
else:
@@ -194,7 +204,7 @@ def diff(fr, to, recursive_list=False, min_abs_numdiff=None,
return dtree
else:
return None
elif isinstance(fr, basestring):
elif isinstance(fr, string_types):
# any string
if not fr == to:
return {'ndiff': difflib.ndiff(('%s\n' % fr).splitlines(True),
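
With the _verify_tags() change above, an entry in the tag tuple may now be a set, meaning "at least one of these keys must be present"; this is how _check() can accept any matcher field for an output instead of a literal 'value'. A simplified, self-contained restatement of that logic (not an import from testkraut):

def verify_tags(struct, tags, name):
    # plain tags are mandatory keys; a set stands for "at least one of these"
    for tag in tags:
        if isinstance(tag, set):
            if not tag.intersection(struct):
                raise ValueError(
                    "at least one of the keys %s must be in %s" % (tag, name))
        elif tag not in struct:
            raise ValueError("mandatory key '%s' is not in %s" % (tag, name))

# an output entry may declare any one of several alternative keys
verify_tags({'type': 'string', 'contains': 'foo'},
            ('type', set(['value', 'contains', 'matches'])), 'outputs')   # passes
try:
    verify_tags({'type': 'string'},
                ('type', set(['value', 'contains'])), 'outputs')
except ValueError as exc:
    print(exc)   # none of the alternative keys is present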
71 changes: 51 additions & 20 deletions testkraut/testcase.py
@@ -16,14 +16,30 @@
from json import dumps as jds
from functools import wraps

from six import string_types, iteritems, text_type

import logging
lgr = logging.getLogger(__name__)

from testtools import TestCase, RunTest
from testtools.content import Content, text_content
from testtools.content_type import ContentType, UTF8_TEXT
from testtools import matchers as tm
from testtools.matchers import Equals, Annotate, FileExists, Contains, DirExists
from testtools.matchers import Equals, Annotate, FileExists, Contains, DirExists, \
MatchesRegex, StartsWith, EndsWith
# To be added whenever testtools gets upgraded (1.5.0 has those already exposed)
#DoesNotEndWith, DoesNotStartWith

__spec_matchers__ = {
'value': Equals,
'contains': Contains,
'matches': MatchesRegex,
'startswith': StartsWith,
'endswith': EndsWith,
# 'doesnotstartwith': DoesNotStartWith,
# 'doesnotendwith': DoesNotEndWith,
}

import testtools.matchers as tt_matchers
from . import matchers as tk_matchers

@@ -81,7 +97,7 @@ def __new__(cls, name, bases, attr):
if hasattr(attr[method_name], "template"):
source = attr[method_name]
source_name = method_name.lstrip("_")
for test_name, args in source.template.items():
for test_name, args in list(source.template.items()):
parg, kwargs = args
new_name = "test_%s" % test_name
new_methods[new_name] = _method_partial(source, *parg, **kwargs)
@@ -122,12 +138,15 @@ def discover_specs(paths=None):
# we actually found a new one
lgr.debug("discovered test SPEC '%s'" % spec_id)
discovered[spec_id] = spec_fname
except Exception, e:
# TODO: provide configuration variable allowing to avoid this
# swallow-everything catcher to troubleshoot problems in the code
# inside
except Exception as e:
# not a valid SPEC
lgr.warning("ignoring '%s': no a valid SPEC file: %s (%s)"
% (spec_fname, str(e), e.__class__.__name__))
# wrap spec file locations in TestArgs
return dict([(k, TestArgs(v)) for k, v in discovered.iteritems()])
return dict([(k, TestArgs(v)) for k, v in iteritems(discovered)])


class TestFromSPEC(TestCase):
@@ -279,12 +298,12 @@ def _execute_python_test(self, testid, testspec):
sys.stderr = capture_stderr = StringIO()
try:
if 'code' in testspec:
exec testspec['code'] in {}, {}
exec(testspec['code'], {}, {})
elif 'file' in testspec:
execfile(testspec['file'], {}, {})
else:
raise ValueError("no test code found")
except Exception, e:
except Exception as e:
execinfo['exception'] = dict(type=e.__class__.__name__,
info=str(e))
if not 'shouldfail' in testspec or testspec['shouldfail'] == False:
@@ -333,7 +352,7 @@ def _execute_shell_test(self, testid, testspec):
texec.returncode,
Annotate("test shell command '%s' yielded non-zero exit code" % cmd,
Equals(0)))
except OSError, e:
except OSError as e:
lgr.error("%s: %s" % (e.__class__.__name__, str(e)))
if not 'shouldfail' in testspec or testspec['shouldfail'] == False:
self.assertThat(e,
@@ -364,7 +383,7 @@ def _execute_nipype_test(self, testid, testspec):
locals = dict()
try:
execfile(testwffilepath, dict(), locals)
except Exception, e:
except Exception as e:
lgr.error("%s: %s" % (e.__class__.__name__, str(e)))
self.assertThat(e,
Annotate("test workflow setup failed: %s (%s)"
@@ -388,7 +407,7 @@ def _execute_nipype_test(self, testid, testspec):
sys.stderr = capture_stderr = StringIO()
try:
exec_graph = workflow.run()
except Exception, e:
except Exception as e:
execinfo['exception'] = dict(type=e.__class__.__name__,
info=str(e))
if not 'shouldfail' in testspec or testspec['shouldfail'] == False:
@@ -435,10 +454,22 @@ def _check_output_presence(self, spec):
elif ospectype == 'string' and ospec_id.startswith('tests'):
execinfo = self._details['exec_info']
sec, idx, field = ospec_id.split('::')
self.assertThat(
execinfo[idx][field],
Annotate("unexpected output for '%s'" % ospec_id,
Equals(ospec['value'])))
for f, matcher in iteritems(__spec_matchers__):
if f in ospec:
# allow for multiple target values (given a matcher) being
# specified. For some matchers it might make no sense
# (e.g. "endswith")
targets = ospec[f]
for target in (targets if isinstance(targets, list) else [targets]):
target = text_type.replace(target, "<NEWLINE>", os.linesep)
# TODO: This replacement maybe should be done elsewhere
# to have a general solution. It's now affecting string-type only.
# Additionally, "<NEWLINE>" may appear in some output intentionally,
# so let's find sth closer to be 'unique'.
self.assertThat(
execinfo[idx][field],
Annotate("unexpected output for '%s'" % ospec_id,
matcher(target)))
else:
raise NotImplementedError(
"dunno how to handle output type '%s' yet"
@@ -447,7 +478,7 @@ def _check_output_presence(self, spec):

def _compute_metrics(self, spec, info):
metricspecs = spec.get('metrics', {})
for mid, mspec in metricspecs.iteritems():
for mid, mspec in iteritems(metricspecs):
metric = mspec.get('metric', None)
if metric is None:
lgr.warning("broken metric spec '%s': no metric given" % mid)
@@ -472,7 +503,7 @@ def _compute_metrics(self, spec, info):

def _check_assertions(self, spec, metric_info):
specs = spec.get('assertions', {})
for aid, aspec in specs.iteritems():
for aid, aspec in iteritems(specs):
lgr.debug("check assertion '%s'" % aid)
# preconditions
self.assertThat(aspec, Contains('value'))
@@ -497,7 +528,7 @@ def _check_assertions(self, spec, metric_info):
assertion = matcher(
**dict(
zip([(k, _resolve_metric_value(v, metric_info))
for k, v in args.iteritems()])))
for k, v in iteritems(args)])))
else:
assertion = matcher(_resolve_metric_value(args, metric_info))
# value to match
@@ -547,7 +578,7 @@ def _get_system_info(self):
return TestFromSPEC._system_info

def _verify_dependencies(self, spec):
for dep_id, depspec in spec.get('dependencies', {}).iteritems():
for dep_id, depspec in iteritems(spec.get('dependencies', {})):
if not 'type' in depspec or not 'location' in depspec:
raise ValueError("dependency SPEC '%s' contains no 'type' or no 'location' field"
% dep_id)
@@ -577,7 +608,7 @@ def _prepare_environment(self, spec):
# unset if null
if env in os.environ:
del os.environ[env]
elif isinstance(env_spec[env], basestring):
elif isinstance(env_spec[env], string_types):
# set if string
# set the new one
os.environ[env] = str(env_spec[env])
@@ -599,7 +630,7 @@ def _prepare_environment(self, spec):
def _restore_environment(self):
if self._environ_restore is None:
return
for env, val in self._environ_restore.iteritems():
for env, val in iteritems(self._environ_restore):
if val is None:
if env in os.environ:
del os.environ[env]
@@ -614,7 +645,7 @@ def _get_dep_info(self):
default=False):
return
spec = self._cur_spec
for dep_id, depspec in spec.get('dependencies', {}).iteritems():
for dep_id, depspec in iteritems(spec.get('dependencies', {})):
if not 'type' in depspec or not 'location' in depspec:
raise ValueError("dependency SPEC '%s' contains no 'type' or no 'location' field"
% dep_id)
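
The __spec_matchers__ mapping introduced above lets an output SPEC request Equals, Contains, MatchesRegex, StartsWith, or EndsWith checks rather than only an exact 'value' comparison, with optional lists of target values. A hypothetical sketch of how such a mapping drives testtools matchers; the SPEC fragment and observed output are invented, only the field names follow the diff:

from testtools.matchers import Equals, Contains, MatchesRegex, StartsWith, EndsWith

spec_matchers = {
    'value': Equals,
    'contains': Contains,
    'matches': MatchesRegex,
    'startswith': StartsWith,
    'endswith': EndsWith,
}

# a single output entry may carry several matcher fields, each with one target or a list of targets
ospec = {'type': 'string', 'contains': 'world', 'startswith': 'Hello', 'matches': r'Hello\s+\w+'}
observed = "Hello world"

for field, matcher in spec_matchers.items():
    if field not in ospec:
        continue
    targets = ospec[field]
    for target in (targets if isinstance(targets, list) else [targets]):
        # a testtools matcher returns None on success and a Mismatch object otherwise;
        # inside a TestCase this would read self.assertThat(observed, matcher(target))
        assert matcher(target).match(observed) is None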
