diff --git a/.gitignore b/.gitignore index 6cbbdfe..88d5232 100644 --- a/.gitignore +++ b/.gitignore @@ -2,9 +2,12 @@ *.pyo *.pyc __pycache__ -/artifacts -/dist -/.tox -/.eggs +/artifacts/ +/artifacts-*/ +/build/ +/dist/ +/.tox/ +/.eggs/ .cache *.egg-info +tests.retry diff --git a/.travis.yml b/.travis.yml index b96e202..aca37ec 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,4 +7,4 @@ install: - docker build -t taskotron . script: - - docker run -v $(pwd):$(pwd) -w $(pwd) -i -t taskotron + - docker run --cap-add=SYS_ADMIN -v $(pwd):$(pwd) -w $(pwd) -i -t taskotron diff --git a/Dockerfile b/Dockerfile index 3ff97c4..22ee8aa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,8 @@ FROM fedora RUN dnf -y install --setopt=install_weak_deps=false --setopt=tsflags=nodocs \ - --setopt=deltarpm=false python2-rpm libtaskotron-core libtaskotron-fedora \ - python3-rpm tox python2 python3 python2-dnf python3-dnf \ - python2-libarchive-c python-bugzilla && dnf clean all + --setopt=deltarpm=false python2-rpm python3-rpm tox python2-dnf \ + python3-dnf mock && dnf clean all ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 diff --git a/README.rst b/README.rst index 6fe4d58..1989c41 100644 --- a/README.rst +++ b/README.rst @@ -25,46 +25,51 @@ Currently the following checks are available: Running ------- -You can run the checks locally with -`Taskotron `__. First, -install it (you can -follow the -`Quickstart `__). -You'll also need the ``rpm``, ``dnf`` and ``libarchive-c`` Python 2 modules -(``python2-rpm``, ``python2-dnf``, ``python2-libarchive-c``). -Note that Taskotron unfortunately runs on Python 2, but the code in -this repository is Python 3 compatible as well. +To run this task locally, execute the following command as root (don't do this +on a production machine!):: -Once everything is installed you can run the task on a Koji build -using the -``name-(epoch:)version-release`` (``nevr``) identifier. + $ ansible-playbook tests.yml -e taskotron_item= -.. code:: console +where ``nevr`` is a Koji build ``name-(epoch:)version-release`` identifier. - $ runtask -i -t koji_build runtask.yml +For example:: -For example: + $ ansible-playbook tests.yml -e taskotron_item=python-gear-0.11.0-1.fc27 -.. code:: console +You can see the results in ``./artifacts/`` directory. - $ runtask -i eric-6.1.6-2.fc25 -t koji_build runtask.yml +You can also run the above in mock:: + + $ mock -r ./mock.cfg --init + $ mock -r ./mock.cfg --copyin taskotron_python_versions *.py tests.yml / + $ mock -r ./mock.cfg --shell 'ansible-playbook tests.yml -e taskotron_item=python-gear-0.11.0-1.fc27' + $ mock -r ./mock.cfg --copyout artifacts artifacts Tests ----- -There are also automatic tests available. You can run them using -`tox `__. -You'll need the above mentioned dependencies and ``python3-rpm`` -and ``python3-dnf`` installed as well. - -.. code:: console +This task is covered with functional and integration tests. +You can run them using `tox `__, but +you will need ``mock``, ``python3-rpm`` and ``python3-dnf`` installed. +For mock configuration see +`mock setup `__ +instructions. Use the following command to run the test suite:: $ tox -Automatic tests also happen on `Tarvis +The integration tests may take a while to execute, as they are +running real tasks in mock. However, for development you may +speed them up by reusing the results of the previous test run. +This is useful if you modify the test itself, without changing the +implementation of task checks. 
Use the following command to run +integration tests in a fake mode:: + + $ tox -e integration -- --fake + +The tests are also being executed on `Travis CI `__. Since Travis CI runs on Ubuntu -and Ubuntu lacks the RPM Python bindings and Taskotron, +and Ubuntu lacks the RPM Python bindings and mock, `Docker `__ is used to run the tests on Fedora. You can run the tests in Docker as well, just use the commands from the ``.travis.yml`` file. diff --git a/download_rpms.py b/download_rpms.py new file mode 100644 index 0000000..ed71102 --- /dev/null +++ b/download_rpms.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- + +'''Download correct NVRs for python-versions to operate on.''' + +import sys +import logging +from libtaskotron.directives import koji_directive + + +def download_rpms(koji_build, rpmsdir, arch=['x86_64'], arch_exclude=[], + src=True, debuginfo=False, build_log=True): + '''Download RPMs for a koji build NVR.''' + + koji = koji_directive.KojiDirective() + + print('Downloading rpms for %s into %s' % (koji_build, rpmsdir)) + params = { + 'action': 'download', + 'koji_build': koji_build, + 'arch': arch, + 'arch_exclude': arch_exclude, + 'src': src, + 'debuginfo': debuginfo, + 'target_dir': rpmsdir, + 'build_log': build_log, + } + arg_data = {'workdir': None} + koji.process(params, arg_data) + + print('Downloading complete') + + +if __name__ == '__main__': + print('Running script: %s' % sys.argv) + logging.basicConfig() + logging.getLogger('libtaskotron').setLevel(logging.DEBUG) + args = {} + + # arch is supposed to be a comma delimited string, but optional + arches = sys.argv[3] if len(sys.argv) >= 4 else '' + arches = [arch.strip() for arch in arches.split(',')] + if arches: + print('Requested arches: %s' % arches) + args['arch'] = arches + + download_rpms(koji_build=sys.argv[1], + rpmsdir=sys.argv[2], + **args + ) diff --git a/mock.cfg b/mock.cfg new file mode 100644 index 0000000..e9fc604 --- /dev/null +++ b/mock.cfg @@ -0,0 +1,7 @@ +include('/etc/mock/fedora-27-x86_64.cfg') + +config_opts['chroot_setup_cmd'] = 'install ansible dnf' +config_opts['use_host_resolv'] = True +config_opts['rpmbuild_networking'] = True +config_opts['use_nspawn'] = False +config_opts['root'] = 'fedora-27-x86_64-taskotron' diff --git a/python_versions_check.py b/python_versions_check.py index 97dc424..70ab96b 100644 --- a/python_versions_check.py +++ b/python_versions_check.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + import logging if __name__ == '__main__': # Set up logging ASAP to see potential problems during import. 
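For reference, the new ``download_rpms.py`` helper added above can also be driven from Python rather than through the ``python2 download_rpms.py ...`` call that ``tests.yml`` makes later in this change. A minimal sketch, assuming libtaskotron is installed and reusing the fallback NEVR from ``tests.yml`` (the target directory here is made up)::

    # Hedged sketch: call the helper defined in download_rpms.py directly.
    from download_rpms import download_rpms

    download_rpms(koji_build='python-gear-0.11.0-1.fc27',
                  rpmsdir='/tmp/rpms',        # made-up download location
                  arch=['x86_64'])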
@@ -25,9 +27,12 @@ from taskotron_python_versions.common import log, Package, PackageException -def run(koji_build, workdir='.', artifactsdir='artifacts'): +def run(koji_build, workdir='.', artifactsdir='artifacts', + testcase='dist.python-versions'): '''The main method to run from Taskotron''' workdir = os.path.abspath(workdir) + results_path = os.path.join(artifactsdir, 'taskotron', 'results.yml') + artifact = os.path.join(artifactsdir, 'output.log') # find files to run on files = sorted(os.listdir(workdir)) @@ -57,8 +62,6 @@ def run(koji_build, workdir='.', artifactsdir='artifacts'): if not logs: log.warn('No build.log found, that should not happen') - artifact = os.path.join(artifactsdir, 'output.log') - # put all the details form subtask in this list details = [] details.append(task_two_three(packages, koji_build, artifact)) @@ -71,26 +74,39 @@ def run(koji_build, workdir='.', artifactsdir='artifacts'): srpm_packages + packages, koji_build, artifact)) details.append(task_python_usage(logs, koji_build, artifact)) + # update testcase for all subtasks (use their existing testcase as a + # suffix) + for detail in details: + detail.checkname = '{}.{}'.format(testcase, detail.checkname) + # finally, the main detail with overall results outcome = 'PASSED' for detail in details: if detail.outcome == 'FAILED': outcome = 'FAILED' break - - details.append(check.CheckDetail(checkname='python-versions', - item=koji_build, - report_type=check.ReportType.KOJI_BUILD, - outcome=outcome)) + overall_detail = check.CheckDetail(checkname=testcase, + item=koji_build, + report_type=check.ReportType.KOJI_BUILD, + outcome=outcome) if outcome == 'FAILED': - details[-1].artifact = artifact + overall_detail.artifact = artifact + details.append(overall_detail) summary = 'python-versions {} for {}.'.format(outcome, koji_build) log.info(summary) + # generate output reportable to ResultsDB output = check.export_YAML(details) - return output + with open(results_path, 'w') as results_file: + results_file.write(output) + + return 0 if overall_detail.outcome in ['PASSED', 'INFO'] else 1 if __name__ == '__main__': - run('test') + rc = run(koji_build=sys.argv[1], + workdir=sys.argv[2], + artifactsdir=sys.argv[3], + testcase=sys.argv[4]) + sys.exit(rc) diff --git a/runtask.yml b/runtask.yml deleted file mode 100644 index 08dc378..0000000 --- a/runtask.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: python-versions -namespace: dist -desc: | - Download specified Koji build and check each RPM for Python dependencies, - if a binary RPM depends on Python 2 and 3 at the same time, - it usually indicates some packaging errors, except for rare whitelisted cases. 
-maintainer: churchyard - -input: - args: - - koji_build - -environment: - rpm: - - rpm-python - - python2-dnf - - python2-libarchive-c - - python-bugzilla - -actions: - - name: download rpms from koji - koji: - action: download - koji_build: ${koji_build} - arch: ['all'] - src: True - build_log: True - - - name: check each rpm for python dependencies - python: - file: python_versions_check.py - callable: run - workdir: ${workdir} - koji_build: ${koji_build} - artifactsdir: ${artifactsdir} - export: python_versions_output - - - name: report results to resultsdb - resultsdb: - results: ${python_versions_output} diff --git a/taskotron_python_versions/executables.py b/taskotron_python_versions/executables.py index 6698536..1e8736c 100644 --- a/taskotron_python_versions/executables.py +++ b/taskotron_python_versions/executables.py @@ -90,7 +90,7 @@ def task_executables(packages, koji_build, artifact): package, '\n * '.join(sorted(bins))) detail = check.CheckDetail( - checkname='python-versions.executables', + checkname='executables', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -99,7 +99,7 @@ def task_executables(packages, koji_build, artifact): detail.artifact = artifact write_to_artifact(artifact, MESSAGE.format(message), INFO_URL) - log.info('python-versions.executables {} for {}'.format( + log.info('subcheck executables {} for {}'.format( outcome, koji_build)) return detail diff --git a/taskotron_python_versions/naming_scheme.py b/taskotron_python_versions/naming_scheme.py index b9b260c..b3429f4 100644 --- a/taskotron_python_versions/naming_scheme.py +++ b/taskotron_python_versions/naming_scheme.py @@ -91,7 +91,7 @@ def task_naming_scheme(packages, koji_build, artifact): package.filename)) detail = check.CheckDetail( - checkname='python-versions.naming_scheme', + checkname='naming_scheme', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -104,7 +104,7 @@ def task_naming_scheme(packages, koji_build, artifact): else: problems = 'No problems found.' - summary = 'python-versions.naming_scheme {} for {}. {}'.format( + summary = 'subcheck naming_scheme {} for {}. 
{}'.format( outcome, koji_build, problems) log.info(summary) diff --git a/taskotron_python_versions/py3_support.py b/taskotron_python_versions/py3_support.py index 73bba2a..de82edd 100644 --- a/taskotron_python_versions/py3_support.py +++ b/taskotron_python_versions/py3_support.py @@ -109,7 +109,7 @@ def task_py3_support(packages, koji_build, artifact): ' upstream, skipping Py3 support check') detail = check.CheckDetail( - checkname='python-versions.py3_support', + checkname='py3_support', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -118,7 +118,7 @@ def task_py3_support(packages, koji_build, artifact): detail.artifact = artifact write_to_artifact(artifact, MESSAGE.format(message), INFO_URL) - log.info('python-versions.py3_support {} for {}'.format( + log.info('subcheck py3_support {} for {}'.format( outcome, koji_build)) return detail diff --git a/taskotron_python_versions/python_usage.py b/taskotron_python_versions/python_usage.py index 049e417..ecfb955 100644 --- a/taskotron_python_versions/python_usage.py +++ b/taskotron_python_versions/python_usage.py @@ -61,7 +61,7 @@ def task_python_usage(logs, koji_build, artifact): outcome = 'FAILED' detail = check.CheckDetail( - checkname='python-versions.python_usage', + checkname='python_usage', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -74,7 +74,7 @@ def task_python_usage(logs, koji_build, artifact): else: problems = 'No problems found.' - summary = 'python-versions.python_usage {} for {}. {}'.format( + summary = 'subcheck python_usage {} for {}. {}'.format( outcome, koji_build, problems) log.info(summary) diff --git a/taskotron_python_versions/requires.py b/taskotron_python_versions/requires.py index 2a6c5d1..6966b32 100644 --- a/taskotron_python_versions/requires.py +++ b/taskotron_python_versions/requires.py @@ -162,7 +162,7 @@ def task_requires_naming_scheme(packages, koji_build, artifact): message_rpms += message detail = check.CheckDetail( - checkname='python-versions.requires_naming_scheme', + checkname='requires_naming_scheme', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -174,7 +174,7 @@ def task_requires_naming_scheme(packages, koji_build, artifact): else: problems = 'No problems found.' - summary = 'python-versions.requires_naming_scheme {} for {}. {}'.format( + summary = 'subcheck requires_naming_scheme {} for {}. {}'.format( outcome, koji_build, problems) log.info(summary) diff --git a/taskotron_python_versions/two_three.py b/taskotron_python_versions/two_three.py index e1c2332..512b3de 100644 --- a/taskotron_python_versions/two_three.py +++ b/taskotron_python_versions/two_three.py @@ -123,7 +123,7 @@ def task_two_three(packages, koji_build, artifact): outcome = 'FAILED' bads[package.filename] = py_versions - detail = check.CheckDetail(checkname='python-versions.two_three', + detail = check.CheckDetail(checkname='two_three', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -143,7 +143,7 @@ def task_two_three(packages, koji_build, artifact): else: problems = 'No problems found.' - summary = 'python-versions.two_three {} for {}. {}'.format( + summary = 'subcheck two_three {} for {}. 
{}'.format( outcome, koji_build, problems) log.info(summary) diff --git a/taskotron_python_versions/unversioned_shebangs.py b/taskotron_python_versions/unversioned_shebangs.py index 6ad0d1d..1ca5eef 100644 --- a/taskotron_python_versions/unversioned_shebangs.py +++ b/taskotron_python_versions/unversioned_shebangs.py @@ -94,7 +94,7 @@ def task_unversioned_shebangs(packages, koji_build, artifact): package, shebang, '\n '.join(sorted(scripts))) detail = check.CheckDetail( - checkname='python-versions.unversioned_shebangs', + checkname='unversioned_shebangs', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -105,7 +105,7 @@ def task_unversioned_shebangs(packages, koji_build, artifact): else: shebang_message = 'No problems found.' - log.info('python-versions.unversioned_shebangs {} for {}. {}'.format( + log.info('subcheck unversioned_shebangs {} for {}. {}'.format( outcome, koji_build, shebang_message)) return detail diff --git a/test/integration/conftest.py b/test/integration/conftest.py new file mode 100644 index 0000000..7afeabb --- /dev/null +++ b/test/integration/conftest.py @@ -0,0 +1,4 @@ +def pytest_addoption(parser): + parser.addoption('--fake', action='store_true', default=False, + help='don\'t run the code, reuse the result from ' + 'last tests') diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index 8bf90fa..53bff1a 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -1,4 +1,7 @@ from collections import namedtuple +import contextlib +import glob +import shutil import subprocess import sys from textwrap import dedent @@ -11,54 +14,98 @@ Result = namedtuple('Result', ['outcome', 'artifact', 'item']) -def parse_results(log): - ''' - From the given stdout log, parse the results - ''' - start = 'results:' - results = [] - record = False +class MockEnv: + '''Use this to work with mock. 
Mutliple instances are not safe.''' + mock = ['mock', '-r', './mock.cfg'] + + def __init__(self): + self._run(['--init'], check=True) + + def _run(self, what, **kwargs): + return subprocess.run(self.mock + what, **kwargs) + + def copy_in(self, files): + self._run(['--copyin'] + files + ['/'], check=True) + + def copy_out(self, directory, target, *, clean_target=False): + if clean_target: + with contextlib.suppress(FileNotFoundError): + shutil.rmtree(target) + self._run(['--copyout', directory, target], check=True) + + def shell(self, command): + cp = self._run(['--shell', command]) + return cp.returncode + + def orphanskill(self): + self._run(['--orphanskill']) - for line in log.splitlines(): - if line.strip() == start: - record = True - if record: - results.append(line) - if not line: - break - if not results: - raise RuntimeError('Could not parse output') - return yaml.load('\n'.join(results))['results'] +class FakeMockEnv(MockEnv): + '''Use this to fake the mock interactions''' + mock = ['echo', 'mock'] + def copy_out(self, directory, target, *, clean_target=False): + '''Fake it, never clean target''' + return super().copy_out(directory, target, clean_target=False) -def run_task(nevr, *, reterr=False): + +@pytest.fixture(scope="session") +def mock(request): + '''Setup a mock we can run Ansible tasks in under root''' + if request.config.getoption('--fake'): + mockenv = FakeMockEnv() + else: + mockenv = MockEnv() + files = ['taskotron_python_versions'] + glob.glob('*.py') + ['tests.yml'] + mockenv.copy_in(files) + yield mockenv + mockenv.orphanskill() + + +def parse_results(path): + ''' + From the given result file, parse the results ''' - Run the task on a Koji build. + with open(path) as f: + return yaml.load(f)['results'] + + +def run_task(nevr, *, mock): + ''' + Run the task on a Koji build in given mock. Returns a dict with Results (outcome, artifact, item) - If reterr is true, returns a tuple with the above and captured stderr - If reterr is false, prints the stderr + Actually returns a tuple with the above and captured log ''' - proc = subprocess.Popen( - ['runtask', '-i', nevr, '-t', 'koji_build', 'runtask.yml'], - stderr=subprocess.PIPE, - universal_newlines=True, - ) - _, err = proc.communicate() - if proc.returncode != 0: - print(err, file=sys.stderr) # always print stderr in this case - raise RuntimeError('runtask exited with {}'.format(proc.returncode)) - results = parse_results(err) + exit_code = mock.shell('ansible-playbook tests.yml ' + '-e taskotron_item={}'.format(nevr)) + artifacts = 'artifacts-{}'.format(nevr) + mock.copy_out('artifacts', artifacts, clean_target=True) + mock.shell('rm artifacts -rf') # purge the logs + + with open(artifacts + '/test.log') as f: + log = f.read() + + # 0 for PASSED + # 2 for FAILED + if exit_code not in (0, 2): + print(log, file=sys.stderr) + raise RuntimeError('mock shell ended with {}'.format(exit_code)) + + results = parse_results(artifacts + '/taskotron/results.yml') + + # we need to preserve the artifacts for each nevr separately + # but the saved path is just ./artifacts/... 
+ def fix_artifact_path(path): + if path is None: + return None + return path.replace('/artifacts/', '/{}/'.format(artifacts)) ret = {r['checkname']: Result(r.get('outcome'), - r.get('artifact'), + fix_artifact_path(r.get('artifact')), r.get('item')) for r in results} - if reterr: - return ret, err - - print(err, file=sys.stderr) - return ret + return ret, log def fixtures_factory(nevr): @@ -69,8 +116,8 @@ def fixtures_factory(nevr): See examples bellow.''' if not nevr.startswith('_'): @pytest.fixture(scope="session") - def _results(): - return run_task(nevr, reterr=True) + def _results(mock): + return run_task(nevr, mock=mock) return _results @@ -141,17 +188,17 @@ def test_number_of_results(results, request): 'bucky')) def test_two_three_passed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.two_three'].outcome == 'PASSED' + assert results['dist.python-versions.two_three'].outcome == 'PASSED' def test_two_three_failed(tracer): - assert tracer['python-versions.two_three'].outcome == 'FAILED' + assert tracer['dist.python-versions.two_three'].outcome == 'FAILED' @pytest.mark.parametrize('results', ('tracer', 'copr', 'admesh')) def test_one_failed_result_is_total_failed(results, request): results = request.getfixturevalue(results) - assert results['python-versions'].outcome == 'FAILED' + assert results['dist.python-versions'].outcome == 'FAILED' @pytest.mark.parametrize(('results', 'task'), @@ -160,12 +207,12 @@ def test_one_failed_result_is_total_failed(results, request): ('admesh', 'requires_naming_scheme'))) def test_artifact_is_the_same(results, task, request): results = request.getfixturevalue(results) - assert (results['python-versions'].artifact == - results['python-versions.' + task].artifact) + assert (results['dist.python-versions'].artifact == + results['dist.python-versions.' 
+ task].artifact) def test_artifact_contains_two_three_and_looks_as_expected(tracer): - result = tracer['python-versions.two_three'] + result = tracer['dist.python-versions.two_three'] with open(result.artifact) as f: artifact = f.read() @@ -180,17 +227,17 @@ def test_artifact_contains_two_three_and_looks_as_expected(tracer): @pytest.mark.parametrize('results', ('eric', 'epub', 'twine', 'vdirsyncer')) def test_naming_scheme_passed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.naming_scheme'].outcome == 'PASSED' + assert results['dist.python-versions.naming_scheme'].outcome == 'PASSED' @pytest.mark.parametrize('results', ('copr', 'six', 'admesh', 'bucky')) def test_naming_scheme_failed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.naming_scheme'].outcome == 'FAILED' + assert results['dist.python-versions.naming_scheme'].outcome == 'FAILED' def test_artifact_contains_naming_scheme_and_looks_as_expected(copr): - result = copr['python-versions.naming_scheme'] + result = copr['dist.python-versions.naming_scheme'] with open(result.artifact) as f: artifact = f.read() @@ -203,20 +250,20 @@ def test_artifact_contains_naming_scheme_and_looks_as_expected(copr): @pytest.mark.parametrize('results', ('eric', 'twine', 'six')) def test_requires_naming_scheme_passed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.requires_naming_scheme'] + task_result = results['dist.python-versions.requires_naming_scheme'] assert task_result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('admesh', 'copr')) def test_requires_naming_scheme_failed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.requires_naming_scheme'] + task_result = results['dist.python-versions.requires_naming_scheme'] assert task_result.outcome == 'FAILED' def test_artifact_contains_requires_naming_scheme_and_looks_as_expected( tracer): - result = tracer['python-versions.requires_naming_scheme'] + result = tracer['dist.python-versions.requires_naming_scheme'] with open(result.artifact) as f: artifact = f.read() @@ -238,7 +285,7 @@ def test_artifact_contains_requires_naming_scheme_and_looks_as_expected( def test_requires_naming_scheme_contains_python(yum): - result = yum['python-versions.requires_naming_scheme'] + result = yum['dist.python-versions.requires_naming_scheme'] with open(result.artifact) as f: artifact = f.read() @@ -251,20 +298,20 @@ def test_requires_naming_scheme_contains_python(yum): 'copr', 'epub', 'twine', 'bucky')) def test_executables_passed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.executables'] + task_result = results['dist.python-versions.executables'] assert task_result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('docutils',)) def test_executables_failed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.executables'] + task_result = results['dist.python-versions.executables'] assert task_result.outcome == 'FAILED' def test_artifact_contains_executables_and_looks_as_expected( docutils): - result = docutils['python-versions.executables'] + result = docutils['dist.python-versions.executables'] with open(result.artifact) as f: artifact = f.read() @@ -297,18 +344,20 @@ def test_artifact_contains_executables_and_looks_as_expected( 'epub', 'twine', 'nodejs')) def 
test_unvesioned_shebangs_passed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.unversioned_shebangs'].outcome == 'PASSED' + result = results['dist.python-versions.unversioned_shebangs'] + assert result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('yum', 'tracer', 'bucky')) def test_unvesioned_shebangs_failed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.unversioned_shebangs'].outcome == 'FAILED' + result = results['dist.python-versions.unversioned_shebangs'] + assert result.outcome == 'FAILED' def test_artifact_contains_unversioned_shebangs_and_looks_as_expected( tracer): - result = tracer['python-versions.unversioned_shebangs'] + result = tracer['dist.python-versions.unversioned_shebangs'] with open(result.artifact) as f: artifact = f.read() @@ -328,14 +377,14 @@ def test_artifact_contains_unversioned_shebangs_and_looks_as_expected( 'copr', 'epub', 'twine', 'docutils')) def test_py3_support_passed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.py3_support'] + task_result = results['dist.python-versions.py3_support'] assert task_result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('bucky',)) def test_py3_support_failed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.py3_support'] + task_result = results['dist.python-versions.py3_support'] assert task_result.outcome == 'FAILED' @@ -347,7 +396,7 @@ def test_artifact_contains_py3_support_and_looks_as_expected( gets ported to Python 3 and its Bugzilla gets closed. See https://bugzilla.redhat.com/show_bug.cgi?id=1367012 """ - result = bucky['python-versions.py3_support'] + result = bucky['dist.python-versions.py3_support'] with open(result.artifact) as f: artifact = f.read() @@ -367,19 +416,19 @@ def test_artifact_contains_py3_support_and_looks_as_expected( 'copr', 'epub', 'twine', 'docutils')) def test_python_usage_passed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.python_usage'] + task_result = results['dist.python-versions.python_usage'] assert task_result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('jsonrpc',)) def test_python_usage_failed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.python_usage'] + task_result = results['dist.python-versions.python_usage'] assert task_result.outcome == 'FAILED' def test_artifact_contains_python_usage_and_looks_as_expected(jsonrpc): - result = jsonrpc['python-versions.python_usage'] + result = jsonrpc['dist.python-versions.python_usage'] with open(result.artifact) as f: artifact = f.read() @@ -388,7 +437,7 @@ def test_artifact_contains_python_usage_and_looks_as_expected(jsonrpc): assert dedent(""" You've used /usr/bin/python during build on the following arches: - jsonrpc-glib-3.27.4-1.fc28: armv7hl, i686, x86_64 + jsonrpc-glib-3.27.4-1.fc28: x86_64 Use /usr/bin/python3 or /usr/bin/python2 explicitly. /usr/bin/python will be removed or switched to Python 3 in the future. 
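The integration tests above no longer scrape runtask stderr; they read the exported results file instead. A minimal sketch of inspecting such a file by hand, assuming a finished run left ``artifacts/taskotron/results.yml`` behind (note that checknames now carry the ``dist.python-versions`` prefix)::

    # Mirrors parse_results()/run_task() in test_integration.py above.
    import yaml

    with open('artifacts/taskotron/results.yml') as f:
        results = yaml.load(f)['results']

    outcomes = {r['checkname']: r.get('outcome') for r in results}
    # e.g. outcomes.get('dist.python-versions.two_three') -> 'PASSED' or 'FAILED'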
diff --git a/tests.yml b/tests.yml new file mode 100644 index 0000000..dcef3da --- /dev/null +++ b/tests.yml @@ -0,0 +1,66 @@ +# Run python-versions on {{ taskotron_item }} NVR build + +- hosts: localhost + remote_user: root + vars: + testcase: dist.python-versions + taskotron_generic_task: true + # below are fallback vars for local testing + artifacts: ./artifacts + taskotron_item: python-gear-0.11.0-1.fc27 # you should really override at least this :) + taskotron_supported_arches: + - x86_64 + tasks: + - name: Install required packages + dnf: + name: "{{ item }}" + state: latest + with_items: + - rpm-python + - python2-dnf + - python2-libarchive-c + - python-bugzilla + - libtaskotron-core + - libtaskotron-fedora + + - name: Make sure taskotron results dir exists + # this is for placing results.yml file + file: + path: "{{ artifacts }}/taskotron" + state: directory + + - name: Create work dir + tempfile: + path: /var/tmp + state: directory + prefix: task-{{ testcase }}_ + register: workdir + + - name: Print work dir + debug: + var: workdir.path + + - name: Compute architectures to download + set_fact: + download_arches: "{{ taskotron_supported_arches | join(',') }}" + + - name: Print architectures to download + debug: + var: download_arches + + - block: + - name: Download RPMs from Koji + shell: > + python2 download_rpms.py {{ taskotron_item }} {{ workdir.path }} + {{ download_arches }} + &> {{ artifacts }}/test.log + + - name: Run task + shell: > + python2 python_versions_check.py {{ taskotron_item }} {{ workdir.path }} + {{ artifacts }} {{ testcase }} + &>> {{ artifacts }}/test.log + always: + - name: Print results location + debug: + msg: 'You can see task results at: {{ artifacts | realpath }}' diff --git a/tox.ini b/tox.ini index 5a1213a..e463d0c 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,7 @@ deps = pytest libarchive-c bugzilla + requests commands = python -m pytest -v {posargs} test/functional sitepackages = True
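As a usage note, ``python_versions_check.py`` now behaves like a plain script instead of returning YAML to Taskotron. A rough sketch of the call that ``tests.yml`` performs, assuming the RPMs were already downloaded into a work directory (the workdir path below is made up)::

    # Arguments follow run(): koji_build, workdir, artifactsdir, testcase.
    import subprocess

    rc = subprocess.call(['python2', 'python_versions_check.py',
                          'python-gear-0.11.0-1.fc27',  # koji_build (NEVR)
                          '/var/tmp/task-workdir',      # workdir (made-up path)
                          './artifacts',                # artifactsdir
                          'dist.python-versions'])      # testcase prefix
    # run() exits with 0 when the overall outcome is PASSED or INFO, 1 otherwise.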