From b405c1f57991a577f67dd80315687778d73ed491 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20P=C3=A1ral?= Date: Fri, 12 Jan 2018 13:56:17 +0100 Subject: [PATCH 01/20] convert to ansiblized Taskotron (generic) task integration tests are disabled for the moment --- README.rst | 26 ++++------- python_versions_check.py | 42 ++++++++++++----- runtask.yml | 40 ----------------- taskotron_python_versions/executables.py | 4 +- taskotron_python_versions/naming_scheme.py | 4 +- taskotron_python_versions/py3_support.py | 4 +- taskotron_python_versions/python_usage.py | 4 +- taskotron_python_versions/requires.py | 4 +- taskotron_python_versions/two_three.py | 4 +- .../unversioned_shebangs.py | 4 +- tests.yml | 45 +++++++++++++++++++ tox.ini | 15 ++++--- 12 files changed, 106 insertions(+), 90 deletions(-) mode change 100644 => 100755 python_versions_check.py delete mode 100644 runtask.yml create mode 100644 tests.yml diff --git a/README.rst b/README.rst index 6fe4d58..c04a69a 100644 --- a/README.rst +++ b/README.rst @@ -25,29 +25,19 @@ Currently the following checks are available: Running ------- -You can run the checks locally with -`Taskotron `__. First, -install it (you can -follow the -`Quickstart `__). -You'll also need the ``rpm``, ``dnf`` and ``libarchive-c`` Python 2 modules -(``python2-rpm``, ``python2-dnf``, ``python2-libarchive-c``). -Note that Taskotron unfortunately runs on Python 2, but the code in -this repository is Python 3 compatible as well. - -Once everything is installed you can run the task on a Koji build -using the -``name-(epoch:)version-release`` (``nevr``) identifier. +To run this task locally, execute the following command as root (don't do this +on a production machine!):: -.. code:: console + $ ansible-playbook tests.yml -e taskotron_item= - $ runtask -i -t koji_build runtask.yml +where ``nevr`` is a Koji build ``name-(epoch:)version-release`` identifier. -For example: +For example:: -.. 
code:: console + $ ansible-playbook tests.yml -e taskotron_item=python-gear-0.11.0-1.fc27 + +You can see the results in ``./artifacts/`` directory. - $ runtask -i eric-6.1.6-2.fc25 -t koji_build runtask.yml Tests ----- diff --git a/python_versions_check.py b/python_versions_check.py old mode 100644 new mode 100755 index 97dc424..ec4e2a5 --- a/python_versions_check.py +++ b/python_versions_check.py @@ -1,3 +1,6 @@ +#!/usr/bin/python2 +# -*- coding: utf-8 -*- + import logging if __name__ == '__main__': # Set up logging ASAP to see potential problems during import. @@ -25,9 +28,15 @@ from taskotron_python_versions.common import log, Package, PackageException -def run(koji_build, workdir='.', artifactsdir='artifacts'): +def run(koji_build, workdir='.', artifactsdir='artifacts', + testcase='dist.python-versions'): '''The main method to run from Taskotron''' workdir = os.path.abspath(workdir) + resultsdir = os.path.join(artifactsdir, 'taskotron') + if not os.path.exists(resultsdir): + os.makedirs(resultsdir) + results_path = os.path.join(resultsdir, 'results.yml') + artifact = os.path.join(artifactsdir, 'output.log') # find files to run on files = sorted(os.listdir(workdir)) @@ -57,8 +66,6 @@ def run(koji_build, workdir='.', artifactsdir='artifacts'): if not logs: log.warn('No build.log found, that should not happen') - artifact = os.path.join(artifactsdir, 'output.log') - # put all the details form subtask in this list details = [] details.append(task_two_three(packages, koji_build, artifact)) @@ -71,26 +78,39 @@ def run(koji_build, workdir='.', artifactsdir='artifacts'): srpm_packages + packages, koji_build, artifact)) details.append(task_python_usage(logs, koji_build, artifact)) + # update testcase for all subtasks (use their existing testcase as a + # suffix) + for detail in details: + detail.checkname = '{}.{}'.format(testcase, detail.checkname) + # finally, the main detail with overall results outcome = 'PASSED' for detail in details: if detail.outcome == 'FAILED': 
outcome = 'FAILED' break - - details.append(check.CheckDetail(checkname='python-versions', - item=koji_build, - report_type=check.ReportType.KOJI_BUILD, - outcome=outcome)) + overall_detail = check.CheckDetail(checkname=testcase, + item=koji_build, + report_type=check.ReportType.KOJI_BUILD, + outcome=outcome) if outcome == 'FAILED': - details[-1].artifact = artifact + overall_detail.artifact = artifact + details.append(overall_detail) summary = 'python-versions {} for {}.'.format(outcome, koji_build) log.info(summary) + # generate output reportable to ResultsDB output = check.export_YAML(details) - return output + with open(results_path, 'w') as results_file: + results_file.write(output) + + return 0 if overall_detail.outcome in ['PASSED', 'INFO'] else 1 if __name__ == '__main__': - run('test') + rc = run(koji_build=sys.argv[1], + workdir=sys.argv[2], + artifactsdir=sys.argv[3], + testcase=sys.argv[4]) + sys.exit(rc) diff --git a/runtask.yml b/runtask.yml deleted file mode 100644 index 08dc378..0000000 --- a/runtask.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: python-versions -namespace: dist -desc: | - Download specified Koji build and check each RPM for Python dependencies, - if a binary RPM depends on Python 2 and 3 at the same time, - it usually indicates some packaging errors, except for rare whitelisted cases. 
-maintainer: churchyard - -input: - args: - - koji_build - -environment: - rpm: - - rpm-python - - python2-dnf - - python2-libarchive-c - - python-bugzilla - -actions: - - name: download rpms from koji - koji: - action: download - koji_build: ${koji_build} - arch: ['all'] - src: True - build_log: True - - - name: check each rpm for python dependencies - python: - file: python_versions_check.py - callable: run - workdir: ${workdir} - koji_build: ${koji_build} - artifactsdir: ${artifactsdir} - export: python_versions_output - - - name: report results to resultsdb - resultsdb: - results: ${python_versions_output} diff --git a/taskotron_python_versions/executables.py b/taskotron_python_versions/executables.py index 6698536..1e8736c 100644 --- a/taskotron_python_versions/executables.py +++ b/taskotron_python_versions/executables.py @@ -90,7 +90,7 @@ def task_executables(packages, koji_build, artifact): package, '\n * '.join(sorted(bins))) detail = check.CheckDetail( - checkname='python-versions.executables', + checkname='executables', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -99,7 +99,7 @@ def task_executables(packages, koji_build, artifact): detail.artifact = artifact write_to_artifact(artifact, MESSAGE.format(message), INFO_URL) - log.info('python-versions.executables {} for {}'.format( + log.info('subcheck executables {} for {}'.format( outcome, koji_build)) return detail diff --git a/taskotron_python_versions/naming_scheme.py b/taskotron_python_versions/naming_scheme.py index b9b260c..b3429f4 100644 --- a/taskotron_python_versions/naming_scheme.py +++ b/taskotron_python_versions/naming_scheme.py @@ -91,7 +91,7 @@ def task_naming_scheme(packages, koji_build, artifact): package.filename)) detail = check.CheckDetail( - checkname='python-versions.naming_scheme', + checkname='naming_scheme', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -104,7 +104,7 @@ def task_naming_scheme(packages, koji_build, 
artifact): else: problems = 'No problems found.' - summary = 'python-versions.naming_scheme {} for {}. {}'.format( + summary = 'subcheck naming_scheme {} for {}. {}'.format( outcome, koji_build, problems) log.info(summary) diff --git a/taskotron_python_versions/py3_support.py b/taskotron_python_versions/py3_support.py index 73bba2a..de82edd 100644 --- a/taskotron_python_versions/py3_support.py +++ b/taskotron_python_versions/py3_support.py @@ -109,7 +109,7 @@ def task_py3_support(packages, koji_build, artifact): ' upstream, skipping Py3 support check') detail = check.CheckDetail( - checkname='python-versions.py3_support', + checkname='py3_support', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -118,7 +118,7 @@ def task_py3_support(packages, koji_build, artifact): detail.artifact = artifact write_to_artifact(artifact, MESSAGE.format(message), INFO_URL) - log.info('python-versions.py3_support {} for {}'.format( + log.info('subcheck py3_support {} for {}'.format( outcome, koji_build)) return detail diff --git a/taskotron_python_versions/python_usage.py b/taskotron_python_versions/python_usage.py index 049e417..ecfb955 100644 --- a/taskotron_python_versions/python_usage.py +++ b/taskotron_python_versions/python_usage.py @@ -61,7 +61,7 @@ def task_python_usage(logs, koji_build, artifact): outcome = 'FAILED' detail = check.CheckDetail( - checkname='python-versions.python_usage', + checkname='python_usage', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -74,7 +74,7 @@ def task_python_usage(logs, koji_build, artifact): else: problems = 'No problems found.' - summary = 'python-versions.python_usage {} for {}. {}'.format( + summary = 'subcheck python_usage {} for {}. 
{}'.format( outcome, koji_build, problems) log.info(summary) diff --git a/taskotron_python_versions/requires.py b/taskotron_python_versions/requires.py index 2a6c5d1..6966b32 100644 --- a/taskotron_python_versions/requires.py +++ b/taskotron_python_versions/requires.py @@ -162,7 +162,7 @@ def task_requires_naming_scheme(packages, koji_build, artifact): message_rpms += message detail = check.CheckDetail( - checkname='python-versions.requires_naming_scheme', + checkname='requires_naming_scheme', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -174,7 +174,7 @@ def task_requires_naming_scheme(packages, koji_build, artifact): else: problems = 'No problems found.' - summary = 'python-versions.requires_naming_scheme {} for {}. {}'.format( + summary = 'subcheck requires_naming_scheme {} for {}. {}'.format( outcome, koji_build, problems) log.info(summary) diff --git a/taskotron_python_versions/two_three.py b/taskotron_python_versions/two_three.py index e1c2332..512b3de 100644 --- a/taskotron_python_versions/two_three.py +++ b/taskotron_python_versions/two_three.py @@ -123,7 +123,7 @@ def task_two_three(packages, koji_build, artifact): outcome = 'FAILED' bads[package.filename] = py_versions - detail = check.CheckDetail(checkname='python-versions.two_three', + detail = check.CheckDetail(checkname='two_three', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -143,7 +143,7 @@ def task_two_three(packages, koji_build, artifact): else: problems = 'No problems found.' - summary = 'python-versions.two_three {} for {}. {}'.format( + summary = 'subcheck two_three {} for {}. 
{}'.format( outcome, koji_build, problems) log.info(summary) diff --git a/taskotron_python_versions/unversioned_shebangs.py b/taskotron_python_versions/unversioned_shebangs.py index 6ad0d1d..1ca5eef 100644 --- a/taskotron_python_versions/unversioned_shebangs.py +++ b/taskotron_python_versions/unversioned_shebangs.py @@ -94,7 +94,7 @@ def task_unversioned_shebangs(packages, koji_build, artifact): package, shebang, '\n '.join(sorted(scripts))) detail = check.CheckDetail( - checkname='python-versions.unversioned_shebangs', + checkname='unversioned_shebangs', item=koji_build, report_type=check.ReportType.KOJI_BUILD, outcome=outcome) @@ -105,7 +105,7 @@ def task_unversioned_shebangs(packages, koji_build, artifact): else: shebang_message = 'No problems found.' - log.info('python-versions.unversioned_shebangs {} for {}. {}'.format( + log.info('subcheck unversioned_shebangs {} for {}. {}'.format( outcome, koji_build, shebang_message)) return detail diff --git a/tests.yml b/tests.yml new file mode 100644 index 0000000..2784fe8 --- /dev/null +++ b/tests.yml @@ -0,0 +1,45 @@ +--- +- hosts: localhost + remote_user: root + vars: + artifacts: ./artifacts + testcase: dist.python-versions + taskotron_generic_task: true + tasks: + - name: Install required packages + dnf: + name: "{{ item }}" + state: latest + with_items: + - rpm-python + - python2-dnf + - python2-libarchive-c + - python-bugzilla + - koji + - libtaskotron-core + + - name: Make sure artifacts dir exists + # This is not necessary when running through taskotron (since SI mandates + # the dir has to exist), but is useful for local execution of just this + # playbook + file: + path: "{{ artifacts }}" + state: directory + + - name: Create work dir + tempfile: + path: /var/tmp + state: directory + prefix: task-{{ testcase }}_ + register: workdir + + - name: Download RPMs from koji + shell: koji download-build {{ taskotron_item }} + args: + chdir: "{{ workdir.path }}" + + - name: Run task + shell: > + 
./python_versions_check.py {{ taskotron_item }} {{ workdir.path }} + {{ artifacts }} {{ testcase }} + > {{ artifacts }}/test.log 2>&1 diff --git a/tox.ini b/tox.ini index 5a1213a..17490f5 100644 --- a/tox.ini +++ b/tox.ini @@ -10,13 +10,14 @@ deps = commands = python -m pytest -v {posargs} test/functional sitepackages = True -[testenv:integration] -deps = - pytest - pyyaml -basepython = python3 -commands = python -m pytest -v {posargs} test/integration -sitepackages = False +;; disabled during transition to ansiblized Taskotron (Standard Interface support) +; [testenv:integration] +; deps = +; pytest +; pyyaml +; basepython = python3 +; commands = python -m pytest -v {posargs} test/integration +; sitepackages = False [testenv:style] deps = flake8 From 640cf7e609d611983c4ce3d224cd9ae953efcc02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20P=C3=A1ral?= Date: Tue, 16 Jan 2018 15:24:58 +0100 Subject: [PATCH 02/20] create artifacts/taskotron in playbook --- python_versions_check.py | 5 +---- tests.yml | 8 +++----- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/python_versions_check.py b/python_versions_check.py index ec4e2a5..5fc0dca 100755 --- a/python_versions_check.py +++ b/python_versions_check.py @@ -32,10 +32,7 @@ def run(koji_build, workdir='.', artifactsdir='artifacts', testcase='dist.python-versions'): '''The main method to run from Taskotron''' workdir = os.path.abspath(workdir) - resultsdir = os.path.join(artifactsdir, 'taskotron') - if not os.path.exists(resultsdir): - os.makedirs(resultsdir) - results_path = os.path.join(resultsdir, 'results.yml') + results_path = os.path.join(artifactsdir, 'taskotron', 'results.yml') artifact = os.path.join(artifactsdir, 'output.log') # find files to run on diff --git a/tests.yml b/tests.yml index 2784fe8..61a4a39 100644 --- a/tests.yml +++ b/tests.yml @@ -18,12 +18,10 @@ - koji - libtaskotron-core - - name: Make sure artifacts dir exists - # This is not necessary when running through taskotron (since SI 
mandates - # the dir has to exist), but is useful for local execution of just this - # playbook + - name: Make sure taskotron results dir exists + # this is for placing results.yml file file: - path: "{{ artifacts }}" + path: "{{ artifacts }}/taskotron" state: directory - name: Create work dir From d762ac8809f0345f28db994d0c42f57d930a81c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20P=C3=A1ral?= Date: Fri, 26 Jan 2018 13:30:24 +0100 Subject: [PATCH 03/20] download only supported architectures --- tests.yml | 49 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/tests.yml b/tests.yml index 61a4a39..a25ad03 100644 --- a/tests.yml +++ b/tests.yml @@ -1,10 +1,15 @@ ---- +# Run python-versions on {{ taskotron_item }} NVR build + - hosts: localhost remote_user: root vars: - artifacts: ./artifacts testcase: dist.python-versions taskotron_generic_task: true + # below are fallback vars for local testing + artifacts: ./artifacts + taskotron_item: python-gear-0.11.0-1.fc27 # you should really override at least this :) + taskotron_supported_arches: + - x86_64 tasks: - name: Install required packages dnf: @@ -31,13 +36,35 @@ prefix: task-{{ testcase }}_ register: workdir - - name: Download RPMs from koji - shell: koji download-build {{ taskotron_item }} - args: - chdir: "{{ workdir.path }}" + - name: Print work dir + debug: + var: workdir.path + + - name: Compute architectures to download + set_fact: + download_arches: "{{ taskotron_supported_binary_arches }} + ['noarch', 'src']" + + - name: Print architectures to download + debug: + var: download_arches + + - block: + - name: Download RPMs from koji + # use realpath because when running in local mode, artifacts is a + # relative path and we use chdir + shell: > + koji download-build --arch {{ download_arches | join(' --arch ') }} + {{ taskotron_item }} + &> {{ artifacts | realpath }}/test.log + args: + chdir: "{{ workdir.path }}" - - name: Run task - shell: > - 
./python_versions_check.py {{ taskotron_item }} {{ workdir.path }} - {{ artifacts }} {{ testcase }} - > {{ artifacts }}/test.log 2>&1 + - name: Run task + shell: > + ./python_versions_check.py {{ taskotron_item }} {{ workdir.path }} + {{ artifacts }} {{ testcase }} + &>> {{ artifacts }}/test.log + always: + - name: Print results location + debug: + msg: 'You can see task results at: {{ artifacts | realpath }}' From 8df21766a9442b19627b070b257ec81737e920d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20P=C3=A1ral?= Date: Thu, 1 Feb 2018 14:16:44 +0100 Subject: [PATCH 04/20] allow to download build.log The koji command can't do it itself, use the koji directive from libtaskotron. --- download_rpms.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ tests.yml | 16 ++++++---------- 2 files changed, 54 insertions(+), 10 deletions(-) create mode 100755 download_rpms.py diff --git a/download_rpms.py b/download_rpms.py new file mode 100755 index 0000000..b29394c --- /dev/null +++ b/download_rpms.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- + +'''Download correct NVRs for python-versions to operate on.''' + +import sys +import logging +from libtaskotron.directives import koji_directive + +def download_rpms(koji_build, rpmsdir, arch=['x86_64'], arch_exclude=[], + src=True, debuginfo=False, build_log=True): + '''Download RPMs for a koji build NVR.''' + + koji = koji_directive.KojiDirective() + + print 'Downloading rpms for %s into %s' % (koji_build, rpmsdir) + params = {'action': 'download', + 'koji_build': koji_build, + 'arch': arch, + 'arch_exclude': arch_exclude, + 'src': src, + 'debuginfo': debuginfo, + 'target_dir': rpmsdir, + 'build_log': build_log, + } + arg_data = {'workdir': None} + koji.process(params, arg_data) + + print 'Downloading complete' + + +if __name__ == '__main__': + print 'Running script: %s' % sys.argv + logging.basicConfig() + logging.getLogger('libtaskotron').setLevel(logging.DEBUG) + args = {} + + # arch is supposed to 
be a comma delimited string, but optional + arches = sys.argv[3] if len(sys.argv) >= 4 else '' + arches = [arch.strip() for arch in arches.split(',')] + if arches: + print 'Requested arches: %s' % arches + args['arch'] = arches + + download_rpms(koji_build=sys.argv[1], + rpmsdir=sys.argv[2], + **args + ) diff --git a/tests.yml b/tests.yml index a25ad03..0af3aba 100644 --- a/tests.yml +++ b/tests.yml @@ -20,8 +20,8 @@ - python2-dnf - python2-libarchive-c - python-bugzilla - - koji - libtaskotron-core + - libtaskotron-fedora - name: Make sure taskotron results dir exists # this is for placing results.yml file @@ -42,22 +42,18 @@ - name: Compute architectures to download set_fact: - download_arches: "{{ taskotron_supported_binary_arches }} + ['noarch', 'src']" + download_arches: "{{ taskotron_supported_arches | join(',') }}" - name: Print architectures to download debug: var: download_arches - block: - - name: Download RPMs from koji - # use realpath because when running in local mode, artifacts is a - # relative path and we use chdir + - name: Download RPMs from Koji shell: > - koji download-build --arch {{ download_arches | join(' --arch ') }} - {{ taskotron_item }} - &> {{ artifacts | realpath }}/test.log - args: - chdir: "{{ workdir.path }}" + ./download_rpms.py {{ taskotron_item }} {{ workdir.path }} + {{ download_arches }} + &> {{ artifacts }}/test.log - name: Run task shell: > From 70978fbef08803a930ed871de013cb3644c0ed74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Thu, 1 Feb 2018 18:39:43 +0100 Subject: [PATCH 05/20] Reenable the integration tests, use mock --- Dockerfile | 2 +- mock.cfg | 5 + test/integration/test_integration.py | 148 +++++++++++++++------------ tox.ini | 15 ++- 4 files changed, 97 insertions(+), 73 deletions(-) create mode 100644 mock.cfg diff --git a/Dockerfile b/Dockerfile index 3ff97c4..c780066 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM fedora RUN dnf -y install --setopt=install_weak_deps=false 
--setopt=tsflags=nodocs \ --setopt=deltarpm=false python2-rpm libtaskotron-core libtaskotron-fedora \ python3-rpm tox python2 python3 python2-dnf python3-dnf \ - python2-libarchive-c python-bugzilla && dnf clean all + python2-libarchive-c python-bugzilla mock && dnf clean all ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 diff --git a/mock.cfg b/mock.cfg new file mode 100644 index 0000000..b2a40c4 --- /dev/null +++ b/mock.cfg @@ -0,0 +1,5 @@ +include('/etc/mock/fedora-27-x86_64.cfg') + +config_opts['chroot_setup_cmd'] = 'install ansible dnf' +config_opts['use_host_resolv'] = True +config_opts['rpmbuild_networking'] = True diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index 8bf90fa..4706502 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -1,4 +1,7 @@ from collections import namedtuple +import contextlib +import glob +import shutil import subprocess import sys from textwrap import dedent @@ -11,54 +14,69 @@ Result = namedtuple('Result', ['outcome', 'artifact', 'item']) -def parse_results(log): - ''' - From the given stdout log, parse the results - ''' - start = 'results:' - results = [] - record = False +class MockEnv: + '''Use this to work with mock. 
Mutliple instances are not safe.''' + + def __init__(self): + self.mock = ['mock', '-r', './mock.cfg'] + self._run(['--init'], check=True) + + def _run(self, what, **kwargs): + return subprocess.run(self.mock + what, **kwargs) + + def copy_in(self, files): + self._run(['--copyin'] + files + ['/'], check=True) + + def copy_out(self, directory, *, clean_target=False): + if clean_target: + with contextlib.suppress(FileNotFoundError): + shutil.rmtree(directory) + self._run(['--copyout', directory, directory], check=True) + + def shell(self, command): + self._run(['--shell', command]) + + def orphanskill(self): + self._run(['--orphanskill']) - for line in log.splitlines(): - if line.strip() == start: - record = True - if record: - results.append(line) - if not line: - break - if not results: - raise RuntimeError('Could not parse output') - return yaml.load('\n'.join(results))['results'] +@pytest.fixture(scope="session") +def mock(): + '''Setup a mock we can run Ansible tasks in under root''' + mockenv = MockEnv() + files = ['taskotron_python_versions'] + glob.glob('*.py') + ['tests.yml'] + mockenv.copy_in(files) + yield mockenv + mockenv.orphanskill() -def run_task(nevr, *, reterr=False): +def parse_results(path): ''' - Run the task on a Koji build. + From the given result file, parse the results + ''' + with open(path) as f: + return yaml.load(f)['results'] + + +def run_task(nevr, *, mock): + ''' + Run the task on a Koji build in given mock. 
Returns a dict with Results (outcome, artifact, item) - If reterr is true, returns a tuple with the above and captured stderr - If reterr is false, prints the stderr + Actually returns a tuple with the above and captured log ''' - proc = subprocess.Popen( - ['runtask', '-i', nevr, '-t', 'koji_build', 'runtask.yml'], - stderr=subprocess.PIPE, - universal_newlines=True, - ) - _, err = proc.communicate() - if proc.returncode != 0: - print(err, file=sys.stderr) # always print stderr in this case - raise RuntimeError('runtask exited with {}'.format(proc.returncode)) - results = parse_results(err) + mock.shell('ansible-playbook tests.yml -e taskotron_item={}'.format(nevr)) + mock.copy_out('artifacts', clean_target=True) + + with open('artifacts/test.log') as f: + log = f.read() + + results = parse_results('artifacts/taskotron/results.yml') ret = {r['checkname']: Result(r.get('outcome'), r.get('artifact'), r.get('item')) for r in results} - if reterr: - return ret, err - - print(err, file=sys.stderr) - return ret + return ret, log def fixtures_factory(nevr): @@ -69,8 +87,8 @@ def fixtures_factory(nevr): See examples bellow.''' if not nevr.startswith('_'): @pytest.fixture(scope="session") - def _results(): - return run_task(nevr, reterr=True) + def _results(mock): + return run_task(nevr, mock=mock) return _results @@ -141,17 +159,17 @@ def test_number_of_results(results, request): 'bucky')) def test_two_three_passed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.two_three'].outcome == 'PASSED' + assert results['dist.python-versions.two_three'].outcome == 'PASSED' def test_two_three_failed(tracer): - assert tracer['python-versions.two_three'].outcome == 'FAILED' + assert tracer['dist.python-versions.two_three'].outcome == 'FAILED' @pytest.mark.parametrize('results', ('tracer', 'copr', 'admesh')) def test_one_failed_result_is_total_failed(results, request): results = request.getfixturevalue(results) - assert 
results['python-versions'].outcome == 'FAILED' + assert results['dist.python-versions'].outcome == 'FAILED' @pytest.mark.parametrize(('results', 'task'), @@ -160,12 +178,12 @@ def test_one_failed_result_is_total_failed(results, request): ('admesh', 'requires_naming_scheme'))) def test_artifact_is_the_same(results, task, request): results = request.getfixturevalue(results) - assert (results['python-versions'].artifact == - results['python-versions.' + task].artifact) + assert (results['dist.python-versions'].artifact == + results['dist.python-versions.' + task].artifact) def test_artifact_contains_two_three_and_looks_as_expected(tracer): - result = tracer['python-versions.two_three'] + result = tracer['dist.python-versions.two_three'] with open(result.artifact) as f: artifact = f.read() @@ -180,17 +198,17 @@ def test_artifact_contains_two_three_and_looks_as_expected(tracer): @pytest.mark.parametrize('results', ('eric', 'epub', 'twine', 'vdirsyncer')) def test_naming_scheme_passed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.naming_scheme'].outcome == 'PASSED' + assert results['dist.python-versions.naming_scheme'].outcome == 'PASSED' @pytest.mark.parametrize('results', ('copr', 'six', 'admesh', 'bucky')) def test_naming_scheme_failed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.naming_scheme'].outcome == 'FAILED' + assert results['dist.python-versions.naming_scheme'].outcome == 'FAILED' def test_artifact_contains_naming_scheme_and_looks_as_expected(copr): - result = copr['python-versions.naming_scheme'] + result = copr['dist.python-versions.naming_scheme'] with open(result.artifact) as f: artifact = f.read() @@ -203,20 +221,20 @@ def test_artifact_contains_naming_scheme_and_looks_as_expected(copr): @pytest.mark.parametrize('results', ('eric', 'twine', 'six')) def test_requires_naming_scheme_passed(results, request): results = request.getfixturevalue(results) - 
task_result = results['python-versions.requires_naming_scheme'] + task_result = results['dist.python-versions.requires_naming_scheme'] assert task_result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('admesh', 'copr')) def test_requires_naming_scheme_failed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.requires_naming_scheme'] + task_result = results['dist.python-versions.requires_naming_scheme'] assert task_result.outcome == 'FAILED' def test_artifact_contains_requires_naming_scheme_and_looks_as_expected( tracer): - result = tracer['python-versions.requires_naming_scheme'] + result = tracer['dist.python-versions.requires_naming_scheme'] with open(result.artifact) as f: artifact = f.read() @@ -238,7 +256,7 @@ def test_artifact_contains_requires_naming_scheme_and_looks_as_expected( def test_requires_naming_scheme_contains_python(yum): - result = yum['python-versions.requires_naming_scheme'] + result = yum['dist.python-versions.requires_naming_scheme'] with open(result.artifact) as f: artifact = f.read() @@ -251,20 +269,20 @@ def test_requires_naming_scheme_contains_python(yum): 'copr', 'epub', 'twine', 'bucky')) def test_executables_passed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.executables'] + task_result = results['dist.python-versions.executables'] assert task_result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('docutils',)) def test_executables_failed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.executables'] + task_result = results['dist.python-versions.executables'] assert task_result.outcome == 'FAILED' def test_artifact_contains_executables_and_looks_as_expected( docutils): - result = docutils['python-versions.executables'] + result = docutils['dist.python-versions.executables'] with open(result.artifact) as f: artifact = f.read() @@ -297,18 +315,20 @@ def 
test_artifact_contains_executables_and_looks_as_expected( 'epub', 'twine', 'nodejs')) def test_unvesioned_shebangs_passed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.unversioned_shebangs'].outcome == 'PASSED' + result = results['dist.python-versions.unversioned_shebangs'] + assert result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('yum', 'tracer', 'bucky')) def test_unvesioned_shebangs_failed(results, request): results = request.getfixturevalue(results) - assert results['python-versions.unversioned_shebangs'].outcome == 'FAILED' + result = results['dist.python-versions.unversioned_shebangs'] + assert result.outcome == 'FAILED' def test_artifact_contains_unversioned_shebangs_and_looks_as_expected( tracer): - result = tracer['python-versions.unversioned_shebangs'] + result = tracer['dist.python-versions.unversioned_shebangs'] with open(result.artifact) as f: artifact = f.read() @@ -328,14 +348,14 @@ def test_artifact_contains_unversioned_shebangs_and_looks_as_expected( 'copr', 'epub', 'twine', 'docutils')) def test_py3_support_passed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.py3_support'] + task_result = results['dist.python-versions.py3_support'] assert task_result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('bucky',)) def test_py3_support_failed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.py3_support'] + task_result = results['dist.python-versions.py3_support'] assert task_result.outcome == 'FAILED' @@ -347,7 +367,7 @@ def test_artifact_contains_py3_support_and_looks_as_expected( gets ported to Python 3 and its Bugzilla gets closed. 
See https://bugzilla.redhat.com/show_bug.cgi?id=1367012 """ - result = bucky['python-versions.py3_support'] + result = bucky['dist.python-versions.py3_support'] with open(result.artifact) as f: artifact = f.read() @@ -367,19 +387,19 @@ def test_artifact_contains_py3_support_and_looks_as_expected( 'copr', 'epub', 'twine', 'docutils')) def test_python_usage_passed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.python_usage'] + task_result = results['dist.python-versions.python_usage'] assert task_result.outcome == 'PASSED' @pytest.mark.parametrize('results', ('jsonrpc',)) def test_python_usage_failed(results, request): results = request.getfixturevalue(results) - task_result = results['python-versions.python_usage'] + task_result = results['dist.python-versions.python_usage'] assert task_result.outcome == 'FAILED' def test_artifact_contains_python_usage_and_looks_as_expected(jsonrpc): - result = jsonrpc['python-versions.python_usage'] + result = jsonrpc['dist.python-versions.python_usage'] with open(result.artifact) as f: artifact = f.read() @@ -388,7 +408,7 @@ def test_artifact_contains_python_usage_and_looks_as_expected(jsonrpc): assert dedent(""" You've used /usr/bin/python during build on the following arches: - jsonrpc-glib-3.27.4-1.fc28: armv7hl, i686, x86_64 + jsonrpc-glib-3.27.4-1.fc28: x86_64 Use /usr/bin/python3 or /usr/bin/python2 explicitly. /usr/bin/python will be removed or switched to Python 3 in the future. 
diff --git a/tox.ini b/tox.ini
index 17490f5..5a1213a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,14 +10,13 @@ deps =
 commands = python -m pytest -v {posargs} test/functional
 sitepackages = True
 
-;; disabled during transition to ansiblized Taskotron (Standard Interface support)
-; [testenv:integration]
-; deps =
-;     pytest
-;     pyyaml
-; basepython = python3
-; commands = python -m pytest -v {posargs} test/integration
-; sitepackages = False
+[testenv:integration]
+deps =
+    pytest
+    pyyaml
+basepython = python3
+commands = python -m pytest -v {posargs} test/integration
+sitepackages = False
 
 [testenv:style]
 deps = flake8

From d5f0afe5220991b390cf73b26a00679db70e854f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miro=20Hron=C4=8Dok?=
Date: Thu, 1 Feb 2018 19:17:50 +0100
Subject: [PATCH 06/20] Add --cap-add=SYS_ADMIN to docker on Travis

Otherwise mock is not possible, see
https://lists.fedoraproject.org/archives/list/buildsys@lists.fedoraproject.org/message/UMYBANKEJRCP52I5O7LAWX35VUYQFVH3/

This is probably not safe, but we are on Travis.
I have no idea what I'm doing.
---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index b96e202..aca37ec 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,4 +7,4 @@ install:
   - docker build -t taskotron .
script: - - docker run -v $(pwd):$(pwd) -w $(pwd) -i -t taskotron + - docker run --cap-add=SYS_ADMIN -v $(pwd):$(pwd) -w $(pwd) -i -t taskotron From 3e4d8e5e19c4d8b419e10f3558726d02048eb55c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Thu, 1 Feb 2018 19:25:23 +0100 Subject: [PATCH 07/20] Don't use nspawn --- mock.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/mock.cfg b/mock.cfg index b2a40c4..c94ca07 100644 --- a/mock.cfg +++ b/mock.cfg @@ -3,3 +3,4 @@ include('/etc/mock/fedora-27-x86_64.cfg') config_opts['chroot_setup_cmd'] = 'install ansible dnf' config_opts['use_host_resolv'] = True config_opts['rpmbuild_networking'] = True +config_opts['use_nspawn'] = False From b2f95c84a99af04aa4915fc6bb5958636f3cbeb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Thu, 1 Feb 2018 19:49:12 +0100 Subject: [PATCH 08/20] Document running in mock --- README.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.rst b/README.rst index c04a69a..efde82b 100644 --- a/README.rst +++ b/README.rst @@ -38,6 +38,13 @@ For example:: You can see the results in ``./artifacts/`` directory. 
+You can run the above in mock:: + + $ mock -r ./mock.cfg --init + $ mock -r ./mock.cfg --copyin taskotron_python_versions *.py tests.yml / + $ mock -r ./mock.cfg --shell 'ansible-playbook tests.yml -e taskotron_item=python-gear-0.11.0-1.fc27' + $ mock -r ./mock.cfg --copyout artifacts artifacts + Tests ----- From 8ef4147a6d43c644a8a022fb82452a427f9df50a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Thu, 1 Feb 2018 20:28:58 +0100 Subject: [PATCH 09/20] Don't use shebangs --- download_rpms.py | 1 - python_versions_check.py | 1 - tests.yml | 4 ++-- 3 files changed, 2 insertions(+), 4 deletions(-) mode change 100755 => 100644 download_rpms.py mode change 100755 => 100644 python_versions_check.py diff --git a/download_rpms.py b/download_rpms.py old mode 100755 new mode 100644 index b29394c..92bdd83 --- a/download_rpms.py +++ b/download_rpms.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python2 # -*- coding: utf-8 -*- '''Download correct NVRs for python-versions to operate on.''' diff --git a/python_versions_check.py b/python_versions_check.py old mode 100755 new mode 100644 index 5fc0dca..70ab96b --- a/python_versions_check.py +++ b/python_versions_check.py @@ -1,4 +1,3 @@ -#!/usr/bin/python2 # -*- coding: utf-8 -*- import logging diff --git a/tests.yml b/tests.yml index 0af3aba..dcef3da 100644 --- a/tests.yml +++ b/tests.yml @@ -51,13 +51,13 @@ - block: - name: Download RPMs from Koji shell: > - ./download_rpms.py {{ taskotron_item }} {{ workdir.path }} + python2 download_rpms.py {{ taskotron_item }} {{ workdir.path }} {{ download_arches }} &> {{ artifacts }}/test.log - name: Run task shell: > - ./python_versions_check.py {{ taskotron_item }} {{ workdir.path }} + python2 python_versions_check.py {{ taskotron_item }} {{ workdir.path }} {{ artifacts }} {{ testcase }} &>> {{ artifacts }}/test.log always: From 50a09164c30f01ffa7a5d557f6471e421537abe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Thu, 1 Feb 2018 20:30:59 +0100 Subject: 
[PATCH 10/20] Fix style issues in download_rpms.py --- download_rpms.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/download_rpms.py b/download_rpms.py index 92bdd83..ed71102 100644 --- a/download_rpms.py +++ b/download_rpms.py @@ -6,30 +6,32 @@ import logging from libtaskotron.directives import koji_directive + def download_rpms(koji_build, rpmsdir, arch=['x86_64'], arch_exclude=[], src=True, debuginfo=False, build_log=True): '''Download RPMs for a koji build NVR.''' koji = koji_directive.KojiDirective() - print 'Downloading rpms for %s into %s' % (koji_build, rpmsdir) - params = {'action': 'download', - 'koji_build': koji_build, - 'arch': arch, - 'arch_exclude': arch_exclude, - 'src': src, - 'debuginfo': debuginfo, - 'target_dir': rpmsdir, - 'build_log': build_log, - } + print('Downloading rpms for %s into %s' % (koji_build, rpmsdir)) + params = { + 'action': 'download', + 'koji_build': koji_build, + 'arch': arch, + 'arch_exclude': arch_exclude, + 'src': src, + 'debuginfo': debuginfo, + 'target_dir': rpmsdir, + 'build_log': build_log, + } arg_data = {'workdir': None} koji.process(params, arg_data) - print 'Downloading complete' + print('Downloading complete') if __name__ == '__main__': - print 'Running script: %s' % sys.argv + print('Running script: %s' % sys.argv) logging.basicConfig() logging.getLogger('libtaskotron').setLevel(logging.DEBUG) args = {} @@ -38,7 +40,7 @@ def download_rpms(koji_build, rpmsdir, arch=['x86_64'], arch_exclude=[], arches = sys.argv[3] if len(sys.argv) >= 4 else '' arches = [arch.strip() for arch in arches.split(',')] if arches: - print 'Requested arches: %s' % arches + print('Requested arches: %s' % arches) args['arch'] = arches download_rpms(koji_build=sys.argv[1], From 4b2cd9676a42ac8183fdc4d362bdfc73f8b6480f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Thu, 1 Feb 2018 20:37:39 +0100 Subject: [PATCH 11/20] Use a dedicated mock root --- mock.cfg | 1 + 1 file 
changed, 1 insertion(+) diff --git a/mock.cfg b/mock.cfg index c94ca07..e9fc604 100644 --- a/mock.cfg +++ b/mock.cfg @@ -4,3 +4,4 @@ config_opts['chroot_setup_cmd'] = 'install ansible dnf' config_opts['use_host_resolv'] = True config_opts['rpmbuild_networking'] = True config_opts['use_nspawn'] = False +config_opts['root'] = 'fedora-27-x86_64-taskotron' From 6e46a8b1d66b51a2ca580faaac53aede4b20acc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Fri, 2 Feb 2018 09:42:10 +0100 Subject: [PATCH 12/20] Remove unneeded packages from Docker --- Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index c780066..22ee8aa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,8 @@ FROM fedora RUN dnf -y install --setopt=install_weak_deps=false --setopt=tsflags=nodocs \ - --setopt=deltarpm=false python2-rpm libtaskotron-core libtaskotron-fedora \ - python3-rpm tox python2 python3 python2-dnf python3-dnf \ - python2-libarchive-c python-bugzilla mock && dnf clean all + --setopt=deltarpm=false python2-rpm python3-rpm tox python2-dnf \ + python3-dnf mock && dnf clean all ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 From 57253daac42e1f0dbe9e57b594e45519eefccbe5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Fri, 2 Feb 2018 09:46:24 +0100 Subject: [PATCH 13/20] bugzilla needs requests but does not specify it https://github.com/gdestuynder/simple_bugzilla/pull/1 --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 5a1213a..e463d0c 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,7 @@ deps = pytest libarchive-c bugzilla + requests commands = python -m pytest -v {posargs} test/functional sitepackages = True From d20272e30f2b24931fb8d8f0f11c3e4b8de94e87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Fri, 2 Feb 2018 12:27:34 +0100 Subject: [PATCH 14/20] Error if mock shell ends with unexpected exitcode --- test/integration/test_integration.py | 12 
++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py
index 4706502..98ad76b 100644
--- a/test/integration/test_integration.py
+++ b/test/integration/test_integration.py
@@ -34,7 +34,8 @@ def copy_out(self, directory, *, clean_target=False):
         self._run(['--copyout', directory, directory], check=True)
 
     def shell(self, command):
-        self._run(['--shell', command])
+        cp = self._run(['--shell', command])
+        return cp.returncode
 
     def orphanskill(self):
         self._run(['--orphanskill'])
@@ -64,12 +65,19 @@ def run_task(nevr, *, mock):
     Returns a dict with Results (outcome, artifact, item)
 
     Actually returns a tuple with the above and captured log
     '''
-    mock.shell('ansible-playbook tests.yml -e taskotron_item={}'.format(nevr))
+    exit_code = mock.shell('ansible-playbook tests.yml '
+                           '-e taskotron_item={}'.format(nevr))
     mock.copy_out('artifacts', clean_target=True)
 
     with open('artifacts/test.log') as f:
         log = f.read()
 
+    # 0 for PASSED
+    # 2 for FAILED
+    if exit_code not in (0, 2):
+        print(log, file=sys.stderr)
+        raise RuntimeError('mock shell ended with {}'.format(exit_code))
+
     results = parse_results('artifacts/taskotron/results.yml')
 
     ret = {r['checkname']: Result(r.get('outcome'),

From 118f8c6e59cccdf34161aede169272c653a8204e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miro=20Hron=C4=8Dok?=
Date: Fri, 2 Feb 2018 13:05:55 +0100
Subject: [PATCH 15/20] For each nevr, use its own artifacts directory

Previously this only worked by accident

Bonus: We can now inspect the contents of the folder, if there is a
failure
---
 .gitignore                           |  9 +++++----
 test/integration/test_integration.py | 23 ++++++++++++++++-------
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/.gitignore b/.gitignore
index 6cbbdfe..09c5976 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,9 +2,10 @@
 *.pyo
 *.pyc
 __pycache__
-/artifacts
-/dist
-/.tox
-/.eggs
+/artifacts-*/
+/build/
+/dist/
+/.tox/
+/.eggs/
 .cache
 *.egg-info
diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index 98ad76b..ef40cc7 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -27,11 +27,11 @@ def _run(self, what, **kwargs): def copy_in(self, files): self._run(['--copyin'] + files + ['/'], check=True) - def copy_out(self, directory, *, clean_target=False): + def copy_out(self, directory, target, *, clean_target=False): if clean_target: with contextlib.suppress(FileNotFoundError): - shutil.rmtree(directory) - self._run(['--copyout', directory, directory], check=True) + shutil.rmtree(target) + self._run(['--copyout', directory, target], check=True) def shell(self, command): cp = self._run(['--shell', command]) @@ -67,9 +67,11 @@ def run_task(nevr, *, mock): ''' exit_code = mock.shell('ansible-playbook tests.yml ' '-e taskotron_item={}'.format(nevr)) - mock.copy_out('artifacts', clean_target=True) + artifacts = 'artifacts-{}'.format(nevr) + mock.copy_out('artifacts', artifacts, clean_target=True) + mock.shell('rm artifacts -rf') # purge the logs - with open('artifacts/test.log') as f: + with open(artifacts + '/test.log') as f: log = f.read() # 0 for PASSED @@ -78,10 +80,17 @@ def run_task(nevr, *, mock): print(log, file=sys.stderr) raise RuntimeError('mock shell ended with {}'.format(exit_code)) - results = parse_results('artifacts/taskotron/results.yml') + results = parse_results(artifacts + '/taskotron/results.yml') + + # we need to preserve the artifacts for each nevr separately + # but the saved path is just ./artifacts/... 
+ def fix_artifact_path(path): + if path is None: + return None + return path.replace('/artifacts/', '/{}/'.format(artifacts)) ret = {r['checkname']: Result(r.get('outcome'), - r.get('artifact'), + fix_artifact_path(r.get('artifact')), r.get('item')) for r in results} return ret, log From 8601f68120fcd08c93c20dd47897e4b44ad57765 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Fri, 2 Feb 2018 13:33:42 +0100 Subject: [PATCH 16/20] Add a --fake switch for integration tests --- test/integration/conftest.py | 4 ++++ test/integration/test_integration.py | 18 +++++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 test/integration/conftest.py diff --git a/test/integration/conftest.py b/test/integration/conftest.py new file mode 100644 index 0000000..7afeabb --- /dev/null +++ b/test/integration/conftest.py @@ -0,0 +1,4 @@ +def pytest_addoption(parser): + parser.addoption('--fake', action='store_true', default=False, + help='don\'t run the code, reuse the result from ' + 'last tests') diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index ef40cc7..53bff1a 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -16,9 +16,9 @@ class MockEnv: '''Use this to work with mock. 
Mutliple instances are not safe.''' + mock = ['mock', '-r', './mock.cfg'] def __init__(self): - self.mock = ['mock', '-r', './mock.cfg'] self._run(['--init'], check=True) def _run(self, what, **kwargs): @@ -41,10 +41,22 @@ def orphanskill(self): self._run(['--orphanskill']) +class FakeMockEnv(MockEnv): + '''Use this to fake the mock interactions''' + mock = ['echo', 'mock'] + + def copy_out(self, directory, target, *, clean_target=False): + '''Fake it, never clean target''' + return super().copy_out(directory, target, clean_target=False) + + @pytest.fixture(scope="session") -def mock(): +def mock(request): '''Setup a mock we can run Ansible tasks in under root''' - mockenv = MockEnv() + if request.config.getoption('--fake'): + mockenv = FakeMockEnv() + else: + mockenv = MockEnv() files = ['taskotron_python_versions'] + glob.glob('*.py') + ['tests.yml'] mockenv.copy_in(files) yield mockenv From 71cce323c298f71311b93f6a221ad8718672276f Mon Sep 17 00:00:00 2001 From: Iryna Shcherbina Date: Fri, 2 Feb 2018 15:58:56 +0100 Subject: [PATCH 17/20] Add artifacts directory to gitignore for direct runs --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 09c5976..c46f4d2 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ *.pyo *.pyc __pycache__ +/artifacts/ /artifacts-*/ /build/ /dist/ From 5b945f18160a1881c423a633fe6af6c0cd4d4543 Mon Sep 17 00:00:00 2001 From: Iryna Shcherbina Date: Fri, 2 Feb 2018 17:42:58 +0100 Subject: [PATCH 18/20] Update tests section in README --- README.rst | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/README.rst b/README.rst index efde82b..a9fa2a9 100644 --- a/README.rst +++ b/README.rst @@ -38,7 +38,7 @@ For example:: You can see the results in ``./artifacts/`` directory. 
-You can run the above in mock:: +You can also run the above in mock:: $ mock -r ./mock.cfg --init $ mock -r ./mock.cfg --copyin taskotron_python_versions *.py tests.yml / @@ -49,19 +49,18 @@ You can run the above in mock:: Tests ----- -There are also automatic tests available. You can run them using -`tox `__. -You'll need the above mentioned dependencies and ``python3-rpm`` -and ``python3-dnf`` installed as well. +This task is covered with functional and integration tests. +You can run them using `tox `__, but +you will need ``mock``, ``python3-rpm`` and ``python3-dnf`` installed. .. code:: console $ tox -Automatic tests also happen on `Tarvis +The tests are also being executed on `Travis CI `__. Since Travis CI runs on Ubuntu -and Ubuntu lacks the RPM Python bindings and Taskotron, +and Ubuntu lacks the RPM Python bindings and mock, `Docker `__ is used to run the tests on Fedora. You can run the tests in Docker as well, just use the commands from the ``.travis.yml`` file. From 651975daff55e661ea2a9658b4107b3b73436eb2 Mon Sep 17 00:00:00 2001 From: Iryna Shcherbina Date: Fri, 2 Feb 2018 18:06:03 +0100 Subject: [PATCH 19/20] Add generated tests.retry file to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c46f4d2..88d5232 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ __pycache__ /.eggs/ .cache *.egg-info +tests.retry From dbc9ac1f382b9dba6ac469b437221cd11bec04ad Mon Sep 17 00:00:00 2001 From: Iryna Shcherbina Date: Fri, 2 Feb 2018 19:02:35 +0100 Subject: [PATCH 20/20] Document how to use --fake option in integration tests --- README.rst | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index a9fa2a9..1989c41 100644 --- a/README.rst +++ b/README.rst @@ -45,18 +45,27 @@ You can also run the above in mock:: $ mock -r ./mock.cfg --shell 'ansible-playbook tests.yml -e taskotron_item=python-gear-0.11.0-1.fc27' $ mock -r ./mock.cfg --copyout 
artifacts artifacts - Tests ----- This task is covered with functional and integration tests. You can run them using `tox `__, but you will need ``mock``, ``python3-rpm`` and ``python3-dnf`` installed. - -.. code:: console +For mock configuration see +`mock setup `__ +instructions. Use the following command to run the test suite:: $ tox +The integration tests may take a while to execute, as they are +running real tasks in mock. However, for development you may +speed them up by reusing the results of the previous test run. +This is useful if you modify the test itself, without changing the +implementation of task checks. Use the following command to run +integration tests in a fake mode:: + + $ tox -e integration -- --fake + The tests are also being executed on `Travis CI `__. Since Travis CI runs on Ubuntu