Skip to content

Commit

Permalink
Drop Python 2.7 support from CI and tox
Browse files Browse the repository at this point in the history
  • Loading branch information
kalikiana committed Mar 10, 2021
1 parent 1e522d2 commit fbf69a9
Show file tree
Hide file tree
Showing 8 changed files with 39 additions and 110 deletions.
1 change: 0 additions & 1 deletion .travis.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
sudo: false
language: python
python:
- "2.7"
- "3.6"
- "3.7"
- "3.8"
Expand Down
7 changes: 1 addition & 6 deletions openqa_review/browser.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,3 @@
# Python 2 and 3: easiest option
# see http://python-future.org/compatible_idioms.html
from future.standard_library import install_aliases # isort:skip to keep 'install_aliases()'
install_aliases()

import codecs
import json
import logging
Expand Down Expand Up @@ -89,7 +84,7 @@ def get_page(self, url, as_json=False, cache=True):
log.info('Loading content instead of URL %s from filename %s' % (url, filename))
try:
raw = codecs.open(os.path.join(self.load_dir, filename), 'r', 'utf8').read()
except IOError as e:
except FileNotFoundError as e:
if e.errno == errno.ENOENT:
msg = 'Request to %s was not successful, file %s not found' % (url, filename)
log.info(msg)
Expand Down
66 changes: 29 additions & 37 deletions openqa_review/openqa_review.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,14 +87,6 @@
"""

# Python 2 and 3: easiest option
# see http://python-future.org/compatible_idioms.html
from __future__ import absolute_import, unicode_literals

from future.standard_library import install_aliases # isort:skip to keep 'install_aliases()'
install_aliases()
from future.utils import iteritems

import argparse
import codecs
import datetime
Expand Down Expand Up @@ -305,16 +297,16 @@ def get_arch_state_results(arch, current_details, previous_details, output_state
skipped = get_skipped_dict(arch, current_details)

test_results_previous_dict = {i['id']: i for i in test_results_previous if i['id'] in test_results_dict.keys()}
states = SortedDict(get_state(v, test_results_previous_dict) for k, v in iteritems(test_results_dict))
states = SortedDict(get_state(v, test_results_previous_dict) for k, v in test_results_dict.items())

# intermediate step:
# - print report of differences
interesting_states = SortedDict({k.split(arch + '_')[1]: v for k, v in iteritems(states) if v['state'] != 'STABLE'})
interesting_states = SortedDict({k.split(arch + '_')[1]: v for k, v in states.items() if v['state'] != 'STABLE'})

if output_state_results:
print('arch: %s' % arch)
for state in interesting_states_names:
print('\n%s:\n\t%s\n' % (state, ', '.join(k for k, v in iteritems(interesting_states) if v['state'] == state)))
print('\n%s:\n\t%s\n' % (state, ', '.join(k for k, v in interesting_states.items() if v['state'] == state)))
interesting_states.update({'skipped': skipped})
return interesting_states

Expand Down Expand Up @@ -379,7 +371,7 @@ def get_results_by_bugref(results, args):

# plain for-loop with append is most efficient: https://stackoverflow.com/questions/11276473/append-to-a-dict-of-lists-with-a-dict-comprehension
results_by_bugref = defaultdict(list)
for k, v in iteritems(results):
for k, v in results.items():
if not re.match('(' + '|'.join(include_tags) + ')', v['state']):
continue
key = v['bugref'] if (args.bugrefs and 'bugref' in v and v['bugref']) else 'todo'
Expand All @@ -405,8 +397,8 @@ def find_builds(builds, running_threshold=0):
# filter out empty builds
def non_empty(r):
return r['total'] != 0 and r['total'] > r['skipped'] and not ('build' in r.keys() and r['build'] is None)
builds = {build: result for build, result in iteritems(builds) if non_empty(result)}
finished = {build: result for build, result in iteritems(builds) if not result['unfinished']
builds = {build: result for build, result in builds.items() if non_empty(result)}
finished = {build: result for build, result in builds.items() if not result['unfinished']
or (100 * float(result['unfinished']) / result['total']) <= threshold}

log.debug('Found the following finished non-empty builds: %s' % ', '.join(finished.keys()))
Expand Down Expand Up @@ -574,7 +566,7 @@ def build_link(v):
component_config_section = 'product_issues:%s:component_mapping' % root_url.rstrip('/')
try:
components_config_dict = dict(config.items(component_config_section))
component = [v for k, v in iteritems(components_config_dict) if re.match(k, complete_module)][0]
component = [v for k, v in components_config_dict.items() if re.match(k, complete_module)][0]
except (NoSectionError, IndexError) as e: # pragma: no cover
log.info("No matching component could be found for the module_folder '%s' and module name '%s' in the config section '%s'" % (module_folder, module, e))
component = ''
Expand Down Expand Up @@ -826,7 +818,7 @@ def __init__(self, arch, results, args, root_url, progress_browser, bugzilla_bro
# ... else (no ticket linked) we don't group them as we don't know if it really is the same issue and handle them outside
results_by_bugref = SortedDict(get_results_by_bugref(results, self.args))
self.issues = defaultdict(lambda: defaultdict(list))
for bugref, result_list in iteritems(results_by_bugref):
for bugref, result_list in results_by_bugref.items():
if re.match('todo', bugref):
log.info('Skipping "todo" bugref \'%s\' in \'%s\'' % (bugref, result_list))
continue
Expand All @@ -849,7 +841,7 @@ def __init__(self, arch, results, args, root_url, progress_browser, bugzilla_bro
self.issues['existing']['product'].append(IssueEntry(self.args, self.root_url, existing_soft_fails))

def _search_for_bugrefs_for_softfailures(self, results):
for k, v in iteritems(results):
for k, v in results.items():
if v['state'] in soft_fail_states:
try:
module_url = self._get_url_to_softfailed_module(v['href'])
Expand Down Expand Up @@ -881,8 +873,8 @@ def _search_for_bugrefs_for_softfailures(self, results):
def total_issues(self):
"""Return Number of issue entries for this arch."""
total = 0
for issue_status, issue_types in iteritems(self.issues):
for issue_type, ies in iteritems(issue_types):
for issue_status, issue_types in self.issues.items():
for issue_type, ies in issue_types.items():
total += len(ies)
return total

Expand Down Expand Up @@ -980,8 +972,8 @@ def __init__(self, browser, job_group_url, root_url, args):
current_summary = parse_summary(current_details)
previous_summary = parse_summary(previous_details)

changes = SortedDict({k: v - previous_summary.get(k, 0) for k, v in iteritems(current_summary)})
self.changes_str = '***Changes since reference build***\n\n* ' + '\n* '.join('%s: %s' % (k, v) for k, v in iteritems(changes)) + '\n'
changes = SortedDict({k: v - previous_summary.get(k, 0) for k, v in current_summary.items()})
self.changes_str = '***Changes since reference build***\n\n* ' + '\n* '.join('%s: %s' % (k, v) for k, v in changes.items()) + '\n'
log.info('%s' % self.changes_str)

self.build = get_build_nr(current_url)
Expand Down Expand Up @@ -1154,11 +1146,11 @@ def _pgroup_prefix(group):
job_groups[_pgroup_prefix(job_group)] = urljoin(root_url, '/group_overview/%i' % job_group['id'])
if args.job_groups:
job_pattern = re.compile('(%s)' % '|'.join(args.job_groups.split(',')))
job_groups = {k: v for k, v in iteritems(job_groups) if job_pattern.search(k)}
job_groups = {k: v for k, v in job_groups.items() if job_pattern.search(k)}
log.info('Job group URL for %s: %s' % (args.job_groups, job_groups))
if args.exclude_job_groups:
job_pattern = re.compile('(%s)' % '|'.join(args.exclude_job_groups.split(',')))
job_groups = {k: v for k, v in iteritems(job_groups) if not job_pattern.search(k)}
job_groups = {k: v for k, v in job_groups.items() if not job_pattern.search(k)}
log.info('Job group URL excluding %s: %s' % (args.exclude_job_groups, job_groups))
return SortedDict(job_groups)

Expand All @@ -1178,7 +1170,7 @@ def __init__(self, browser, args, root_url, job_groups):
self._progress = 0
self.report = SortedDict()

for k, v in iteritems(job_groups):
for k, v in job_groups.items():
log.info("Processing '%s'" % v)
if args.no_progress or not humanfriendly_available:
self.report[k] = self._one_report(v)
Expand All @@ -1203,7 +1195,7 @@ def _next_label(self):
def __str__(self):
"""Generate markdown."""
report_str = ''
for k, v in iteritems(self.report):
for k, v in self.report.items():
report_str += '# %s\n\n%s\n---\n' % (k, v)
return report_str

Expand Down Expand Up @@ -1250,14 +1242,14 @@ def load_config():


def filter_report(report, iefilter):
report.report = SortedDict({p: pr for p, pr in iteritems(report.report) if isinstance(pr, ProductReport)})
for product, pr in iteritems(report.report):
for arch, ar in iteritems(pr.reports):
for issue_status, issue_types in iteritems(ar.issues):
for issue_type, ies in iteritems(issue_types):
report.report = SortedDict({p: pr for p, pr in report.report.items() if isinstance(pr, ProductReport)})
for product, pr in report.report.items():
for arch, ar in pr.reports.items():
for issue_status, issue_types in ar.issues.items():
for issue_type, ies in issue_types.items():
issue_types[issue_type] = [ie for ie in ies if iefilter(ie)]
pr.reports = SortedDict({a: ar for a, ar in iteritems(pr.reports) if ar.total_issues > 0})
report.report = SortedDict({p: pr for p, pr in iteritems(report.report) if pr.reports})
pr.reports = SortedDict({a: ar for a, ar in pr.reports.items() if ar.total_issues > 0})
report.report = SortedDict({p: pr for p, pr in report.report.items() if pr.reports})


def reminder_comment_on_issue(ie, min_days_unchanged=MIN_DAYS_UNCHANGED):
Expand All @@ -1274,11 +1266,11 @@ def reminder_comment_on_issue(ie, min_days_unchanged=MIN_DAYS_UNCHANGED):

def reminder_comment_on_issues(report, min_days_unchanged=MIN_DAYS_UNCHANGED):
processed_issues = set()
report.report = SortedDict({p: pr for p, pr in iteritems(report.report) if isinstance(pr, ProductReport)})
for product, pr in iteritems(report.report):
for arch, ar in iteritems(pr.reports):
for issue_status, issue_types in iteritems(ar.issues):
for issue_type, ies in iteritems(issue_types):
report.report = SortedDict({p: pr for p, pr in report.report.items() if isinstance(pr, ProductReport)})
for product, pr in report.report.items():
for arch, ar in pr.reports.items():
for issue_status, issue_types in ar.issues.items():
for issue_type, ies in issue_types.items():
for ie in ies:
issue = ie.bug
if issue:
Expand Down
13 changes: 3 additions & 10 deletions openqa_review/tumblesle_release.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,6 @@
notifications are serialized in JSON strings.
"""

# Python 2 and 3: easiest option
# see http://python-future.org/compatible_idioms.html
from __future__ import absolute_import
from future.standard_library import install_aliases # isort:skip to keep 'install_aliases()'
install_aliases()
from future.utils import iteritems

import argparse
import fnmatch
import glob
Expand Down Expand Up @@ -253,7 +246,7 @@ def check_last_builds(self):
hard_failed_jobs = {k: self._filter_whitelisted_fails(jobs_by_result[k]['failed']) for k in ['released', 'last']}
# count passed, failed for both released/new
passed['released'] = len(jobs_by_result['released']['passed']) + len(jobs_by_result['released']['softfailed'])
hard_failed = {k: len(v) for k, v in iteritems(hard_failed_jobs)}
hard_failed = {k: len(v) for k, v in hard_failed_jobs.items()}
whitelisted = {'last': failed['last'] - hard_failed['last']}
passed['last'] += whitelisted['last']
assert (passed['last'] + hard_failed['last']) > 0, "passed['last'] (%s) + hard_failed['last'] (%s) must be more than zero" % (
Expand All @@ -266,8 +259,8 @@ def check_last_builds(self):
self.release_build = build['last']
# TODO auto-remove entries from whitelist which are passed now
else:
hard_failed_jobs_by_scenario = {k: {scenario(j): j for j in v} for k, v in iteritems(hard_failed_jobs)}
sets = {k: set(v) for k, v in iteritems(hard_failed_jobs_by_scenario)}
hard_failed_jobs_by_scenario = {k: {scenario(j): j for j in v} for k, v in hard_failed_jobs.items()}
sets = {k: set(v) for k, v in hard_failed_jobs_by_scenario.items()}
new_failures = sets['last'].difference(sets['released'])
new_fixed = sets['released'].difference(sets['last'])
log.info('Regression in new build %s, new failures: %s' % (build['last'], ', '.join(new_failures)))
Expand Down
12 changes: 0 additions & 12 deletions setup.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import os
import sys
from subprocess import check_output, CalledProcessError

from setuptools import setup
Expand All @@ -9,12 +8,6 @@
# Also, when git is not available (PyPi package), use stored version.py.
version_py = os.path.join(os.path.dirname(__file__), 'version.py')

# python2 backwards craft
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError

try:
# This will not generate PEP440 compliant version strings for any commit
# that is not on the tag itself. setuptools/dist will give a warning.
Expand All @@ -32,7 +25,6 @@

install_requires = [
'beautifulsoup4',
'future',
'sortedcontainers',
'humanfriendly',
'requests',
Expand All @@ -41,10 +33,6 @@
'certifi',
]

# there is also a new version 'configparser2' to resolve the name ambiguity but that package might not be available everywhere
if sys.version_info < (3, 0):
install_requires += ['configparser']

setup(
name='openqa_review',
version='{ver}'.format(ver=version_git),
Expand Down
29 changes: 4 additions & 25 deletions tests/test_openqa_review.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,9 @@
isort:skip_file
"""

# see http://python-future.org/compatible_idioms.html
from __future__ import unicode_literals
from future.standard_library import install_aliases # isort:skip to keep 'install_aliases()'
from future.utils import iteritems

install_aliases()
import codecs
import contextlib
import os.path
import re
import shutil
import sys
import tempfile
from argparse import Namespace
Expand Down Expand Up @@ -65,16 +57,6 @@ def browser_factory(args=None):
return openqa_review.Browser(args, urljoin(args.host, args.base_url))


# similar to python3.2 TemporaryDirectory, not available on older versions
# also see http://stackoverflow.com/a/13379969/5031322

@contextlib.contextmanager
def TemporaryDirectory(): # noqa
temp_dir = tempfile.mkdtemp()
yield temp_dir
shutil.rmtree(temp_dir)


def test_help():
sys.argv += '--help'.split()
with pytest.raises(SystemExit):
Expand Down Expand Up @@ -206,10 +188,7 @@ def test_specified_job_group_yields_single_product_report():
# openqa_review tries to parse all and as there is no cache page
# for 'openSUSE Tumbleweed 2.KDE' saved, we assume its corresponding
# page can not be retrieved.
# Unfortunately we can not easily be more specific about the
# exception as python2 raises IOError, python3 FileNotFoundError
# but we can check the content anyway.
with pytest.raises(Exception) as e:
with pytest.raises(FileNotFoundError) as e:
report = str(openqa_review.generate_report(args))
assert 'group_overview:26' in str(e.value)

Expand All @@ -219,7 +198,7 @@ def test_specified_job_group_yields_single_product_report():
# It will be invisible but executed.
args.no_progress = False
# see above
with pytest.raises(Exception) as e:
with pytest.raises(FileNotFoundError) as e:
report = str(openqa_review.generate_report(args))
assert 'group_overview:26' in str(e.value)

Expand Down Expand Up @@ -377,7 +356,7 @@ def test_filename_to_url_encodes_valid_url():

def test_single_job_group_pages_can_be_cached_from_cache():
args = cache_test_args_factory()
with TemporaryDirectory() as tmp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
args.save_dir = tmp_dir
args.save = True
report = str(openqa_review.generate_report(args))
Expand Down Expand Up @@ -491,7 +470,7 @@ def test_reminder_comments_on_referenced_bugs_are_posted():
report = openqa_review.generate_report(args)

# test double comment prevention code
p, pr = list(iteritems(report.report))[0]
p, pr = list(report.report.items())[0]
report.report[p + 237] = pr

openqa_review.reminder_comment_on_issues(report)
Expand Down
18 changes: 1 addition & 17 deletions tests/test_tumblesle_release.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,6 @@
# see http://python-future.org/compatible_idioms.html
from future.standard_library import install_aliases # isort:skip to keep 'install_aliases()'

install_aliases()
import contextlib
import os
import os.path
import shutil
import sys
import tempfile
from argparse import Namespace
Expand All @@ -16,20 +11,9 @@
from openqa_review.tumblesle_release import UnsupportedRsyncArgsError


# similar to python3.2 TemporaryDirectory, not available on older versions
# also see http://stackoverflow.com/a/13379969/5031322


@contextlib.contextmanager
def TemporaryDirectory(): # noqa
temp_dir = tempfile.mkdtemp()
yield temp_dir
shutil.rmtree(temp_dir)


@contextlib.contextmanager
def TumblesleDirectory(args): # noqa
with TemporaryDirectory() as tmp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
args.src = os.path.join(tmp_dir, 'src') + '/'
args.dest = os.path.join(tmp_dir, 'dest') + '/'
# create a fake config entry
Expand Down
3 changes: 1 addition & 2 deletions tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,14 @@
# and then run "tox" from this directory.

[tox]
envlist = flake8,doctests,py{27,36,37,38},cov
envlist = flake8,doctests,py{36,37,38},cov
deps = -rrequirements.txt
skip_missing_interpreters = true

[travis]
# splitting the subtests among each environment for speed reasons
# TODO doctests fail to find modules on travis
python =
2.7: py27
3.6: flake8,py36
3.7: py37
3.8: py38,cov
Expand Down

0 comments on commit fbf69a9

Please sign in to comment.