From cf1fb7afd9e0ad74e119238cd44592028f89aff9 Mon Sep 17 00:00:00 2001 From: Lubomir Gallovic Date: Wed, 15 Apr 2020 12:07:15 +0200 Subject: [PATCH 1/2] Make code compatible with Python 3 [CLOUDDST-27] 2to3 tool was used to automatically detect code lines which are problematic for Python 3. They were fixed in a way that ensures continued Python 2 compatibility. 'six' package was added as a dependency as it is used in some fixes. --- alt_src/__init__.py | 2 +- alt_src/alt_src.py | 88 ++++++++++++++++++++++--------------------- requirements.txt | 1 + test-requirements.txt | 1 - 4 files changed, 47 insertions(+), 45 deletions(-) diff --git a/alt_src/__init__.py b/alt_src/__init__.py index 9e0a226..c8d6a1d 100644 --- a/alt_src/__init__.py +++ b/alt_src/__init__.py @@ -1 +1 @@ -from alt_src import * +from .alt_src import * diff --git a/alt_src/alt_src.py b/alt_src/alt_src.py index eec28aa..37ff7a8 100644 --- a/alt_src/alt_src.py +++ b/alt_src/alt_src.py @@ -2,10 +2,12 @@ ''' Given an srpm and product, stage for alt-src release ''' +from __future__ import print_function -import ConfigParser +import six +from six.moves import configparser import copy -import cStringIO +from six.moves import cStringIO as StringIO import datetime import errno import fcntl @@ -22,8 +24,8 @@ import sys import time import traceback -from urllib import urlencode -from urllib2 import Request, urlopen +from six.moves.urllib.parse import urlencode +from six.moves.urllib.request import Request, urlopen import yaml import simplejson as json @@ -73,7 +75,7 @@ def get_config(cfile, overrides): if not os.access(cfile, os.F_OK): die("Missing config file: %s" % cfile) - confp = ConfigParser.RawConfigParser() + confp = configparser.RawConfigParser() confp.read(cfile) if not confp.has_section('altsrc'): die("Configuration file missing [altsrc] section: %s" % cfile) @@ -180,14 +182,14 @@ def __init__(self, options): self.rpms_or_module_dir = 'rpms' if not os.path.isfile(self.source_file): - raise StartupError, "No such file: %s" % self.source_file + raise StartupError("No such file: %s" % self.source_file) def run(self): raise NotImplementedError def add_error_logger(self): """Capture all error messages for use in later notifications""" - self.error_log = cStringIO.StringIO() + self.error_log = StringIO() handler = logging.StreamHandler(self.error_log) handler.setFormatter(logging.Formatter(self.options.config['log_format'])) handler.setLevel(logging.ERROR) @@ -195,14 +197,14 @@ def add_error_logger(self): def setup_logfile(self, logname): if not os.path.isdir(self.workdir): - raise SanityError, "Work dir does not exist: %s" % self.workdir + raise SanityError("Work dir does not exist: %s" % self.workdir) fname = fname_ = os.path.join(self.workdir, logname) ntimes = 0 while os.path.exists(fname): # don't overwrite an old logfile ntimes += 1 if ntimes > 1024: - raise SanityError, "Too many log backups" + raise SanityError("Too many log backups") fname = "%s.%d" % (fname_, ntimes) self.logfile = file(fname, 'w') handler = logging.StreamHandler(self.logfile) @@ -250,7 +252,7 @@ def log_cmd(self, cmd, logfile=None, fatal=True, tries=None, **kwargs): time.sleep(sleep_time) if ret: if fatal: - raise CommandError, "command failed: %r" % cmd + raise CommandError("command failed: %r" % cmd) #otherwise self.logger.warn("Command failed: %r", cmd) return ret @@ -276,7 +278,7 @@ def get_output(self, cmd, fatal=True, **kwargs): if retval: self.logger.warn("Command failed: %r", cmd) if fatal: - raise CommandError, "command failed: %r" % cmd + 
raise CommandError("command failed: %r" % cmd) return output, retval def _get_koji_session_and_pathinfo(self): @@ -289,9 +291,9 @@ def get_koji_srpm(self): session, pathinfo = self._get_koji_session_and_pathinfo() rpminfo = session.getRPM(self.options.source) if not rpminfo: - raise SanityError, "No such rpm: %s" % self.options.source + raise SanityError("No such rpm: %s" % self.options.source) if rpminfo['arch'] != 'src': - raise SanityError, "Not a source rpm: %s" % self.options.source + raise SanityError("Not a source rpm: %s" % self.options.source) binfo = session.getBuild(rpminfo['build_id']) bdir = pathinfo.build(binfo) relpath = pathinfo.rpm(rpminfo) @@ -336,7 +338,7 @@ def read_srpm(self): headers = koji.get_rpm_header(self.source_file) self.headers = headers if headers[rpm.RPMTAG_SOURCEPACKAGE] != 1: - raise InputError, "%s is not a source package" % self.source_file + raise InputError("%s is not a source package" % self.source_file) data = koji.get_header_fields(headers, ['name', 'version', 'release', 'summary']) self.nvr = "%(name)s-%(version)s-%(release)s" % data self.package = data['name'] @@ -375,7 +377,7 @@ def check_package(self): self.logger.debug("Got blacklist: %r", blacklist) if blacklist and koji.util.multi_fnmatch(self.package, blacklist): # raise FilterError, 'Blacklisted package: %s' % self.package - print 'Blacklisted package: %s, quitting' % self.package + print('Blacklisted package: %s, quitting' % self.package) sys.exit(0) def git_push_url(self): @@ -440,7 +442,7 @@ def find_spec(self, relative=False): if fname.endswith('.spec'): return os.path.join(specdir, fname) - raise SanityError, 'No spec file in checkout: %s' % self.checkout + raise SanityError('No spec file in checkout: %s' % self.checkout) def duplicate_check(self): """Check to see if we're already on remote""" @@ -485,7 +487,7 @@ def get_state(self): def set_state(self, state): if not self.workdir: - raise SanityError, "No workdir to set state for" + raise SanityError("No workdir to set state for") statefile = os.path.join(self.workdir, 'state') self.logger.debug('Writing state %s to file %s', state, statefile) fobj = open(statefile, 'w') @@ -684,7 +686,7 @@ def make_workdir(self): self.workdir = dirname = self.get_workdir() koji.ensuredir(os.path.dirname(self.workdir)) if os.path.islink(dirname): - raise SanityError, "%s is a symlink" % dirname + raise SanityError("%s is a symlink" % dirname) elif os.path.isdir(dirname): # TODO - more sanity checks self.set_in_progress() @@ -707,7 +709,7 @@ def make_workdir(self): will overwrite.", dirname, state) shutil.rmtree(dirname) elif os.path.exists(dirname): - raise SanityError, "%s exists and is not a directory" % dirname + raise SanityError("%s exists and is not a directory" % dirname) self.logger.info('Creating working directory: %s', dirname) koji.ensuredir(dirname) self.set_in_progress() @@ -1019,7 +1021,7 @@ def copy_to_lookaside(self, path, digest): if st1.st_size != st2.st_size: self.logger.error("Possibly corrupt lookaside entry: %s", lpath) self.logger.error("Size: %s, but current matching source is %s", st1.st_size, st2.st_size) - raise SanityError, "Lookaside size mismatch" + raise SanityError("Lookaside size mismatch") # TODO - more sanity checks self.logger.info('Skipping source, already in digest: %s', path) @@ -1030,7 +1032,7 @@ def debrand(self): if not self.options.config['debrand']: self.logger.warning("Debranding is disabled") return - confp = ConfigParser.RawConfigParser() + confp = configparser.RawConfigParser() for name in 
'altsrc-global', self.package: cfile = os.path.join(self.options.config['rulesdir'], name + '.cfg') self.logger.debug('Looking for rules in %s', cfile) @@ -1063,7 +1065,7 @@ def debrand(self): for key, rtype, section in rules: handler = 'rule_handler_%s' % rtype if not hasattr(self, handler): - raise ConfigError, "No handler for rule type %s" % rtype + raise ConfigError("No handler for rule type %s" % rtype) data = dict(confp.items(section)) if 'enabled' in data: enabled = data['enabled'].lower().strip() @@ -1142,7 +1144,7 @@ def debrand(self): cmd = ['git', 'diff', '--cached', '--name-only'] output, _ = self.get_output(cmd, cwd=self.checkout, stderr='keep', fatal=False) if not output: - raise SanityError, "Debranding rules made no changes" + raise SanityError("Debranding rules made no changes") # caller will clean up cmd = self.git_base_cmd() @@ -1302,13 +1304,13 @@ def rule_handler_mmd(self, data): if isinstance(current_data, list): gen = enumerate(current_data) elif isinstance(current_data, dict): - gen = current_data.items() + gen = list(current_data.items()) if gen: for key, val in gen: if re.match(matching_path[0], str(key)): stack.append((val, current_data, key, matching_path[1:])) - if isinstance(current_data, basestring): + if isinstance(current_data, six.string_types): replaced = re.sub(matching_path[0], data['replace'], current_data) @@ -1396,7 +1398,7 @@ def handle_add_patch(self, data): # http://www.rpm.org/max-rpm/s1-rpm-inside-tags.html if patchnum == pnum: self.logger.error("Patch %s already present: %s", patchnum, line) - raise SanityError, "Duplicate patch number" + raise SanityError("Duplicate patch number") elif alt_re.search(line): l_alt = lineno if patchnum == -1: @@ -1435,7 +1437,7 @@ def handle_add_patch(self, data): lines.insert(lnum + 1, entry) self.logger.debug("Inserting spec line: %i: %s", lnum+1, entry) else: - raise SanityError, "Unable to apply patch %s" % patchname + raise SanityError("Unable to apply patch %s" % patchname) elif data.get('apply', 'y').lower() in ('y', 'yes', '1', 'true'): entry = "%%patch%d -p%d\n" % (patchnum, patchstrip) apply_re = re.compile(r'^\s*%patch\d+\s+-p\d') @@ -1463,7 +1465,7 @@ def handle_add_patch(self, data): lines.insert(lsetup + 1, entry) self.logger.debug("Inserting spec line: %i: %s", lsetup+1, entry) else: - raise SanityError, "Unable to apply patch %s" % patchname + raise SanityError("Unable to apply patch %s" % patchname) # write it back out fobj = file(specfile, 'w') @@ -1493,7 +1495,7 @@ def handle_rm_patch(self, data): patch_re = re.compile(r'^\s*[pP]atch(' + str(patchnum) + r'):\s+(\S+?)\s*$') else: self.logger.error('No patch specified for removal') - raise SanityError, 'Invalid rule' + raise SanityError('Invalid rule') specfile = self.find_spec() fobj = file(specfile, 'r') @@ -1510,7 +1512,7 @@ def handle_rm_patch(self, data): break else: self.logger.error("No match for pattern: %r", patch_re.pattern) - raise SanityError, "Could not find patch to remove" + raise SanityError("Could not find patch to remove") # remove the matching line if lineno: del lines[lineno] @@ -1531,7 +1533,7 @@ def handle_rm_patch(self, data): self.logger.warning('Patch %s appears to be applied by %%autosetup', patchname) else: self.logger.error('No %%patch line for patch %s', patchname) - raise SanityError, "Unable to remove patch" + raise SanityError("Unable to remove patch") # write it back out fobj = file(specfile, 'w') @@ -1549,7 +1551,7 @@ def rule_handler_source(self, data): #XXX support del? 
else: self.logger.error('Unknown source rule method: %s', method) - raise ConfigError, 'Invalid method in source rule' + raise ConfigError('Invalid method in source rule') def handle_add_source(self, data): """Add a source entry in spec file""" @@ -1576,7 +1578,7 @@ def handle_add_source(self, data): # http://www.rpm.org/max-rpm/s1-rpm-inside-tags.html if sourcenum == snum: self.logger.error("Source %s already present: %s", sourcenum, line) - raise SanityError, "Duplicate source number" + raise SanityError("Duplicate source number") elif name_re.search(line): lname = lineno if sourcenum == -1: @@ -1626,7 +1628,7 @@ def handle_replace_source(self, data): break else: self.logger.error('Could not find source, no match for %r', source_re.pattern) - raise SanityError, 'No such source' + raise SanityError('No such source') # ... and replace it entry = "%s%s\n" % (head, sourcename) lines[lnum] = entry @@ -1646,7 +1648,7 @@ def rule_handler_script(self, data): fname = data['script'] script = os.path.join(self.options.config['rulesdir'], fname) if not os.path.isfile(script): - raise ConfigError, 'Script missing: %s' % script + raise ConfigError('Script missing: %s' % script) cmd = [script, self.checkout, self.find_spec()] self.log_cmd(cmd, cwd=self.checkout) @@ -1698,9 +1700,9 @@ def check_workdir(self): self.checkout = os.path.join(self.workdir, "checkout") self.logger.info('Checking working directory: %s', dirname) if os.path.islink(dirname): - raise SanityError, "%s is a symlink" % dirname + raise SanityError("%s is a symlink" % dirname) if not os.path.isdir(dirname): - raise SanityError, "Not staged. No such directory: %s" % dirname + raise SanityError("Not staged. No such directory: %s" % dirname) state = self.get_state() if state == 'UNTAGGED': if self.options.config['push_tags']: @@ -1713,7 +1715,7 @@ def check_workdir(self): self.logger.warn('Already pushed') return state if state != 'STAGED': - raise SanityError, "Staging incomplete" + raise SanityError("Staging incomplete") return state def add_changelog(self): @@ -1749,7 +1751,7 @@ def add_changelog(self): return elif len(parts) == 2: # should not be possible - raise SanityError, 'Unable to split changelog from spec' + raise SanityError('Unable to split changelog from spec') outf = file(spec, 'w') for part in parts[:2]: outf.write(part) @@ -1859,7 +1861,7 @@ def push_git(self, state): cmd = ['git', 'diff', '--cached', '--name-only'] output, _ = self.get_output(cmd, cwd=self.checkout, stderr='keep', fatal=False) if not output: - raise SanityError, "Debranding commits resulted in no changes?" 
+ raise SanityError("Debranding commits resulted in no changes?") #commit cmd = self.git_base_cmd() cmd.extend(['commit', '-m', 'debrand %s' % self.nvr]) @@ -1973,7 +1975,7 @@ def explode_srpm(srpm, destdir=None, logfile=None): if header[rpm.RPMTAG_SOURCEPACKAGE] != 1: # we checked this earlier, but since we're about to rpm -i it, # let's check again - raise SanityError, "%s is not a source package" % srpm + raise SanityError("%s is not a source package" % srpm) if destdir is None: destdir = os.getcwd() else: @@ -1988,7 +1990,7 @@ def explode_srpm(srpm, destdir=None, logfile=None): proc = subprocess.Popen(cmd, **popts) ret = proc.wait() if ret: - raise CommandError, "command failed: %r" % cmd + raise CommandError("command failed: %r" % cmd) def wipe_git_dir(dirname): @@ -2003,7 +2005,7 @@ def wipe_git_dir(dirname): def die(msg): - print msg + print(msg) sys.exit(1) diff --git a/requirements.txt b/requirements.txt index 0b715c0..bacc8b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,4 @@ rpm-py-installer requests PyYAML simplejson +six diff --git a/test-requirements.txt b/test-requirements.txt index 3e979d7..4f89b8b 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,7 +1,6 @@ mock PyHamcrest configparser -six > 1.10 # koji 1.20 doesn't include it # as dependency From a9098f89cf55038ab8adfa2915eda40536fd44ed Mon Sep 17 00:00:00 2001 From: Lubomir Gallovic Date: Wed, 15 Apr 2020 16:12:17 +0200 Subject: [PATCH 2/2] Fix Python 3 testcases [CLOUDDST-27] After the automatically detected problems were fixed, the following issues had to be resolved to ensure that the whole test suite passes in Python 3: - in testcase 'test_get_state_with_error_other_than_enoent', '__builtin__' was changed to 'six.moves.builtins' because the built-in library was renamed in Python 3. Also, the testcase itself had been broken because it had raised an IOError due to an incorrect setup (file was assigned as a directory) and not because of the patched 'open' function. It was fixed despite incidentally fulfilling its desired function in order to prevent user confusion. - In Python 3, output for invoked bash commands is a bytes type, which can cause many issues because the code expects to work with a string. Adding the 'universal_newlines' forces the output to be returned as a string. Also, print statements were changed to logger messages. 
--- alt_src/alt_src.py | 73 +++++++++++++++++++++---------------------- setup.py | 1 + tests/test_debrand.py | 15 +-------- tests/test_rpms.py | 51 +++++------------------------- 4 files changed, 46 insertions(+), 94 deletions(-) diff --git a/alt_src/alt_src.py b/alt_src/alt_src.py index 37ff7a8..430c740 100644 --- a/alt_src/alt_src.py +++ b/alt_src/alt_src.py @@ -2,12 +2,8 @@ ''' Given an srpm and product, stage for alt-src release ''' -from __future__ import print_function -import six -from six.moves import configparser import copy -from six.moves import cStringIO as StringIO import datetime import errno import fcntl @@ -19,15 +15,18 @@ import os.path import re import shutil +import simplejson as json +import six +from six.moves import configparser +from six.moves import cStringIO as StringIO +from six.moves.urllib.parse import urlencode +from six.moves.urllib.request import Request, urlopen import smtplib import subprocess import sys import time import traceback -from six.moves.urllib.parse import urlencode -from six.moves.urllib.request import Request, urlopen import yaml -import simplejson as json import koji import rpm @@ -206,7 +205,7 @@ def setup_logfile(self, logname): if ntimes > 1024: raise SanityError("Too many log backups") fname = "%s.%d" % (fname_, ntimes) - self.logfile = file(fname, 'w') + self.logfile = open(fname, 'w') handler = logging.StreamHandler(self.logfile) handler.setFormatter(logging.Formatter(self.options.config['log_format'])) handler.setLevel(self.options.file_log_level) @@ -265,13 +264,13 @@ def get_output(self, cmd, fatal=True, **kwargs): if 'stderr' in kwargs: # convenience values if kwargs['stderr'] == 'null': - kwargs['stderr'] = file('/dev/null', 'w') + kwargs['stderr'] = open('/dev/null', 'w') elif kwargs['stderr'] == 'keep': kwargs['stderr'] = subprocess.STDOUT elif self.logfile: self.logfile.flush() kwargs['stderr'] = self.logfile - proc = subprocess.Popen(cmd, **kwargs) + proc = subprocess.Popen(cmd, universal_newlines=True, **kwargs) output = proc.communicate()[0] self.logger.debug("Command output was:\n%s", output) retval = proc.wait() @@ -377,7 +376,7 @@ def check_package(self): self.logger.debug("Got blacklist: %r", blacklist) if blacklist and koji.util.multi_fnmatch(self.package, blacklist): # raise FilterError, 'Blacklisted package: %s' % self.package - print('Blacklisted package: %s, quitting' % self.package) + self.logger.info('Blacklisted package: %s, quitting' % self.package) sys.exit(0) def git_push_url(self): @@ -805,7 +804,7 @@ def init_new_repo(self): cmd = ['git', 'clone', '--bare', initdir, "repo_init.git"] self.log_cmd(cmd, cwd=self.workdir) descfile = os.path.join(self.workdir, "repo_init.git", "description") - fobj = file(descfile, 'w') + fobj = open(descfile, 'w') fobj.write(self.summary) fobj.write('\n') fobj.close() @@ -813,7 +812,7 @@ def init_new_repo(self): # add gitblit options to git config # XXX this content should not be hard coded git_config = os.path.join(self.workdir, "repo_init.git", "config") - fobj = file(git_config, 'a') + fobj = open(git_config, 'a') params = { 'summary' : self.summary, 'package' : self.package, @@ -950,8 +949,8 @@ def import_sources(self): to_move.sort() # move files to lookaside - meta = file(os.path.join(dst, ".%s.metadata" % self.package), 'w') - gitignore = file(os.path.join(dst, ".gitignore"), 'w') + meta = open(os.path.join(dst, ".%s.metadata" % self.package), 'w') + gitignore = open(os.path.join(dst, ".gitignore"), 'w') for fname in to_move: path = os.path.join(sourcedir, fname) digest = 
self.get_digest(path) @@ -997,7 +996,7 @@ def get_digest(self, path): """Calculate hex digest for file""" csum = hashlib.sha1() - fobj = file(path, 'rb') + fobj = open(path, 'rb') chunk = 'IGNORE ME!' while chunk: chunk = fobj.read(8192) @@ -1118,8 +1117,8 @@ def debrand(self): if self.for_lookaside(path): for_lookaside.append(fname) if for_lookaside: - meta = file(os.path.join(self.checkout, ".%s.metadata" % self.package), 'a') - gitignore = file(os.path.join(self.checkout, ".gitignore"), 'a') + meta = open(os.path.join(self.checkout, ".%s.metadata" % self.package), 'a') + gitignore = open(os.path.join(self.checkout, ".gitignore"), 'a') for fname in for_lookaside: path = os.path.join(self.checkout, fname) digest = self.get_digest(path) @@ -1167,7 +1166,7 @@ def handle_debrand_fail(self): self.logger.warning("Adding debranding failure notice") fname = os.path.join(self.checkout, "README.debrand") - fobj = file(fname, 'w') + fobj = open(fname, 'w') fobj.write('''\ Warning: This package was configured for automatic debranding, but the changes failed to apply. @@ -1202,7 +1201,7 @@ def prep_changelog(self, notes): parts.append('- %s\n' % line) else: parts.append('- %s\n' % line) - fobj = file(os.path.join(self.workdir, 'changelog.txt'), 'w') + fobj = open(os.path.join(self.workdir, 'changelog.txt'), 'w') for part in parts: fobj.write(part) self.logger.debug("%s", part) @@ -1279,7 +1278,7 @@ def rule_handler_re(self, data): fname = os.path.join(self.checkout, data['file']) else: fname = self.find_spec() - fobj = file(fname, 'r') + fobj = open(fname, 'r') text = fobj.read() fobj.close() count = int(data.get('count', '0')) @@ -1287,7 +1286,7 @@ def rule_handler_re(self, data): text = re.sub(data['match'], data['replace'], text, count) else: text = re.sub(data['match'], data['replace'], text) - fobj = file(fname, 'w') + fobj = open(fname, 'w') fobj.write(text) fobj.close() @@ -1329,7 +1328,7 @@ def handle_re_line(self, data): else: fname = self.find_spec() self.logger.info('Applying regex substitutions to %s', fname) - fobj = file(fname, 'r') + fobj = open(fname, 'r') lines = fobj.readlines() fobj.close() prog = re.compile(data['match']) @@ -1353,7 +1352,7 @@ def handle_re_line(self, data): else: self.logger.error('No matches for pattern %r', prog.pattern) # write it back out - fobj = file(fname, 'w') + fobj = open(fname, 'w') fobj.writelines(lines) fobj.close() @@ -1376,7 +1375,7 @@ def handle_add_patch(self, data): patchstrip = int(data.get('strip', '1')) self.logger.debug("Adding patch: %r", data) specfile = self.find_spec() - fobj = file(specfile, 'r') + fobj = open(specfile, 'r') lines = fobj.readlines() fobj.close() # find highest patch number and last patch line location @@ -1468,7 +1467,7 @@ def handle_add_patch(self, data): raise SanityError("Unable to apply patch %s" % patchname) # write it back out - fobj = file(specfile, 'w') + fobj = open(specfile, 'w') fobj.writelines(lines) fobj.close() @@ -1498,7 +1497,7 @@ def handle_rm_patch(self, data): raise SanityError('Invalid rule') specfile = self.find_spec() - fobj = file(specfile, 'r') + fobj = open(specfile, 'r') lines = fobj.readlines() fobj.close() @@ -1536,7 +1535,7 @@ def handle_rm_patch(self, data): raise SanityError("Unable to remove patch") # write it back out - fobj = file(specfile, 'w') + fobj = open(specfile, 'w') fobj.writelines(lines) fobj.close() @@ -1560,7 +1559,7 @@ def handle_add_source(self, data): sourcefile = os.path.join(self.options.config['rulesdir'], data['source']) sourcename = os.path.basename(sourcefile) 
specfile = self.find_spec() - fobj = file(specfile, 'r') + fobj = open(specfile, 'r') lines = fobj.readlines() fobj.close() @@ -1598,7 +1597,7 @@ def handle_add_source(self, data): self.copy_new_source(sourcefile) # write it back out - fobj = file(specfile, 'w') + fobj = open(specfile, 'w') fobj.writelines(lines) fobj.close() @@ -1608,7 +1607,7 @@ def handle_replace_source(self, data): sourcefile = os.path.join(self.options.config['rulesdir'], data['source']) sourcename = os.path.basename(sourcefile) specfile = self.find_spec() - fobj = file(specfile, 'r') + fobj = open(specfile, 'r') lines = fobj.readlines() fobj.close() @@ -1639,7 +1638,7 @@ def handle_replace_source(self, data): #TODO - option to remove old # write it back out - fobj = file(specfile, 'w') + fobj = open(specfile, 'w') fobj.writelines(lines) fobj.close() @@ -1732,7 +1731,7 @@ def add_changelog(self): self.log_cmd(['git', 'checkout', stage_branch], cwd=self.checkout) # get the changelog entry - fobj = file(fname, 'r') + fobj = open(fname, 'r') clog = fobj.read() now = datetime.datetime.now().strftime('%a %b %d %Y') if clog.find('INSERT_DATE_HERE') == -1: @@ -1743,7 +1742,7 @@ def add_changelog(self): # insert the entry into spec spec = self.find_spec() prog = re.compile(r'^(\s*%changelog.*)$', re.MULTILINE) - inf = file(spec, 'r') + inf = open(spec, 'r') parts = prog.split(inf.read()) inf.close() if len(parts) == 1: @@ -1752,7 +1751,7 @@ def add_changelog(self): elif len(parts) == 2: # should not be possible raise SanityError('Unable to split changelog from spec') - outf = file(spec, 'w') + outf = open(spec, 'w') for part in parts[:2]: outf.write(part) outf.write('\n') @@ -1924,7 +1923,7 @@ def init_pagure_remote_repo(self): def push_lookaside(self): - meta = file(os.path.join(self.checkout, ".%s.metadata" % self.package), 'r') + meta = open(os.path.join(self.checkout, ".%s.metadata" % self.package), 'r') for line in meta.readlines(): line = line.strip() digest, _ = line.split(None, 1) @@ -2005,7 +2004,7 @@ def wipe_git_dir(dirname): def die(msg): - print(msg) + self.logger.error(msg) sys.exit(1) diff --git a/setup.py b/setup.py index 4c21c8a..8024fe1 100755 --- a/setup.py +++ b/setup.py @@ -37,6 +37,7 @@ def get_long_description(): "Programming Language :: Python :: 2.4", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", ], install_requires=get_requirements(), diff --git a/tests/test_debrand.py b/tests/test_debrand.py index e80121a..47fd5df 100644 --- a/tests/test_debrand.py +++ b/tests/test_debrand.py @@ -8,12 +8,7 @@ import pytest import yaml - -# ensure python2 before attempting to import sources -if sys.version_info < (3, 0): - from alt_src.alt_src import Stager - -xfail = pytest.mark.xfail(sys.version_info >= (3, 0), reason='Incompatible with python3') +from alt_src.alt_src import Stager TESTS_PATH = os.path.dirname(__file__) MODULES_PATH = os.path.join(TESTS_PATH, 'data', 'module_source') @@ -294,7 +289,6 @@ def spec_file(request, checkout_dir): yield fname -@xfail(strict=True) @pytest.fixture def stager_setup(request, read_source, rule_cfg, options, checkout_dir, spec_file, work_dir, rules_dir): @@ -323,7 +317,6 @@ def stager_setup_mmd_params(modname, branch, rule_cfg): {'fname': modname}) -@xfail(strict=True) @pytest.mark.parametrize( 'stager_setup,options,rule_cfg,spec_file,expected', [stager_setup_params('foo-package', 'test-b', spec_rule_config) + @@ -349,7 
+342,6 @@ def test_rule_spec(stager_setup, expected): assert stager.log_cmd.mock_calls[0] == call(expected_cmd, cwd=stager.checkout) -@xfail(strict=True) @pytest.mark.parametrize( 'stager_setup,options,rule_cfg,spec_file,expected', [stager_setup_params('foo-package', 'test-b', re_rule_config) + @@ -375,7 +367,6 @@ def test_rule_re(stager_setup, expected): print(spec_words) assert spec_words.count(word) == count -@xfail(strict=True) @pytest.mark.parametrize('stager_setup,options,rule_cfg,spec_file,expected', [stager_setup_params('foo-package', 'test-b', patch_rule_config_add) + (contains_string('Patch2: foo.patch'),), @@ -401,7 +392,6 @@ def test_rule_patch(stager_setup, expected): assert_that(spec, expected) -@xfail(strict=True) @pytest.mark.parametrize( 'stager_setup,options,rule_cfg,spec_file,expected', [stager_setup_params('foo-package', 'test-b', source_rule_config_add) + @@ -428,7 +418,6 @@ def test_rule_source(stager_setup, expected): assert_that(spec, expected) -@xfail(strict=True) @pytest.mark.parametrize('stager_setup,options,rule_cfg,spec_file,expected', [stager_setup_params('foo-package', 'test-b', script_rule_config) + (("{rules_dir}/some-script {checkout_dir} {spec_file}",),), @@ -449,7 +438,6 @@ def test_rule_script(stager_setup, expected): expected_cmd.append(e) assert stager.log_cmd.mock_calls[0] == call(expected_cmd, cwd=stager.checkout) -@xfail(strict=True) @pytest.mark.parametrize('stager_setup,options,rule_cfg,spec_file,expected', [stager_setup_mmd_params('postgresql', 'test-b', mmd_rule_config) + (contains_string('ref: stream-centos-9.6'),), @@ -473,7 +461,6 @@ def test_rule_mmd(stager_setup, expected): assert_that(mmd, expected) -@xfail(strict=True) @pytest.mark.parametrize('stager_setup,options,rule_cfg,spec_file', [stager_setup_mmd_params('postgresql', 'test-b', mmd_rule_config2)], ids=['mmd-replace-no-change'], diff --git a/tests/test_rpms.py b/tests/test_rpms.py index 2bda7f6..f4eb248 100644 --- a/tests/test_rpms.py +++ b/tests/test_rpms.py @@ -9,19 +9,16 @@ from subprocess import PIPE, Popen, check_call, check_output import pytest import yaml +import six from configparser import RawConfigParser from hamcrest import assert_that, calling, empty, equal_to, not_, raises from mock import MagicMock, call, patch from .matchers import exits -# ensure python2 before attempting to import sources -if sys.version_info < (3, 0): - from alt_src.alt_src import (main, BaseProcessor, acquire_lock, StartupError, - SanityError, InputError, CONFIG_DEFAULTS, Stager, Pusher, - CommandError) - -xfail = pytest.mark.xfail(sys.version_info >= (3, 0), reason="Incompatible with python3") +from alt_src.alt_src import (main, BaseProcessor, acquire_lock, StartupError, + SanityError, InputError, CONFIG_DEFAULTS, Stager, Pusher, + CommandError) TESTS_PATH = os.path.dirname(__file__) RPMS_PATH = os.path.join(TESTS_PATH, 'data', 'rpms') @@ -135,7 +132,7 @@ def mock_koji_pathinfo(): def git_subject(git_dir, ref): """Return subject of a git ref within the given path.""" cmd = ['git', 'show', '-s', '--format=%s', ref] - proc = Popen(cmd, cwd=git_dir, stdout=PIPE) + proc = Popen(cmd, cwd=git_dir, stdout=PIPE, universal_newlines=True) out, _ = proc.communicate() assert_that(proc.returncode, equal_to(0), "`git show' failed") @@ -177,7 +174,6 @@ def get_test_mmd_str_and_dict(): return mmd_str, mmd_dict['data'] -@xfail(strict=True) @pytest.mark.parametrize('branch,name,version,release', [ ('c7', 'grub2', '2.02', '0.64.el7'), ('c7', 'ntp', '4.2.6p5', '25.el7_3.2'), @@ -217,7 +213,6 @@ def 
test_push_with_debrand(config_file, pushdir, lookasidedir, remove_handlers() -@xfail(strict=True) @pytest.mark.parametrize('branch,name,version,release', [ ('c7', 'grub2', '2.02', '0.64.el7'), ('c7', 'ntp', '4.2.6p5', '25.el7_3.2'), @@ -270,7 +265,6 @@ def test_repush_with_staged_data(config_file, pushdir, lookasidedir, assert_that(files, not_(empty())) -@xfail(strict=True) @pytest.mark.parametrize('branch,name,version,release', [ ('c7', 'grub2', '2.02', '0.64.el7'), ('c7', 'ntp', '4.2.6p5', '25.el7_3.2'), @@ -333,7 +327,6 @@ def test_repush_without_staged_data(config_file, pushdir, lookasidedir, assert_that(files, not_(empty())) -@xfail(strict=True) @pytest.mark.parametrize('branch,name,version,release', [ ('c7', 'fake', '1.1', '22'), # need no-debrand packages for this test @@ -380,7 +373,6 @@ def test_repush_without_tag(config_file, pushdir, lookasidedir, branch, assert dupwarn -@xfail(strict=True) @pytest.mark.parametrize('branch,name,version,release', [ ('c7', 'grub2', '2.02', '0.64.el7'), ]) @@ -440,7 +432,6 @@ def test_push_with_existing_local_tag(config_file, pushdir, lookasidedir, remove_handlers() -@xfail(strict=True) def test_repush_with_state_init(config_file, pushdir, lookasidedir, default_config, capsys): rpm = 'grub2-2.02-0.64.el7.src.rpm' @@ -478,7 +469,6 @@ def test_repush_with_state_init(config_file, pushdir, lookasidedir, default_conf assert_that(files, not_(empty())) -@xfail(strict=True) def test_repush_with_state_none(config_file, lookasidedir, capsys): """ set_state fails, state file is not created, so get_state fails to open @@ -522,19 +512,16 @@ def test_repush_with_state_none(config_file, lookasidedir, capsys): assert_that(files, not_(empty())) -@xfail(strict=True) def test_get_state_with_error_other_than_enoent(tempdir): options = MagicMock(koji=None, source=tempfile.mkstemp(dir=tempdir)[1]) processor = BaseProcessor(options) - processor.workdir = tempfile.mkstemp(dir=tempdir)[1] - + processor.workdir = tempdir # attempting to open state file raises generic IOError - with patch('__builtin__.open', autospec=True, side_effect=IOError): + with patch('six.moves.builtins.open', autospec=True, side_effect=IOError): # error is raised by method because only ENOENT is handled assert_that(calling(processor.get_state), raises(IOError)) -@xfail(strict=True) def test_repush_with_state_staged(config_file, pushdir, lookasidedir, default_config, capsys): rpm = 'grub2-2.02-0.64.el7.src.rpm' @@ -577,7 +564,6 @@ def test_repush_with_state_staged(config_file, pushdir, lookasidedir, default_co assert_that(files, not_(empty())) -@xfail(strict=True) def test_log_cmd_with_retries(capsys): mock_options = MagicMock(koji=False) @@ -602,7 +588,6 @@ def test_log_cmd_with_retries(capsys): assert expected in err -@xfail(strict=True) @pytest.mark.parametrize('cmd, expected', [(['git', 'clone', 'some_git_url'], 4), (['rsync', 'src', 'dst'], 4), (['echo', 'foo'], 1) @@ -619,7 +604,6 @@ def test_default_tries(cmd, expected): assert processor.default_tries(cmd) == expected -@xfail(strict=True) def test_push_when_already_pushed(config_file, lookasidedir, default_config, capsys): """ test if the same content has already pushed to remote, @@ -657,7 +641,7 @@ def test_push_when_already_pushed(config_file, lookasidedir, default_config, cap # check if both tags are in the remote repo git_url = default_config['git_push_url'] % {'package':'rcm-repoquery'} cmd = ['git', 'tag'] - out = check_output(cmd, cwd=git_url) + out = check_output(cmd, cwd=git_url, universal_newlines=True) assert 
sorted(out.splitlines()) == ['imports/c7/rcm-repoquery-1.4-1.bar', 'imports/c7/rcm-repoquery-1.4-1.foo'] @@ -667,7 +651,6 @@ def test_push_when_already_pushed(config_file, lookasidedir, default_config, cap assert_that(files, not_(empty())) -@xfail(strict=True) def test_acquire_release_lock(tempdir): # test lock and relase file lock function works as expected logger = logging.getLogger('altsrc') @@ -708,7 +691,6 @@ def test_acquire_release_lock(tempdir): remove_handlers() -@xfail(strict=True) def test_stage_only(config_file, pushdir, capsys): """ test a task without push option @@ -730,7 +712,6 @@ def test_stage_only(config_file, pushdir, capsys): remove_handlers() -@xfail(strict=True) def test_stage_repo_no_master(config_file, pushdir, capsys, default_config): """ check staging on new branch in repo having no master branch @@ -786,7 +767,6 @@ def log_cmd(cmd, fatal=True, **kwargs): remove_handlers() -@xfail(strict=True) def test_not_existing_source_file(config_file): rpm = 'foo.src.rpm' @@ -800,7 +780,6 @@ def test_not_existing_source_file(config_file): remove_handlers() -@xfail(strict=True) def test_srpm_koji(mock_koji_session, mock_koji_pathinfo): mock_koji_session.return_value.getRPM.return_value = {'arch': 'src', 'build_id': 42} mock_koji_pathinfo.return_value.build.return_value = "test_build" @@ -812,7 +791,6 @@ def test_srpm_koji(mock_koji_session, mock_koji_pathinfo): assert_that(processor.source_file, equal_to("test_build/test_relpath")) -@xfail(strict=True) @pytest.mark.parametrize('getRPM_return_value', [{'arch': 'foo'}, None], ids=("wrong_arch", "source_not_found")) @@ -825,7 +803,6 @@ def test_srpm_koji_sanity_error(getRPM_return_value, mock_koji_session, mock_koj assert_that(calling(BaseProcessor).with_args(mock_options), raises(SanityError)) -@xfail(strict=True) def test_module_src_koji(mock_koji_session, mock_koji_pathinfo): binfo = {'extra': {'typeinfo': {'module': {'modulemd_str': "foo_module_str"}}}} mock_koji_session.return_value.getBuild.return_value = binfo @@ -838,7 +815,6 @@ def test_module_src_koji(mock_koji_session, mock_koji_pathinfo): assert_that(processor.mmd, equal_to("foo_module_str")) -@xfail(strict=True) def test_module_src_koji_build_not_found(mock_koji_session, mock_koji_pathinfo): mock_koji_session.return_value.getBuild.return_value = None mock_koji_pathinfo.return_value.build.return_value = "test_build" @@ -847,7 +823,6 @@ def test_module_src_koji_build_not_found(mock_koji_session, mock_koji_pathinfo): assert_that(calling(BaseProcessor).with_args(mock_options), raises(SanityError)) -@xfail(strict=True) def test_read_srpm_input_error(mock_koji_session, mock_koji_pathinfo): mock_koji_session.return_value.getRPM.return_value = {'arch': 'src', 'build_id': 42} mock_koji_pathinfo.return_value.build.return_value = "test_build" @@ -860,7 +835,6 @@ def test_read_srpm_input_error(mock_koji_session, mock_koji_pathinfo): assert_that(calling(processor.read_srpm), raises(InputError)) -@xfail(strict=True) def test_read_mmd_str(mock_koji_session, mock_koji_pathinfo): mmd_str, mmd_dict = get_test_mmd_str_and_dict() @@ -878,7 +852,6 @@ def test_read_mmd_str(mock_koji_session, mock_koji_pathinfo): assert_that(processor.summary, equal_to(mmd_dict['summary'])) -@xfail(strict=True) def test_mmd_no_changelog(mock_koji_session, mock_koji_pathinfo): mmd_str, mmd_dict = get_test_mmd_str_and_dict() mock_koji_pathinfo.return_value.rpm.return_value = "test_relpath" @@ -897,7 +870,6 @@ def test_mmd_no_changelog(mock_koji_session, mock_koji_pathinfo): assert_that(processor.package, 
equal_to(mmd_dict['name'])) -@xfail(strict=True) def test_git_url_module(mock_koji_session, mock_koji_pathinfo): mmd_str, mmd_dict = get_test_mmd_str_and_dict() binfo = {'extra': {'typeinfo': {'module': {'modulemd_str': mmd_str}}}} @@ -920,13 +892,11 @@ def test_git_url_module(mock_koji_session, mock_koji_pathinfo): % {'package': mmd_dict['name']})) -@xfail(strict=True) def test_unsupported_source_startup_error(): mock_options = MagicMock(koji=True, source="build_nvr.src.foo") assert_that(calling(BaseProcessor).with_args(mock_options), raises(StartupError)) -@xfail(strict=True) def test_stage_module_src(config_file, pushdir, lookasidedir, capsys, default_config, mock_koji_session, mock_koji_pathinfo): """Verify that alt-src command completes without any errors and generates @@ -964,7 +934,6 @@ def test_stage_module_src(config_file, pushdir, lookasidedir, capsys, default_co remove_handlers() -@xfail(strict=True) def test_push_to_pagure(config_file, key_file, pushdir, lookasidedir, capsys): rpm = 'grub2-2.02-0.64.el7.src.rpm' @@ -1003,7 +972,6 @@ def side_eff(): remove_handlers() -@xfail(strict=True) def test_push_module_to_pagure(config_file, key_file, pushdir, capsys, mock_koji_session, mock_koji_pathinfo): """ verifies modules are pushed to pagure repo without any error """ @@ -1044,7 +1012,6 @@ def side_eff(): assert_that(len(err), equal_to(0)) -@xfail(strict=True) @pytest.mark.parametrize('cmd_args,package,expected_extra_dir', [ ([os.path.join(RPMS_PATH, 'grub2-2.02-0.64.el7.src.rpm')], 'grub2', 'rpms'), (['--koji', 'fake-nvr:modulemd.src.txt'], 'my_package', 'modules'), @@ -1108,7 +1075,6 @@ def side_eff(): assert missing_repo_str in std assert "Initializing new repo:" in std -@xfail(strict=True) @pytest.mark.parametrize('cmd_args,package,expected_extra_dir', [ ([os.path.join(RPMS_PATH, 'grub2-2.02-0.64.el7.src.rpm')], 'grub2', 'rpms'), (['--koji', 'fake-nvr:modulemd.src.txt'], 'my_package', 'modules'), @@ -1173,7 +1139,6 @@ def patched_get_output_sf(cmd, *args, **kwargs): assert "Initializing new repo:" not in std -@xfail(strict=True) def test_option_alias(config_file, pushdir, lookasidedir, default_config, capsys): rpm = 'grub2-2.02-0.64.el7'