Prospector 1.4.1 fixes
Major changes:
- Return None from the mount_first_layer function when something goes
  wrong. Check that the target_dir string exists before passing it on
  for analysis.
- Return None from the DriverManager try-except calls when a
  NoMatches exception is raised. This modifies all instantiations of
  Stevedore's DriverManager class (see the sketch after this list).
- Iterate over dictionaries using .items() rather than looping over
  the plain dictionary and indexing into it.
- Use "with" to open files and subprocess pipes. This may result in
  certain errors not being raised, but it is hard to know at this
  point.
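
For reference, here is a minimal sketch (not code from this commit) of
the explicit-return and "with" patterns described above. The namespace,
plugin name, and helper names are illustrative placeholders, not values
taken from Tern:

    # Hypothetical helpers showing the DriverManager/NoMatches pattern
    # and the "with" pattern for subprocess pipes.
    import subprocess

    from stevedore import driver
    from stevedore.exception import NoMatches


    def load_format_driver(format_string):
        '''Return the loaded plugin driver, or None if no plugin matches.'''
        try:
            mgr = driver.DriverManager(
                namespace='example.formats',  # placeholder namespace
                name=format_string,
                invoke_on_load=True,
            )
            return mgr.driver
        except NoMatches:
            # Previously a bare "pass" fell through to an implicit None;
            # returning None explicitly documents the intent and satisfies
            # the prospector/pylint checks.
            return None


    def run_command(cmd_list):
        '''Run a command, closing the pipes even if an exception is raised.'''
        with subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            out, err = proc.communicate()
        return out, err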

Minor changes:
- Use the "maxsplit" argument in __main__.py since only the first
  item is needed (this and several other idioms below are illustrated
  in the sketch after this list).
- Use [] rather than list() to initialize lists.
- Specify the encoding as 'utf-8' when opening files.
- Update re-raises to state explicitly which exception they are
  re-raising from, using "from".
- Use "enumerate" when looping over a list's indices, discarding the
  unused value.
- Suppress the "useless-suppression" message along with the
  "too-many-branches" suppression due to a pylint bug:
  pylint-dev/pylint#2366
- Wrap some lines that were too long, except in the HTML format
  file, where the literal HTML formatting needs to be kept.
- Update the copyright year on some modified files.
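
The following standalone sketch (again illustrative, not code from this
commit) pulls together several of the minor idioms above: maxsplit,
explicit encoding, exception chaining with "from", enumerate, and
dict.items(). The file path, helper names, and data are made up:

    import sys


    def first_token(text):
        # maxsplit=1 stops splitting after the first separator, since
        # only the first item is needed.
        return text.split('[', maxsplit=1)[0]


    def read_lines(path):
        # Specify the encoding instead of relying on the locale default.
        try:
            with open(path, encoding='utf-8') as f:
                return f.readlines()
        except OSError as err:
            # Chain with "from" so the original traceback is preserved.
            raise RuntimeError(f"Cannot read file: {path}") from err


    def find_line(lines, target):
        # enumerate() yields the index directly; the unused value is
        # thrown away.
        for index, _ in enumerate(lines):
            if lines[index].strip() == target:
                return index
        return -1


    def expand(snippet, replace_dict):
        # Iterate over key/value pairs with .items() rather than looping
        # over the plain dictionary and indexing into it.
        for key, val in replace_dict.items():
            snippet = snippet.replace(key, val)
        return snippet


    if __name__ == '__main__':
        print(first_token(sys.version.replace('\n', '')))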

Signed-off-by: Nisha K <nishak@vmware.com>
Nisha K committed Sep 1, 2021
1 parent 9bbb5dd commit 7c61a6d
Showing 23 changed files with 72 additions and 62 deletions.
2 changes: 1 addition & 1 deletion tern/__main__.py
@@ -152,7 +152,7 @@ def main():
"default option.")

# sys.version gives more information than we care to print
py_ver = sys.version.replace('\n', '').split('[')[0]
py_ver = sys.version.replace('\n', '').split('[', maxsplit=1)[0]
parser.add_argument('-v', '--version', action='version',
version="{ver_str}\n python version = {py_v}".format(
ver_str=get_version(), py_v=py_ver))
4 changes: 2 additions & 2 deletions tern/analyze/common.py
@@ -166,7 +166,7 @@ def get_licenses_from_deb_copyright(deb_copyright):
3. returns a list of unique licenses found inside
the copyright text
'''
collected_paragraphs = list()
collected_paragraphs = []
pkg_licenses = set()
for paragraph in iter(debcon.get_paragraphs_data(deb_copyright)):
if 'license' in paragraph:
@@ -190,7 +190,7 @@ def get_deb_package_licenses(deb_copyrights):
Given a list of debian copyrights for the same number of packages,
returns a list package licenses for each of the packages
'''
deb_licenses = list()
deb_licenses = []
for deb_copyright in deb_copyrights:
deb_licenses.append(get_licenses_from_deb_copyright(deb_copyright))
return deb_licenses
6 changes: 3 additions & 3 deletions tern/analyze/default/command_lib/command_lib.py
@@ -29,11 +29,11 @@

# command library
command_lib = {'common': {}, 'base': {}, 'snippets': {}}
with open(os.path.abspath(common_file)) as f:
with open(os.path.abspath(common_file), encoding='utf-8') as f:
command_lib['common'] = yaml.safe_load(f)
with open(os.path.abspath(base_file)) as f:
with open(os.path.abspath(base_file), encoding='utf-8') as f:
command_lib['base'] = yaml.safe_load(f)
with open(os.path.abspath(snippet_file)) as f:
with open(os.path.abspath(snippet_file), encoding='utf-8') as f:
command_lib['snippets'] = yaml.safe_load(f)
# list of package information keys that the command library can accomodate
base_keys = {'names', 'versions', 'licenses', 'copyrights', 'proj_urls',
7 changes: 5 additions & 2 deletions tern/analyze/default/container/single_layer.py
@@ -40,7 +40,7 @@ def find_os_release(host_path):
return ''
etc_path = lib_path
# file exists at this point, try to read it
with open(etc_path, 'r') as f:
with open(etc_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
# Create dictionary from os-release values
os_release_dict = {}
@@ -106,9 +106,11 @@ def mount_first_layer(layer_obj):
except subprocess.CalledProcessError as e: # nosec
logger.critical("Cannot mount filesystem and/or device nodes: %s", e)
dcom.abort_analysis()
return None
except KeyboardInterrupt:
logger.critical(errors.keyboard_interrupt)
dcom.abort_analysis()
return None


def analyze_first_layer(image_obj, master_list, options):
@@ -155,7 +157,8 @@ def analyze_first_layer(image_obj, master_list, options):
# mount the first layer
target_dir = mount_first_layer(image_obj.layers[0])
# set the host path to the mount point
prereqs.host_path = target_dir
if target_dir:
prereqs.host_path = target_dir
# core default execution on the first layer
core.execute_base(image_obj.layers[0], prereqs)
# unmount
2 changes: 1 addition & 1 deletion tern/analyze/default/dockerfile/lock.py
@@ -264,5 +264,5 @@ def write_locked_dockerfile(dfile, destination=None):
file_name = destination
else:
file_name = constants.locked_dockerfile
with open(file_name, 'w') as f:
with open(file_name, 'w', encoding='utf-8') as f:
f.write(dfile)
6 changes: 3 additions & 3 deletions tern/analyze/default/dockerfile/parse.py
@@ -72,7 +72,7 @@ def get_dockerfile_obj(dockerfile_name, prev_env=None):
previous stages in a multistage docker build. Should be a python dictionary
of the form {'ENV': 'value',...}'''
dfobj = Dockerfile()
with open(dockerfile_name) as f:
with open(dockerfile_name, encoding='utf-8') as f:
parser = DockerfileParser(parent_env=prev_env, fileobj=f)
dfobj.filepath = dockerfile_name
dfobj.structure = parser.structure
@@ -180,8 +180,8 @@ def expand_from_images(dfobj, image_list):
if command_dict['instruction'] in import_str:
dfobj.structure[index]['content'] = import_str + '\n'
else:
dfobj.structure[index]['content'] = command_dict['instruction'] + \
' ' + import_str + '\n'
dfobj.structure[index]['content'] = \
command_dict['instruction'] + ' ' + import_str + '\n'
image_count += 1


2 changes: 1 addition & 1 deletion tern/analyze/default/dockerfile/run.py
@@ -233,7 +233,7 @@ def write_dockerfile_stages(dfobj):
for stage in stages:
stagefile = os.path.join(
filepath, '{}_{}'.format(filename, stages.index(stage) + 1))
with open(stagefile, 'w') as f:
with open(stagefile, 'w', encoding='utf-8') as f:
f.write(stage)
dockerfiles.append(stagefile)
return dockerfiles
6 changes: 3 additions & 3 deletions tern/analyze/default/live/collect.py
@@ -40,7 +40,7 @@ def create_script(command, prereqs, method):
if method == 'host':
script = host_script.format(host_shell=prereqs.host_shell,
snip=command)
with open(script_path, 'w') as f:
with open(script_path, 'w', encoding='utf-8') as f:
f.write(script)
os.chmod(script_path, 0o700)
return script_path
@@ -54,8 +54,8 @@ def snippets_to_script(snippet_list):
final_list = []
for snippet in snippet_list:
# replace the escaped characters
for key in replace_dict:
snippet = re.sub(key, replace_dict[key], snippet)
for key, val in replace_dict.items():
snippet = re.sub(key, val, snippet)
final_list.append(snippet)
return " && ".join(final_list)

2 changes: 1 addition & 1 deletion tern/analyze/default/live/run.py
@@ -106,7 +106,7 @@ def get_context_layers(reports, format_string):
)
return mgr.driver.consume_layer(reports)
except NoMatches:
pass
return None


def resolve_context_packages(layers):
4 changes: 2 additions & 2 deletions tern/analyze/passthrough.py
@@ -99,7 +99,7 @@ def run_extension(image_obj, ext_string, redo=False):
)
return mgr.driver.execute(image_obj, redo)
except NoMatches:
pass
return None


def run_extension_layer(image_layer, ext_string, redo=False):
@@ -113,4 +113,4 @@ def run_extension_layer(image_layer, ext_string, redo=False):
)
return mgr.driver.execute_layer(image_layer, redo)
except NoMatches:
pass
return None
16 changes: 9 additions & 7 deletions tern/classes/docker_image.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# Copyright (c) 2017-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause

import json
@@ -62,7 +62,7 @@ def get_image_manifest(self):
an image inside, get a dict of the manifest.json file'''
temp_path = rootfs.get_working_dir()
with general.pushd(temp_path):
with open(manifest_file) as f:
with open(manifest_file, encoding='utf-8') as f:
json_obj = json.loads(f.read())
return json_obj

@@ -94,7 +94,7 @@ def get_image_config(self, manifest):
# manifest file
temp_path = rootfs.get_working_dir()
with general.pushd(temp_path):
with open(config_file) as f:
with open(config_file, encoding='utf-8') as f:
json_obj = json.loads(f.read())
return json_obj

@@ -151,9 +151,11 @@ def load_image(self, load_until_layer=0):
layer_count = 1
while layer_diffs and layer_paths:
layer = ImageLayer(layer_diffs.pop(0), layer_paths.pop(0))
# Only load metadata for the layers we need to report on according to the --layers command line option
# Only load metadata for the layers we need to report on
# according to the --layers command line option
# If --layers option is not present, load all the layers
if self.load_until_layer >= layer_count or self.load_until_layer == 0:
if (self.load_until_layer >= layer_count
or self.load_until_layer == 0):
layer.set_checksum(checksum_type, layer.diff_id)
layer.gen_fs_hash()
layer.layer_index = layer_count
@@ -166,9 +168,9 @@ def load_image(self, load_until_layer=0):
self._load_until_layer = 0
self.set_layer_created_by()
except NameError as e:
raise NameError(e)
raise NameError(e) from e
except subprocess.CalledProcessError as e:
raise subprocess.CalledProcessError(
e.returncode, cmd=e.cmd, output=e.output, stderr=e.stderr)
except IOError as e:
raise IOError(e)
raise IOError(e) from e
7 changes: 4 additions & 3 deletions tern/classes/file_data.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# Copyright (c) 2020-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause

import datetime
@@ -98,8 +98,9 @@ def date(self, date):
if date:
try:
datetime.datetime.strptime(date, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect date format, should be YYYY-MM-DD")
except ValueError as vr:
raise ValueError(
"Incorrect date format, should be YYYY-MM-DD") from vr
self.__date = date

@property
6 changes: 3 additions & 3 deletions tern/classes/image_layer.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# Copyright (c) 2017-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import os
import re
@@ -241,7 +241,7 @@ def remove_file(self, file_path):
in the layer'''
rem_index = 0
success = False
for index in range(0, len(self.__files)):
for index, _ in enumerate(self.__files):
if self.__files[index].path == file_path:
rem_index = index
success = True
@@ -313,7 +313,7 @@ def add_files(self):
hash_file = os.path.join(os.path.dirname(fs_path),
self.__fs_hash) + '.txt'
pattern = re.compile(r'([\w\-|]+)\s+(.+)')
with open(hash_file) as f:
with open(hash_file, encoding='utf-8') as f:
content = f.readlines()
for line in content:
m = pattern.search(line)
4 changes: 2 additions & 2 deletions tern/extensions/scancode/executor.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019-2020 VMware, Inc. All Rights Reserved.
# Copyright (c) 2019-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause

"""
@@ -119,7 +119,7 @@ def add_scancode_headers(layer_obj, headers):
'''Given a list of headers from scancode data, add unique headers to
the list of existing headers in the layer object'''
unique_notices = {header.get("notice") for header in headers}
layer_headers = layer_obj.extension_info.get("headers", list())
layer_headers = layer_obj.extension_info.get("headers", [])
for lh in layer_headers:
unique_notices.add(lh)
layer_obj.extension_info["headers"] = list(unique_notices)
5 changes: 2 additions & 3 deletions tern/formats/html/generator.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# Copyright (c) 2020-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause

"""
@@ -215,8 +215,7 @@ def list_handler(list_obj, indent):
return html_string


# pylint: disable=too-many-branches
def dict_handler(dict_obj, indent):
def dict_handler(dict_obj, indent): # pylint: disable=too-many-branches, useless-suppression
'''Writes html code for dictionary in report dictionary'''
html_string = ''
html_string = html_string + ' '*indent + '<ul class ="nested"> \n'
8 changes: 4 additions & 4 deletions tern/formats/json/consumer.py
@@ -30,14 +30,14 @@ def create_image_layer(report):
# expect a json input, raise an error if it is not
content = {}
try:
f = open(os.path.abspath(report))
content = json.load(f)
with open(os.path.abspath(report), encoding='utf-8') as f:
content = json.load(f)
except OSError as err:
logger.critical("Cannot access file %s: %s", report, err)
raise ConsumerError(f"Error with given report file: {report}")
raise ConsumerError(f"Error with given report file: {report}") from err
except json.JSONDecodeError as err:
logger.critical("Cannot parse JSON in file %s: %s", report, err)
raise ConsumerError(f"Error with given report file: {report}")
raise ConsumerError(f"Error with given report file: {report}") from err
# we should have some content but it may be empty
if not content:
raise ConsumerError("No content consumed from given report file")
8 changes: 4 additions & 4 deletions tern/formats/spdx/spdxjson/consumer.py
@@ -54,14 +54,14 @@ def create_image_layer(report):
# expect a json input, raise an error if it is not
content = {}
try:
f = open(os.path.abspath(report))
content = json.load(f)
with open(os.path.abspath(report), encoding='utf-8') as f:
content = json.load(f)
except OSError as err:
logger.critical("Cannot access file %s: %s", report, err)
raise ConsumerError(f"Error with given report file: {report}")
raise ConsumerError(f"Error with given report file: {report}") from err
except json.JSONDecodeError as err:
logger.critical("Cannot parse JSON in file %s: %s", report, err)
raise ConsumerError(f"Error with given report file: {report}")
raise ConsumerError(f"Error with given report file: {report}") from err
# we should have some content but it may be empty
if not content:
raise ConsumerError("No content consumed from given report file")
3 changes: 2 additions & 1 deletion tern/formats/spdx/spdxjson/file_helpers.py
@@ -31,7 +31,8 @@ def get_file_dict(filedata, template, layer_id):
'SPDXID': spdx_common.get_file_spdxref(filedata, layer_id),
'checksums': [{
'algorithm':
spdx_common.get_file_checksum(filedata).split(': ')[0],
spdx_common.get_file_checksum(filedata).split(
': ', maxsplit=1)[0],
'checksumValue':
spdx_common.get_file_checksum(filedata).split(': ')[1]
}],
3 changes: 2 additions & 1 deletion tern/formats/spdx/spdxjson/layer_helpers.py
@@ -149,7 +149,8 @@ def get_layer_dict(layer_obj):
'filesAnalyzed': 'true' if layer_obj.files_analyzed else 'false',
'checksums': [{
'algorithm':
spdx_common.get_layer_checksum(layer_obj).split(': ')[0],
spdx_common.get_layer_checksum(layer_obj).split(
': ', maxsplit=1)[0],
'checksumValue':
spdx_common.get_layer_checksum(layer_obj).split(': ')[1]
}],
2 changes: 1 addition & 1 deletion tern/load/docker_api.py
@@ -58,7 +58,7 @@ def build_image(dfile, client):
dfcontents = ''
dfcontext = os.path.dirname(df_path)
try:
with open(df_path) as f:
with open(df_path, encoding='utf-8') as f:
dfcontents = f.read()
# terrible bypass of the API
docker.api.build.process_dockerfile = lambda dockerfile, path: (
6 changes: 3 additions & 3 deletions tern/report/report.py
@@ -24,7 +24,7 @@ def write_report(report, args):
'''Write the report to a file'''
if args.output_file:
file_name = args.output_file
with open(file_name, 'w') as f:
with open(file_name, 'w', encoding='utf-8') as f:
f.write(report)


@@ -64,7 +64,7 @@ def generate_format(images, format_string, print_inclusive):
)
return mgr.driver.generate(images, print_inclusive)
except NoMatches:
pass
return None


def generate_format_layer(layer, format_string):
@@ -78,7 +78,7 @@ def generate_format_layer(layer, format_string):
)
return mgr.driver.generate_layer(layer)
except NoMatches:
pass
return None


def report_out(args, *images):