From b983b61768f40dbf027302ca526844813281887d Mon Sep 17 00:00:00 2001 From: Ian Stapleton Cordasco Date: Fri, 12 Nov 2021 19:15:01 -0600 Subject: [PATCH] Start using auto-formatters Use black to auto-format the style so it's always consistent and pyupgrade will allow us to auto-upgrade to the newest language features. --- .github/workflows/pythonpackage.yml | 38 +- .pre-commit-config.yaml | 33 + CONTRIBUTING.md | 4 +- LICENSE | 1 - bandit/__init__.py | 4 +- bandit/__main__.py | 1 + bandit/blacklists/calls.py | 541 +++++++------ bandit/blacklists/imports.py | 231 ++++-- bandit/blacklists/utils.py | 12 +- bandit/cli/baseline.py | 111 +-- bandit/cli/config_generator.py | 93 ++- bandit/cli/main.py | 507 +++++++----- bandit/core/__init__.py | 2 - bandit/core/blacklisting.py | 23 +- bandit/core/config.py | 175 ++-- bandit/core/constants.py | 16 +- bandit/core/context.py | 153 ++-- bandit/core/docs_utils.py | 43 +- bandit/core/extension_loader.py | 61 +- bandit/core/issue.py | 93 ++- bandit/core/manager.py | 219 +++-- bandit/core/meta_ast.py | 19 +- bandit/core/metrics.py | 24 +- bandit/core/node_visitor.py | 143 ++-- bandit/core/test_properties.py | 26 +- bandit/core/test_set.py | 51 +- bandit/core/tester.py | 45 +- bandit/core/utils.py | 119 +-- bandit/formatters/csv.py | 44 +- bandit/formatters/custom.py | 67 +- bandit/formatters/html.py | 84 +- bandit/formatters/json.py | 45 +- bandit/formatters/screen.py | 156 ++-- bandit/formatters/text.py | 125 +-- bandit/formatters/utils.py | 1 - bandit/formatters/xml.py | 44 +- bandit/formatters/yaml.py | 42 +- bandit/plugins/app_debug.py | 19 +- bandit/plugins/asserts.py | 18 +- .../crypto_request_no_cert_validation.py | 21 +- bandit/plugins/django_sql_injection.py | 43 +- bandit/plugins/django_xss.py | 41 +- bandit/plugins/exec.py | 11 +- .../plugins/general_bad_file_permissions.py | 22 +- bandit/plugins/general_bind_all_interfaces.py | 11 +- bandit/plugins/general_hardcoded_password.py | 45 +- 
bandit/plugins/general_hardcoded_tmp.py | 19 +- .../plugins/hashlib_new_insecure_functions.py | 21 +- bandit/plugins/injection_paramiko.py | 25 +- bandit/plugins/injection_shell.py | 189 ++--- bandit/plugins/injection_sql.py | 32 +- bandit/plugins/injection_wildcard.py | 38 +- bandit/plugins/insecure_ssl_tls.py | 85 +- bandit/plugins/jinja2_templates.py | 55 +- bandit/plugins/mako_templates.py | 19 +- .../plugins/ssh_no_host_key_verification.py | 25 +- bandit/plugins/try_except_continue.py | 21 +- bandit/plugins/try_except_pass.py | 21 +- bandit/plugins/weak_cryptographic_key.py | 106 +-- bandit/plugins/yaml_load.py | 27 +- doc/source/conf.py | 50 +- scripts/main.py | 6 +- setup.py | 6 +- test-requirements.txt | 2 +- tests/functional/test_baseline.py | 238 +++--- tests/functional/test_functional.py | 750 +++++++++--------- tests/functional/test_runtime.py | 70 +- tests/unit/cli/test_baseline.py | 160 ++-- tests/unit/cli/test_config_generator.py | 36 +- tests/unit/cli/test_main.py | 265 ++++--- tests/unit/core/test_blacklisting.py | 28 +- tests/unit/core/test_config.py | 196 +++-- tests/unit/core/test_context.py | 135 ++-- tests/unit/core/test_docs_util.py | 21 +- tests/unit/core/test_issue.py | 47 +- tests/unit/core/test_manager.py | 311 ++++---- tests/unit/core/test_meta_ast.py | 18 +- tests/unit/core/test_test_set.py | 154 ++-- tests/unit/core/test_util.py | 305 ++++--- tests/unit/formatters/test_csv.py | 56 +- tests/unit/formatters/test_custom.py | 50 +- tests/unit/formatters/test_html.py | 144 ++-- tests/unit/formatters/test_json.py | 102 +-- tests/unit/formatters/test_screen.py | 226 +++--- tests/unit/formatters/test_text.py | 202 ++--- tests/unit/formatters/test_xml.py | 60 +- tests/unit/formatters/test_yaml.py | 102 +-- tools/openstack_coverage.py | 22 +- tox.ini | 10 +- 89 files changed, 4499 insertions(+), 3583 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.github/workflows/pythonpackage.yml b/.github/workflows/pythonpackage.yml 
index 6f806d514..fa0687bc4 100644 --- a/.github/workflows/pythonpackage.yml +++ b/.github/workflows/pythonpackage.yml @@ -3,11 +3,11 @@ name: Build and Test Bandit on: [push, pull_request] jobs: - pylint: + format: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.5] + python-version: [3.6] steps: - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} @@ -21,13 +21,13 @@ jobs: pip install -r test-requirements.txt pip install tox - name: Run tox - run: tox -e pylint + run: tox -e format pep8: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.5] + python-version: [3.6] steps: - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} @@ -43,11 +43,11 @@ jobs: - name: Run tox run: tox -e pep8 - py35: + py36: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.5] + python-version: [3.6] steps: - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} @@ -61,13 +61,13 @@ jobs: pip install -r test-requirements.txt pip install tox - name: Run tox - run: tox -e py35 + run: tox -e py36 - py36: + py37: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.6] + python-version: [3.7] steps: - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} @@ -81,13 +81,13 @@ jobs: pip install -r test-requirements.txt pip install tox - name: Run tox - run: tox -e py36 + run: tox -e py37 - py37: + py38: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.7] + python-version: [3.8] steps: - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} @@ -101,13 +101,13 @@ jobs: pip install -r test-requirements.txt pip install tox - name: Run tox - run: tox -e py37 + run: tox -e py38 - py38: + py39: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.8] + python-version: [3.9] steps: - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} @@ -121,13 +121,13 @@ jobs: pip install -r test-requirements.txt 
pip install tox - name: Run tox - run: tox -e py38 + run: tox -e py39 - py39: + py310: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.9] + python-version: ['3.10'] steps: - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} @@ -141,4 +141,4 @@ jobs: pip install -r test-requirements.txt pip install tox - name: Run tox - run: tox -e py39 + run: tox -e py310 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..f61261a9b --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,33 @@ +exclude: ^(examples|tools|doc|releasenotes) +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + - id: trailing-whitespace +- repo: https://github.com/asottile/reorder_python_imports + rev: v2.6.0 + hooks: + - id: reorder-python-imports + args: [--application-directories, '.:src', --py36-plus] +- repo: https://github.com/psf/black + rev: 21.10b0 + hooks: + - id: black + args: [--line-length=79, --target-version=py36] +- repo: https://github.com/asottile/pyupgrade + rev: v2.29.0 + hooks: + - id: pyupgrade + args: [--py36-plus] +- repo: https://github.com/jorisroovers/gitlint + rev: v0.16.0 + hooks: + - id: gitlint +#- repo: https://github.com/pre-commit/mirrors-mypy +# rev: v0.910-1 +# hooks: +# - id: mypy +# exclude: ^(docs/|example-plugin/) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 46abdf5e2..7f962a5be 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,5 @@ # Contributing to Bandit -Thanks for considering to take part in the improvement of the Bandit project. Contributions are always welcome! +Thanks for considering to take part in the improvement of the Bandit project. Contributions are always welcome! Here are guidelines and rules that can be helpful if you plan to want to get involved in the project. 
#### Table Of Contents @@ -27,7 +27,7 @@ If you encounter a bug, please let us know about it. See the guide here [GitHub to add a comment to the existing issue instead of creating a new one. ### Submitting your first issue -We encourage using the issue template to improve quality of reported issues. +We encourage using the issue template to improve quality of reported issues. Navigate to the issues tab and select `New issue`, then select the **Bug report** template and fill out the form. To submit a good bug report keep in mind to: * Use a descriptive title so other people can understand what the issue is about. diff --git a/LICENSE b/LICENSE index 68c771a09..67db85882 100644 --- a/LICENSE +++ b/LICENSE @@ -173,4 +173,3 @@ defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - diff --git a/bandit/__init__.py b/bandit/__init__.py index ea247ecf5..accd94bcd 100644 --- a/bandit/__init__.py +++ b/bandit/__init__.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - import pbr.version from bandit.core import config # noqa @@ -18,4 +16,4 @@ from bandit.core.issue import * # noqa from bandit.core.test_properties import * # noqa -__version__ = pbr.version.VersionInfo('bandit').version_string() +__version__ = pbr.version.VersionInfo("bandit").version_string() diff --git a/bandit/__main__.py b/bandit/__main__.py index da8a3ddef..f43c06a25 100644 --- a/bandit/__main__.py +++ b/bandit/__main__.py @@ -13,4 +13,5 @@ https://bandit.readthedocs.io/ """ from bandit.cli import main + main.main() diff --git a/bandit/blacklists/calls.py b/bandit/blacklists/calls.py index ee8df2078..0b82d1b99 100644 --- a/bandit/blacklists/calls.py +++ b/bandit/blacklists/calls.py @@ -1,4 +1,3 @@ -# -*- coding:utf-8 -*- # # Copyright 2016 Hewlett-Packard Development Company, L.P. # @@ -314,7 +313,6 @@ +------+---------------------+------------------------------------+-----------+ """ - from bandit.blacklists import utils @@ -330,238 +328,333 @@ def gen_blacklist(): :return: a dictionary mapping node types to a list of blacklist data """ sets = [] - sets.append(utils.build_conf_dict( - 'pickle', 'B301', - ['pickle.loads', - 'pickle.load', - 'pickle.Unpickler', - 'cPickle.loads', - 'cPickle.load', - 'cPickle.Unpickler', - 'dill.loads', - 'dill.load', - 'dill.Unpickler', - 'shelve.open', - 'shelve.DbfilenameShelf'], - 'Pickle and modules that wrap it can be unsafe when used to ' - 'deserialize untrusted data, possible security issue.' - )) - - sets.append(utils.build_conf_dict( - 'marshal', 'B302', ['marshal.load', 'marshal.loads'], - 'Deserialization with the marshal module is possibly dangerous.' 
- )) - - sets.append(utils.build_conf_dict( - 'md5', 'B303', - ['hashlib.md5', - 'hashlib.sha1', - 'Crypto.Hash.MD2.new', - 'Crypto.Hash.MD4.new', - 'Crypto.Hash.MD5.new', - 'Crypto.Hash.SHA.new', - 'Cryptodome.Hash.MD2.new', - 'Cryptodome.Hash.MD4.new', - 'Cryptodome.Hash.MD5.new', - 'Cryptodome.Hash.SHA.new', - 'cryptography.hazmat.primitives.hashes.MD5', - 'cryptography.hazmat.primitives.hashes.SHA1'], - 'Use of insecure MD2, MD4, MD5, or SHA1 hash function.' - )) - - sets.append(utils.build_conf_dict( - 'ciphers', 'B304', - ['Crypto.Cipher.ARC2.new', - 'Crypto.Cipher.ARC4.new', - 'Crypto.Cipher.Blowfish.new', - 'Crypto.Cipher.DES.new', - 'Crypto.Cipher.XOR.new', - 'Cryptodome.Cipher.ARC2.new', - 'Cryptodome.Cipher.ARC4.new', - 'Cryptodome.Cipher.Blowfish.new', - 'Cryptodome.Cipher.DES.new', - 'Cryptodome.Cipher.XOR.new', - 'cryptography.hazmat.primitives.ciphers.algorithms.ARC4', - 'cryptography.hazmat.primitives.ciphers.algorithms.Blowfish', - 'cryptography.hazmat.primitives.ciphers.algorithms.IDEA'], - 'Use of insecure cipher {name}. Replace with a known secure' - ' cipher such as AES.', - 'HIGH' - )) - - sets.append(utils.build_conf_dict( - 'cipher_modes', 'B305', - ['cryptography.hazmat.primitives.ciphers.modes.ECB'], - 'Use of insecure cipher mode {name}.' - )) - - sets.append(utils.build_conf_dict( - 'mktemp_q', 'B306', ['tempfile.mktemp'], - 'Use of insecure and deprecated function (mktemp).' - )) - - sets.append(utils.build_conf_dict( - 'eval', 'B307', ['eval'], - 'Use of possibly insecure function - consider using safer ' - 'ast.literal_eval.' - )) - - sets.append(utils.build_conf_dict( - 'mark_safe', 'B308', ['django.utils.safestring.mark_safe'], - 'Use of mark_safe() may expose cross-site scripting ' - 'vulnerabilities and should be reviewed.' 
- )) - - sets.append(utils.build_conf_dict( - 'httpsconnection', 'B309', - ['httplib.HTTPSConnection', - 'http.client.HTTPSConnection', - 'six.moves.http_client.HTTPSConnection'], - 'Use of HTTPSConnection on older versions of Python prior to 2.7.9 ' - 'and 3.4.3 do not provide security, see ' - 'https://wiki.openstack.org/wiki/OSSN/OSSN-0033' - )) - - sets.append(utils.build_conf_dict( - 'urllib_urlopen', 'B310', - ['urllib.urlopen', - 'urllib.request.urlopen', - 'urllib.urlretrieve', - 'urllib.request.urlretrieve', - 'urllib.URLopener', - 'urllib.request.URLopener', - 'urllib.FancyURLopener', - 'urllib.request.FancyURLopener', - 'urllib2.urlopen', - 'urllib2.Request', - 'six.moves.urllib.request.urlopen', - 'six.moves.urllib.request.urlretrieve', - 'six.moves.urllib.request.URLopener', - 'six.moves.urllib.request.FancyURLopener'], - 'Audit url open for permitted schemes. Allowing use of file:/ or ' - 'custom schemes is often unexpected.' - )) - - sets.append(utils.build_conf_dict( - 'random', 'B311', - ['random.random', - 'random.randrange', - 'random.randint', - 'random.choice', - 'random.choices', - 'random.uniform', - 'random.triangular'], - 'Standard pseudo-random generators are not suitable for ' - 'security/cryptographic purposes.', - 'LOW' - )) - - sets.append(utils.build_conf_dict( - 'telnetlib', 'B312', ['telnetlib.*'], - 'Telnet-related functions are being called. Telnet is considered ' - 'insecure. 
Use SSH or some other encrypted protocol.', - 'HIGH' - )) + sets.append( + utils.build_conf_dict( + "pickle", + "B301", + [ + "pickle.loads", + "pickle.load", + "pickle.Unpickler", + "cPickle.loads", + "cPickle.load", + "cPickle.Unpickler", + "dill.loads", + "dill.load", + "dill.Unpickler", + "shelve.open", + "shelve.DbfilenameShelf", + ], + "Pickle and modules that wrap it can be unsafe when used to " + "deserialize untrusted data, possible security issue.", + ) + ) + + sets.append( + utils.build_conf_dict( + "marshal", + "B302", + ["marshal.load", "marshal.loads"], + "Deserialization with the marshal module is possibly dangerous.", + ) + ) + + sets.append( + utils.build_conf_dict( + "md5", + "B303", + [ + "hashlib.md5", + "hashlib.sha1", + "Crypto.Hash.MD2.new", + "Crypto.Hash.MD4.new", + "Crypto.Hash.MD5.new", + "Crypto.Hash.SHA.new", + "Cryptodome.Hash.MD2.new", + "Cryptodome.Hash.MD4.new", + "Cryptodome.Hash.MD5.new", + "Cryptodome.Hash.SHA.new", + "cryptography.hazmat.primitives.hashes.MD5", + "cryptography.hazmat.primitives.hashes.SHA1", + ], + "Use of insecure MD2, MD4, MD5, or SHA1 hash function.", + ) + ) + + sets.append( + utils.build_conf_dict( + "ciphers", + "B304", + [ + "Crypto.Cipher.ARC2.new", + "Crypto.Cipher.ARC4.new", + "Crypto.Cipher.Blowfish.new", + "Crypto.Cipher.DES.new", + "Crypto.Cipher.XOR.new", + "Cryptodome.Cipher.ARC2.new", + "Cryptodome.Cipher.ARC4.new", + "Cryptodome.Cipher.Blowfish.new", + "Cryptodome.Cipher.DES.new", + "Cryptodome.Cipher.XOR.new", + "cryptography.hazmat.primitives.ciphers.algorithms.ARC4", + "cryptography.hazmat.primitives.ciphers.algorithms.Blowfish", + "cryptography.hazmat.primitives.ciphers.algorithms.IDEA", + ], + "Use of insecure cipher {name}. 
Replace with a known secure" + " cipher such as AES.", + "HIGH", + ) + ) + + sets.append( + utils.build_conf_dict( + "cipher_modes", + "B305", + ["cryptography.hazmat.primitives.ciphers.modes.ECB"], + "Use of insecure cipher mode {name}.", + ) + ) + + sets.append( + utils.build_conf_dict( + "mktemp_q", + "B306", + ["tempfile.mktemp"], + "Use of insecure and deprecated function (mktemp).", + ) + ) + + sets.append( + utils.build_conf_dict( + "eval", + "B307", + ["eval"], + "Use of possibly insecure function - consider using safer " + "ast.literal_eval.", + ) + ) + + sets.append( + utils.build_conf_dict( + "mark_safe", + "B308", + ["django.utils.safestring.mark_safe"], + "Use of mark_safe() may expose cross-site scripting " + "vulnerabilities and should be reviewed.", + ) + ) + + sets.append( + utils.build_conf_dict( + "httpsconnection", + "B309", + [ + "httplib.HTTPSConnection", + "http.client.HTTPSConnection", + "six.moves.http_client.HTTPSConnection", + ], + "Use of HTTPSConnection on older versions of Python prior to 2.7.9" + " and 3.4.3 do not provide security, see " + "https://wiki.openstack.org/wiki/OSSN/OSSN-0033", + ) + ) + + sets.append( + utils.build_conf_dict( + "urllib_urlopen", + "B310", + [ + "urllib.urlopen", + "urllib.request.urlopen", + "urllib.urlretrieve", + "urllib.request.urlretrieve", + "urllib.URLopener", + "urllib.request.URLopener", + "urllib.FancyURLopener", + "urllib.request.FancyURLopener", + "urllib2.urlopen", + "urllib2.Request", + "six.moves.urllib.request.urlopen", + "six.moves.urllib.request.urlretrieve", + "six.moves.urllib.request.URLopener", + "six.moves.urllib.request.FancyURLopener", + ], + "Audit url open for permitted schemes. 
Allowing use of file:/ or " + "custom schemes is often unexpected.", + ) + ) + + sets.append( + utils.build_conf_dict( + "random", + "B311", + [ + "random.random", + "random.randrange", + "random.randint", + "random.choice", + "random.choices", + "random.uniform", + "random.triangular", + ], + "Standard pseudo-random generators are not suitable for " + "security/cryptographic purposes.", + "LOW", + ) + ) + + sets.append( + utils.build_conf_dict( + "telnetlib", + "B312", + ["telnetlib.*"], + "Telnet-related functions are being called. Telnet is considered " + "insecure. Use SSH or some other encrypted protocol.", + "HIGH", + ) + ) # Most of this is based off of Christian Heimes' work on defusedxml: # https://pypi.org/project/defusedxml/#defusedxml-sax - xml_msg = ('Using {name} to parse untrusted XML data is known to be ' - 'vulnerable to XML attacks. Replace {name} with its ' - 'defusedxml equivalent function or make sure ' - 'defusedxml.defuse_stdlib() is called') - - sets.append(utils.build_conf_dict( - 'xml_bad_cElementTree', 'B313', - ['xml.etree.cElementTree.parse', - 'xml.etree.cElementTree.iterparse', - 'xml.etree.cElementTree.fromstring', - 'xml.etree.cElementTree.XMLParser'], - xml_msg - )) - - sets.append(utils.build_conf_dict( - 'xml_bad_ElementTree', 'B314', - ['xml.etree.ElementTree.parse', - 'xml.etree.ElementTree.iterparse', - 'xml.etree.ElementTree.fromstring', - 'xml.etree.ElementTree.XMLParser'], - xml_msg - )) - - sets.append(utils.build_conf_dict( - 'xml_bad_expatreader', 'B315', ['xml.sax.expatreader.create_parser'], - xml_msg - )) - - sets.append(utils.build_conf_dict( - 'xml_bad_expatbuilder', 'B316', - ['xml.dom.expatbuilder.parse', - 'xml.dom.expatbuilder.parseString'], - xml_msg - )) - - sets.append(utils.build_conf_dict( - 'xml_bad_sax', 'B317', - ['xml.sax.parse', - 'xml.sax.parseString', - 'xml.sax.make_parser'], - xml_msg - )) - - sets.append(utils.build_conf_dict( - 'xml_bad_minidom', 'B318', - ['xml.dom.minidom.parse', - 
'xml.dom.minidom.parseString'], - xml_msg - )) - - sets.append(utils.build_conf_dict( - 'xml_bad_pulldom', 'B319', - ['xml.dom.pulldom.parse', - 'xml.dom.pulldom.parseString'], - xml_msg - )) - - sets.append(utils.build_conf_dict( - 'xml_bad_etree', 'B320', - ['lxml.etree.parse', - 'lxml.etree.fromstring', - 'lxml.etree.RestrictedElement', - 'lxml.etree.GlobalParserTLS', - 'lxml.etree.getDefaultParser', - 'lxml.etree.check_docinfo'], - ('Using {name} to parse untrusted XML data is known to be ' - 'vulnerable to XML attacks. Replace {name} with its ' - 'defusedxml equivalent function.') - )) + xml_msg = ( + "Using {name} to parse untrusted XML data is known to be " + "vulnerable to XML attacks. Replace {name} with its " + "defusedxml equivalent function or make sure " + "defusedxml.defuse_stdlib() is called" + ) + + sets.append( + utils.build_conf_dict( + "xml_bad_cElementTree", + "B313", + [ + "xml.etree.cElementTree.parse", + "xml.etree.cElementTree.iterparse", + "xml.etree.cElementTree.fromstring", + "xml.etree.cElementTree.XMLParser", + ], + xml_msg, + ) + ) + + sets.append( + utils.build_conf_dict( + "xml_bad_ElementTree", + "B314", + [ + "xml.etree.ElementTree.parse", + "xml.etree.ElementTree.iterparse", + "xml.etree.ElementTree.fromstring", + "xml.etree.ElementTree.XMLParser", + ], + xml_msg, + ) + ) + + sets.append( + utils.build_conf_dict( + "xml_bad_expatreader", + "B315", + ["xml.sax.expatreader.create_parser"], + xml_msg, + ) + ) + + sets.append( + utils.build_conf_dict( + "xml_bad_expatbuilder", + "B316", + ["xml.dom.expatbuilder.parse", "xml.dom.expatbuilder.parseString"], + xml_msg, + ) + ) + + sets.append( + utils.build_conf_dict( + "xml_bad_sax", + "B317", + ["xml.sax.parse", "xml.sax.parseString", "xml.sax.make_parser"], + xml_msg, + ) + ) + + sets.append( + utils.build_conf_dict( + "xml_bad_minidom", + "B318", + ["xml.dom.minidom.parse", "xml.dom.minidom.parseString"], + xml_msg, + ) + ) + + sets.append( + utils.build_conf_dict( + 
"xml_bad_pulldom", + "B319", + ["xml.dom.pulldom.parse", "xml.dom.pulldom.parseString"], + xml_msg, + ) + ) + + sets.append( + utils.build_conf_dict( + "xml_bad_etree", + "B320", + [ + "lxml.etree.parse", + "lxml.etree.fromstring", + "lxml.etree.RestrictedElement", + "lxml.etree.GlobalParserTLS", + "lxml.etree.getDefaultParser", + "lxml.etree.check_docinfo", + ], + ( + "Using {name} to parse untrusted XML data is known to be " + "vulnerable to XML attacks. Replace {name} with its " + "defusedxml equivalent function." + ), + ) + ) # end of XML tests - sets.append(utils.build_conf_dict( - 'ftplib', 'B321', ['ftplib.*'], - 'FTP-related functions are being called. FTP is considered ' - 'insecure. Use SSH/SFTP/SCP or some other encrypted protocol.', - 'HIGH' - )) + sets.append( + utils.build_conf_dict( + "ftplib", + "B321", + ["ftplib.*"], + "FTP-related functions are being called. FTP is considered " + "insecure. Use SSH/SFTP/SCP or some other encrypted protocol.", + "HIGH", + ) + ) # skipped B322 as the check for a call to input() has been removed - sets.append(utils.build_conf_dict( - 'unverified_context', 'B323', ['ssl._create_unverified_context'], - 'By default, Python will create a secure, verified ssl context for ' - 'use in such classes as HTTPSConnection. However, it still allows ' - 'using an insecure context via the _create_unverified_context that ' - 'reverts to the previous behavior that does not validate certificates ' - 'or perform hostname checks.' - )) + sets.append( + utils.build_conf_dict( + "unverified_context", + "B323", + ["ssl._create_unverified_context"], + "By default, Python will create a secure, verified ssl context for" + " use in such classes as HTTPSConnection. 
However, it still allows" + " using an insecure context via the _create_unverified_context " + "that reverts to the previous behavior that does not validate " + "certificates or perform hostname checks.", + ) + ) # skipped B324 (used in bandit/plugins/hashlib_new_insecure_functions.py) - sets.append(utils.build_conf_dict( - 'tempnam', 'B325', ['os.tempnam', 'os.tmpnam'], - 'Use of os.tempnam() and os.tmpnam() is vulnerable to symlink ' - 'attacks. Consider using tmpfile() instead.' - )) - - return {'Call': sets} + sets.append( + utils.build_conf_dict( + "tempnam", + "B325", + ["os.tempnam", "os.tmpnam"], + "Use of os.tempnam() and os.tmpnam() is vulnerable to symlink " + "attacks. Consider using tmpfile() instead.", + ) + ) + + return {"Call": sets} diff --git a/bandit/blacklists/imports.py b/bandit/blacklists/imports.py index 7181ec496..c8bac64a0 100644 --- a/bandit/blacklists/imports.py +++ b/bandit/blacklists/imports.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2016 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ====================================================== Blacklist various Python imports known to be dangerous @@ -230,89 +228,154 @@ def gen_blacklist(): :return: a dictionary mapping node types to a list of blacklist data """ sets = [] - sets.append(utils.build_conf_dict( - 'import_telnetlib', 'B401', ['telnetlib'], - 'A telnet-related module is being imported. Telnet is ' - 'considered insecure. Use SSH or some other encrypted protocol.', - 'HIGH' - )) - - sets.append(utils.build_conf_dict( - 'import_ftplib', 'B402', ['ftplib'], - 'A FTP-related module is being imported. FTP is considered ' - 'insecure. 
Use SSH/SFTP/SCP or some other encrypted protocol.', - 'HIGH' - )) - - sets.append(utils.build_conf_dict( - 'import_pickle', 'B403', ['pickle', 'cPickle', 'dill', 'shelve'], - 'Consider possible security implications associated with ' - '{name} module.', 'LOW' - )) - - sets.append(utils.build_conf_dict( - 'import_subprocess', 'B404', ['subprocess'], - 'Consider possible security implications associated with the ' - 'subprocess module.', 'LOW' - )) + sets.append( + utils.build_conf_dict( + "import_telnetlib", + "B401", + ["telnetlib"], + "A telnet-related module is being imported. Telnet is " + "considered insecure. Use SSH or some other encrypted protocol.", + "HIGH", + ) + ) + + sets.append( + utils.build_conf_dict( + "import_ftplib", + "B402", + ["ftplib"], + "A FTP-related module is being imported. FTP is considered " + "insecure. Use SSH/SFTP/SCP or some other encrypted protocol.", + "HIGH", + ) + ) + + sets.append( + utils.build_conf_dict( + "import_pickle", + "B403", + ["pickle", "cPickle", "dill", "shelve"], + "Consider possible security implications associated with " + "{name} module.", + "LOW", + ) + ) + + sets.append( + utils.build_conf_dict( + "import_subprocess", + "B404", + ["subprocess"], + "Consider possible security implications associated with the " + "subprocess module.", + "LOW", + ) + ) # Most of this is based off of Christian Heimes' work on defusedxml: # https://pypi.org/project/defusedxml/#defusedxml-sax - xml_msg = ('Using {name} to parse untrusted XML data is known to be ' - 'vulnerable to XML attacks. Replace {name} with the equivalent ' - 'defusedxml package, or make sure defusedxml.defuse_stdlib() ' - 'is called.') - lxml_msg = ('Using {name} to parse untrusted XML data is known to be ' - 'vulnerable to XML attacks. 
Replace {name} with the ' - 'equivalent defusedxml package.') - - sets.append(utils.build_conf_dict( - 'import_xml_etree', 'B405', - ['xml.etree.cElementTree', 'xml.etree.ElementTree'], xml_msg, 'LOW')) - - sets.append(utils.build_conf_dict( - 'import_xml_sax', 'B406', ['xml.sax'], xml_msg, 'LOW')) - - sets.append(utils.build_conf_dict( - 'import_xml_expat', 'B407', ['xml.dom.expatbuilder'], xml_msg, 'LOW')) - - sets.append(utils.build_conf_dict( - 'import_xml_minidom', 'B408', ['xml.dom.minidom'], xml_msg, 'LOW')) - - sets.append(utils.build_conf_dict( - 'import_xml_pulldom', 'B409', ['xml.dom.pulldom'], xml_msg, 'LOW')) - - sets.append(utils.build_conf_dict( - 'import_lxml', 'B410', ['lxml'], lxml_msg, 'LOW')) - - sets.append(utils.build_conf_dict( - 'import_xmlrpclib', 'B411', ['xmlrpclib'], - 'Using {name} to parse untrusted XML data is known to be ' - 'vulnerable to XML attacks. Use defused.xmlrpc.monkey_patch() ' - 'function to monkey-patch xmlrpclib and mitigate XML ' - 'vulnerabilities.', 'HIGH')) - - sets.append(utils.build_conf_dict( - 'import_httpoxy', 'B412', - ['wsgiref.handlers.CGIHandler', 'twisted.web.twcgi.CGIScript', - 'twisted.web.twcgi.CGIDirectory'], - 'Consider possible security implications associated with ' - '{name} module.', 'HIGH' - )) - - sets.append(utils.build_conf_dict( - 'import_pycrypto', 'B413', - ['Crypto.Cipher', - 'Crypto.Hash', - 'Crypto.IO', - 'Crypto.Protocol', - 'Crypto.PublicKey', - 'Crypto.Random', - 'Crypto.Signature', - 'Crypto.Util'], - 'The pyCrypto library and its module {name} are no longer actively ' - 'maintained and have been deprecated. ' - 'Consider using pyca/cryptography library.', 'HIGH')) - - return {'Import': sets, 'ImportFrom': sets, 'Call': sets} + xml_msg = ( + "Using {name} to parse untrusted XML data is known to be " + "vulnerable to XML attacks. Replace {name} with the equivalent " + "defusedxml package, or make sure defusedxml.defuse_stdlib() " + "is called." 
+ ) + lxml_msg = ( + "Using {name} to parse untrusted XML data is known to be " + "vulnerable to XML attacks. Replace {name} with the " + "equivalent defusedxml package." + ) + + sets.append( + utils.build_conf_dict( + "import_xml_etree", + "B405", + ["xml.etree.cElementTree", "xml.etree.ElementTree"], + xml_msg, + "LOW", + ) + ) + + sets.append( + utils.build_conf_dict( + "import_xml_sax", "B406", ["xml.sax"], xml_msg, "LOW" + ) + ) + + sets.append( + utils.build_conf_dict( + "import_xml_expat", + "B407", + ["xml.dom.expatbuilder"], + xml_msg, + "LOW", + ) + ) + + sets.append( + utils.build_conf_dict( + "import_xml_minidom", "B408", ["xml.dom.minidom"], xml_msg, "LOW" + ) + ) + + sets.append( + utils.build_conf_dict( + "import_xml_pulldom", "B409", ["xml.dom.pulldom"], xml_msg, "LOW" + ) + ) + + sets.append( + utils.build_conf_dict("import_lxml", "B410", ["lxml"], lxml_msg, "LOW") + ) + + sets.append( + utils.build_conf_dict( + "import_xmlrpclib", + "B411", + ["xmlrpclib"], + "Using {name} to parse untrusted XML data is known to be " + "vulnerable to XML attacks. Use defused.xmlrpc.monkey_patch() " + "function to monkey-patch xmlrpclib and mitigate XML " + "vulnerabilities.", + "HIGH", + ) + ) + + sets.append( + utils.build_conf_dict( + "import_httpoxy", + "B412", + [ + "wsgiref.handlers.CGIHandler", + "twisted.web.twcgi.CGIScript", + "twisted.web.twcgi.CGIDirectory", + ], + "Consider possible security implications associated with " + "{name} module.", + "HIGH", + ) + ) + + sets.append( + utils.build_conf_dict( + "import_pycrypto", + "B413", + [ + "Crypto.Cipher", + "Crypto.Hash", + "Crypto.IO", + "Crypto.Protocol", + "Crypto.PublicKey", + "Crypto.Random", + "Crypto.Signature", + "Crypto.Util", + ], + "The pyCrypto library and its module {name} are no longer actively" + " maintained and have been deprecated. 
" + "Consider using pyca/cryptography library.", + "HIGH", + ) + ) + + return {"Import": sets, "ImportFrom": sets, "Call": sets} diff --git a/bandit/blacklists/utils.py b/bandit/blacklists/utils.py index 0ebc6021d..8832b49c3 100644 --- a/bandit/blacklists/utils.py +++ b/bandit/blacklists/utils.py @@ -1,4 +1,3 @@ -# -*- coding:utf-8 -*- # # Copyright 2016 Hewlett-Packard Development Company, L.P. # @@ -6,7 +5,12 @@ r"""Utils module.""" -def build_conf_dict(name, bid, qualnames, message, level='MEDIUM'): +def build_conf_dict(name, bid, qualnames, message, level="MEDIUM"): """Build and return a blacklist configuration dict.""" - return {'name': name, 'id': bid, 'message': message, - 'qualnames': qualnames, 'level': level} + return { + "name": name, + "id": bid, + "message": message, + "qualnames": qualnames, + "level": level, + } diff --git a/bandit/cli/baseline.py b/bandit/cli/baseline.py index b33c1aafa..ac92d3680 100644 --- a/bandit/cli/baseline.py +++ b/bandit/cli/baseline.py @@ -1,21 +1,15 @@ -# -*- coding:utf-8 -*- # # Copyright 2015 Hewlett-Packard Enterprise # # SPDX-License-Identifier: Apache-2.0 - # ############################################################################# # Bandit Baseline is a tool that runs Bandit against a Git commit, and compares # the current commit findings to the parent commit findings. - # To do this it checks out the parent commit, runs Bandit (with any provided # filters or profiles), checks out the current commit, runs Bandit, and then # reports on any new findings. 
# ############################################################################# - """Bandit is a tool designed to find common security issues in Python code.""" - - import argparse import contextlib import logging @@ -28,13 +22,13 @@ import git bandit_args = sys.argv[1:] -baseline_tmp_file = '_bandit_baseline_run.json_' +baseline_tmp_file = "_bandit_baseline_run.json_" current_commit = None -default_output_format = 'terminal' +default_output_format = "terminal" LOG = logging.getLogger(__name__) repo = None -report_basename = 'bandit_baseline_result' -valid_baseline_formats = ['txt', 'html', 'json'] +report_basename = "bandit_baseline_result" +valid_baseline_formats = ["txt", "html", "json"] """baseline.py""" @@ -61,11 +55,11 @@ def main(): try: commit = repo.commit() current_commit = commit.hexsha - LOG.info('Got current commit: [%s]', commit.name_rev) + LOG.info("Got current commit: [%s]", commit.name_rev) commit = commit.parents[0] parent_commit = commit.hexsha - LOG.info('Got parent commit: [%s]', commit.name_rev) + LOG.info("Got parent commit: [%s]", commit.name_rev) except git.GitCommandError: LOG.error("Unable to get current or parent commit") @@ -75,29 +69,37 @@ def main(): sys.exit(2) # #################### Run Bandit against both commits #################### - output_type = (['-f', 'txt'] if output_format == default_output_format - else ['-o', report_fname]) + output_type = ( + ["-f", "txt"] + if output_format == default_output_format + else ["-o", report_fname] + ) with baseline_setup() as t: - bandit_tmpfile = "{}/{}".format(t, baseline_tmp_file) - - steps = [{'message': 'Getting Bandit baseline results', - 'commit': parent_commit, - 'args': bandit_args + ['-f', 'json', '-o', bandit_tmpfile]}, - - {'message': 'Comparing Bandit results to baseline', - 'commit': current_commit, - 'args': bandit_args + ['-b', bandit_tmpfile] + output_type}] + bandit_tmpfile = f"{t}/{baseline_tmp_file}" + + steps = [ + { + "message": "Getting Bandit baseline results", + 
"commit": parent_commit, + "args": bandit_args + ["-f", "json", "-o", bandit_tmpfile], + }, + { + "message": "Comparing Bandit results to baseline", + "commit": current_commit, + "args": bandit_args + ["-b", bandit_tmpfile] + output_type, + }, + ] return_code = None for step in steps: - repo.head.reset(commit=step['commit'], working_tree=True) + repo.head.reset(commit=step["commit"], working_tree=True) - LOG.info(step['message']) + LOG.info(step["message"]) - bandit_command = ['bandit'] + step['args'] + bandit_command = ["bandit"] + step["args"] try: output = subprocess.check_output(bandit_command) @@ -106,11 +108,14 @@ def main(): return_code = e.returncode else: return_code = 0 - output = output.decode('utf-8') # subprocess returns bytes + output = output.decode("utf-8") # subprocess returns bytes if return_code not in [0, 1]: - LOG.error("Error running command: %s\nOutput: %s\n", - bandit_args, output) + LOG.error( + "Error running command: %s\nOutput: %s\n", + bandit_args, + output, + ) # #################### Output and exit #################################### # print output or display message about written report @@ -156,31 +161,42 @@ def initialize(): # #################### Parse Args ######################################### parser = argparse.ArgumentParser( description='Bandit Baseline - Generates Bandit results compared to "' - 'a baseline', + "a baseline", formatter_class=argparse.RawDescriptionHelpFormatter, - epilog='Additional Bandit arguments such as severity filtering (-ll) ' - 'can be added and will be passed to Bandit.' 
+ epilog="Additional Bandit arguments such as severity filtering (-ll) " + "can be added and will be passed to Bandit.", ) - parser.add_argument('targets', metavar='targets', type=str, nargs='+', - help='source file(s) or directory(s) to be tested') + parser.add_argument( + "targets", + metavar="targets", + type=str, + nargs="+", + help="source file(s) or directory(s) to be tested", + ) - parser.add_argument('-f', dest='output_format', action='store', - default='terminal', help='specify output format', - choices=valid_baseline_formats) + parser.add_argument( + "-f", + dest="output_format", + action="store", + default="terminal", + help="specify output format", + choices=valid_baseline_formats, + ) args, _ = parser.parse_known_args() # #################### Setup Output ####################################### # set the output format, or use a default if not provided - output_format = (args.output_format if args.output_format - else default_output_format) + output_format = ( + args.output_format if args.output_format else default_output_format + ) if output_format == default_output_format: LOG.info("No output format specified, using %s", default_output_format) # set the report name based on the output format - report_fname = "{}.{}".format(report_basename, output_format) + report_fname = f"{report_basename}.{output_format}" # #################### Check Requirements ################################# try: @@ -196,8 +212,9 @@ def initialize(): else: if repo.is_dirty(): - LOG.error("Current working directory is dirty and must be " - "resolved") + LOG.error( + "Current working directory is dirty and must be " "resolved" + ) valid = False # if output format is specified, we need to be able to write the report @@ -207,17 +224,19 @@ def initialize(): # Bandit needs to be able to create this temp file if os.path.exists(baseline_tmp_file): - LOG.error("Temporary file %s needs to be removed prior to running", - baseline_tmp_file) + LOG.error( + "Temporary file %s needs to be 
removed prior to running", + baseline_tmp_file, + ) valid = False # we must validate -o is not provided, as it will mess up Bandit baseline - if '-o' in bandit_args: + if "-o" in bandit_args: LOG.error("Bandit baseline must not be called with the -o option") valid = False return (output_format, repo, report_fname) if valid else (None, None, None) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/bandit/cli/config_generator.py b/bandit/cli/config_generator.py index 1c6dd089d..e46867f5e 100644 --- a/bandit/cli/config_generator.py +++ b/bandit/cli/config_generator.py @@ -2,10 +2,6 @@ # # SPDX-License-Identifier: Apache-2.0 """Bandit is a tool designed to find common security issues in Python code.""" - - -from __future__ import print_function - import argparse import importlib import logging @@ -16,7 +12,7 @@ from bandit.core import extension_loader -PROG_NAME = 'bandit_conf_generator' +PROG_NAME = "bandit_conf_generator" LOG = logging.getLogger(__name__) @@ -78,23 +74,41 @@ def parse_args(): parser = argparse.ArgumentParser( description=help_description, - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('--show-defaults', dest='show_defaults', - action='store_true', - help='show the default settings values for each ' - 'plugin but do not output a profile') - parser.add_argument('-o', '--out', dest='output_file', - action='store', - help='output file to save profile') + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument( - '-t', '--tests', dest='tests', - action='store', default=None, type=str, - help='list of test names to run') + "--show-defaults", + dest="show_defaults", + action="store_true", + help="show the default settings values for each " + "plugin but do not output a profile", + ) parser.add_argument( - '-s', '--skip', dest='skips', - action='store', default=None, type=str, - help='list of test names to skip') + "-o", + "--out", + dest="output_file", + action="store", + help="output file 
to save profile", + ) + parser.add_argument( + "-t", + "--tests", + dest="tests", + action="store", + default=None, + type=str, + help="list of test names to run", + ) + parser.add_argument( + "-s", + "--skip", + dest="skips", + action="store", + default=None, + type=str, + help="list of test names to skip", + ) args = parser.parse_args() if not args.output_file and not args.show_defaults: @@ -112,11 +126,11 @@ def get_config_settings(): function = plugin.plugin # if a function takes config... - if hasattr(function, '_takes_config'): + if hasattr(function, "_takes_config"): fn_module = importlib.import_module(function.__module__) # call the config generator if it exists - if hasattr(fn_module, 'gen_config'): + if hasattr(fn_module, "gen_config"): config[fn_name] = fn_module.gen_config(function._takes_config) return yaml.safe_dump(config, default_flow_style=False) @@ -138,24 +152,30 @@ def main(): sys.exit(2) try: - with open(args.output_file, 'w') as f: - skips = args.skips.split(',') if args.skips else [] - tests = args.tests.split(',') if args.tests else [] + with open(args.output_file, "w") as f: + skips = args.skips.split(",") if args.skips else [] + tests = args.tests.split(",") if args.tests else [] for skip in skips: if not extension_loader.MANAGER.check_id(skip): - raise RuntimeError('unknown ID in skips: %s' % skip) + raise RuntimeError("unknown ID in skips: %s" % skip) for test in tests: if not extension_loader.MANAGER.check_id(test): - raise RuntimeError('unknown ID in tests: %s' % test) + raise RuntimeError("unknown ID in tests: %s" % test) tpl = "# {0} : {1}" - test_list = [tpl.format(t.plugin._test_id, t.name) - for t in extension_loader.MANAGER.plugins] - - others = [tpl.format(k, v['name']) for k, v in ( - extension_loader.MANAGER.blacklist_by_id.items())] + test_list = [ + tpl.format(t.plugin._test_id, t.name) + for t in extension_loader.MANAGER.plugins + ] + + others = [ + tpl.format(k, v["name"]) + for k, v in ( + 
extension_loader.MANAGER.blacklist_by_id.items() + ) + ] test_list.extend(others) test_list.sort() @@ -163,11 +183,12 @@ def main(): cli=" ".join(sys.argv), settings=yaml_settings, test_list="\n".join(test_list), - skip='skips: ' + str(skips) if skips else 'skips:', - test='tests: ' + str(tests) if tests else 'tests:') + skip="skips: " + str(skips) if skips else "skips:", + test="tests: " + str(tests) if tests else "tests:", + ) f.write(contents) - except IOError: + except OSError: LOG.error("Unable to open %s for writing", args.output_file) except Exception as e: @@ -179,5 +200,5 @@ def main(): return 0 -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/bandit/cli/main.py b/bandit/cli/main.py index bbefc931d..fd436d1f5 100644 --- a/bandit/cli/main.py +++ b/bandit/cli/main.py @@ -1,11 +1,8 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 """Bandit is a tool designed to find common security issues in Python code.""" - - import argparse import fnmatch import logging @@ -19,7 +16,7 @@ from bandit.core import manager as b_manager from bandit.core import utils -BASE_CONFIG = 'bandit.yaml' +BASE_CONFIG = "bandit.yaml" LOG = logging.getLogger() @@ -57,17 +54,20 @@ def _get_options_from_ini(ini_path, target): for t in target: for root, _, filenames in os.walk(t): - for filename in fnmatch.filter(filenames, '.bandit'): + for filename in fnmatch.filter(filenames, ".bandit"): bandit_files.append(os.path.join(root, filename)) if len(bandit_files) > 1: - LOG.error('Multiple .bandit files found - scan separately or ' - 'choose one with --ini\n\t%s', ', '.join(bandit_files)) + LOG.error( + "Multiple .bandit files found - scan separately or " + "choose one with --ini\n\t%s", + ", ".join(bandit_files), + ) sys.exit(2) elif len(bandit_files) == 1: ini_file = bandit_files[0] - LOG.info('Found project level .bandit file: %s', bandit_files[0]) + LOG.info("Found project 
level .bandit file: %s", bandit_files[0]) if ini_file: return utils.parse_ini_file(ini_file) @@ -77,6 +77,7 @@ def _get_options_from_ini(ini_path, target): def _init_extensions(): from bandit.core import extension_loader as ext_loader + return ext_loader.MANAGER @@ -101,29 +102,29 @@ def _log_option_source(default_val, arg_val, ini_val, option_name): def _running_under_virtualenv(): - if hasattr(sys, 'real_prefix'): + if hasattr(sys, "real_prefix"): return True - elif sys.prefix != getattr(sys, 'base_prefix', sys.prefix): + elif sys.prefix != getattr(sys, "base_prefix", sys.prefix): return True def _get_profile(config, profile_name, config_path): profile = {} if profile_name: - profiles = config.get_option('profiles') or {} + profiles = config.get_option("profiles") or {} profile = profiles.get(profile_name) if profile is None: raise utils.ProfileNotFound(config_path, profile_name) LOG.debug("read in legacy profile '%s': %s", profile_name, profile) else: - profile['include'] = set(config.get_option('tests') or []) - profile['exclude'] = set(config.get_option('skips') or []) + profile["include"] = set(config.get_option("tests") or []) + profile["exclude"] = set(config.get_option("skips") or []) return profile def _log_info(args, profile): - inc = ",".join([t for t in profile['include']]) or "None" - exc = ",".join([t for t in profile['exclude']]) or "None" + inc = ",".join([t for t in profile["include"]]) or "None" + exc = ",".join([t for t in profile["exclude"]]) or "None" LOG.info("profile include tests: %s", inc) LOG.info("profile exclude tests: %s", exc) LOG.info("cli include tests: %s", args.tests) @@ -133,150 +134,238 @@ def _log_info(args, profile): def main(): """Bandit CLI.""" # bring our logging stuff up as early as possible - debug = (logging.DEBUG if '-d' in sys.argv or '--debug' in sys.argv else - logging.INFO) + debug = ( + logging.DEBUG + if "-d" in sys.argv or "--debug" in sys.argv + else logging.INFO + ) _init_logger(debug) extension_mgr = 
_init_extensions() - baseline_formatters = [f.name for f in filter(lambda x: - hasattr(x.plugin, - '_accepts_baseline'), - extension_mgr.formatters)] + baseline_formatters = [ + f.name + for f in filter( + lambda x: hasattr(x.plugin, "_accepts_baseline"), + extension_mgr.formatters, + ) + ] # now do normal startup parser = argparse.ArgumentParser( - description='Bandit - a Python source code security analyzer', - formatter_class=argparse.RawDescriptionHelpFormatter + description="Bandit - a Python source code security analyzer", + formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( - 'targets', metavar='targets', type=str, nargs='*', - help='source file(s) or directory(s) to be tested' + "targets", + metavar="targets", + type=str, + nargs="*", + help="source file(s) or directory(s) to be tested", ) parser.add_argument( - '-r', '--recursive', dest='recursive', - action='store_true', help='find and process files in subdirectories' + "-r", + "--recursive", + dest="recursive", + action="store_true", + help="find and process files in subdirectories", ) parser.add_argument( - '-a', '--aggregate', dest='agg_type', - action='store', default='file', type=str, - choices=['file', 'vuln'], - help='aggregate output by vulnerability (default) or by filename' + "-a", + "--aggregate", + dest="agg_type", + action="store", + default="file", + type=str, + choices=["file", "vuln"], + help="aggregate output by vulnerability (default) or by filename", ) parser.add_argument( - '-n', '--number', dest='context_lines', - action='store', default=3, type=int, - help='maximum number of code lines to output for each issue' + "-n", + "--number", + dest="context_lines", + action="store", + default=3, + type=int, + help="maximum number of code lines to output for each issue", ) parser.add_argument( - '-c', '--configfile', dest='config_file', - action='store', default=None, type=str, - help='optional config file to use for selecting plugins and ' - 'overriding defaults' + 
"-c", + "--configfile", + dest="config_file", + action="store", + default=None, + type=str, + help="optional config file to use for selecting plugins and " + "overriding defaults", ) parser.add_argument( - '-p', '--profile', dest='profile', - action='store', default=None, type=str, - help='profile to use (defaults to executing all tests)' + "-p", + "--profile", + dest="profile", + action="store", + default=None, + type=str, + help="profile to use (defaults to executing all tests)", ) parser.add_argument( - '-t', '--tests', dest='tests', - action='store', default=None, type=str, - help='comma-separated list of test IDs to run' + "-t", + "--tests", + dest="tests", + action="store", + default=None, + type=str, + help="comma-separated list of test IDs to run", ) parser.add_argument( - '-s', '--skip', dest='skips', - action='store', default=None, type=str, - help='comma-separated list of test IDs to skip' + "-s", + "--skip", + dest="skips", + action="store", + default=None, + type=str, + help="comma-separated list of test IDs to skip", ) severity_group = parser.add_mutually_exclusive_group(required=False) severity_group.add_argument( - '-l', '--level', dest='severity', action='count', - default=1, help='report only issues of a given severity level or ' - 'higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)' + "-l", + "--level", + dest="severity", + action="count", + default=1, + help="report only issues of a given severity level or " + "higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)", ) severity_group.add_argument( - '--severity-level', dest='severity_string', action='store', - help='report only issues of a given severity level or higher.' - ' "all" and "low" are likely to produce the same results, but it' - ' is possible for rules to be undefined which will' - ' not be listed in "low".', - choices=['all', 'low', 'medium', 'high'] + "--severity-level", + dest="severity_string", + action="store", + help="report only issues of a given severity level or higher." 
+ ' "all" and "low" are likely to produce the same results, but it' + " is possible for rules to be undefined which will" + ' not be listed in "low".', + choices=["all", "low", "medium", "high"], ) confidence_group = parser.add_mutually_exclusive_group(required=False) confidence_group.add_argument( - '-i', '--confidence', dest='confidence', action='count', - default=1, help='report only issues of a given confidence level or ' - 'higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)' + "-i", + "--confidence", + dest="confidence", + action="count", + default=1, + help="report only issues of a given confidence level or " + "higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)", ) confidence_group.add_argument( - '--confidence-level', dest='confidence_string', action='store', - help='report only issues of a given confidence level or higher.' - ' "all" and "low" are likely to produce the same results, but it' - ' is possible for rules to be undefined which will' - ' not be listed in "low".', - choices=["all", "low", "medium", "high"] + "--confidence-level", + dest="confidence_string", + action="store", + help="report only issues of a given confidence level or higher." 
+ ' "all" and "low" are likely to produce the same results, but it' + " is possible for rules to be undefined which will" + ' not be listed in "low".', + choices=["all", "low", "medium", "high"], ) - output_format = 'screen' if sys.stdout.isatty() else 'txt' + output_format = "screen" if sys.stdout.isatty() else "txt" parser.add_argument( - '-f', '--format', dest='output_format', action='store', - default=output_format, help='specify output format', - choices=sorted(extension_mgr.formatter_names) + "-f", + "--format", + dest="output_format", + action="store", + default=output_format, + help="specify output format", + choices=sorted(extension_mgr.formatter_names), ) parser.add_argument( - '--msg-template', action='store', - default=None, help='specify output message template' - ' (only usable with --format custom),' - ' see CUSTOM FORMAT section' - ' for list of available values', + "--msg-template", + action="store", + default=None, + help="specify output message template" + " (only usable with --format custom)," + " see CUSTOM FORMAT section" + " for list of available values", ) parser.add_argument( - '-o', '--output', dest='output_file', action='store', nargs='?', - type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout, - help='write report to filename' + "-o", + "--output", + dest="output_file", + action="store", + nargs="?", + type=argparse.FileType("w", encoding="utf-8"), + default=sys.stdout, + help="write report to filename", ) group = parser.add_mutually_exclusive_group(required=False) group.add_argument( - '-v', '--verbose', dest='verbose', action='store_true', - help='output extra information like excluded and included files' + "-v", + "--verbose", + dest="verbose", + action="store_true", + help="output extra information like excluded and included files", ) parser.add_argument( - '-d', '--debug', dest='debug', action='store_true', - help='turn on debug mode' + "-d", + "--debug", + dest="debug", + action="store_true", + help="turn on debug 
mode", ) group.add_argument( - '-q', '--quiet', '--silent', dest='quiet', action='store_true', - help='only show output in the case of an error' + "-q", + "--quiet", + "--silent", + dest="quiet", + action="store_true", + help="only show output in the case of an error", + ) + parser.add_argument( + "--ignore-nosec", + dest="ignore_nosec", + action="store_true", + help="do not skip lines with # nosec comments", ) parser.add_argument( - '--ignore-nosec', dest='ignore_nosec', action='store_true', - help='do not skip lines with # nosec comments' + "-x", + "--exclude", + dest="excluded_paths", + action="store", + default=",".join(constants.EXCLUDE), + help="comma-separated list of paths (glob patterns " + "supported) to exclude from scan " + "(note that these are in addition to the excluded " + "paths provided in the config file) (default: " + + ",".join(constants.EXCLUDE) + + ")", ) parser.add_argument( - '-x', '--exclude', dest='excluded_paths', action='store', - default=','.join(constants.EXCLUDE), - help='comma-separated list of paths (glob patterns ' - 'supported) to exclude from scan ' - '(note that these are in addition to the excluded ' - 'paths provided in the config file) (default: ' + - ','.join(constants.EXCLUDE) + ')' + "-b", + "--baseline", + dest="baseline", + action="store", + default=None, + help="path of a baseline report to compare against " + "(only JSON-formatted files are accepted)", ) parser.add_argument( - '-b', '--baseline', dest='baseline', action='store', - default=None, help='path of a baseline report to compare against ' - '(only JSON-formatted files are accepted)' + "--ini", + dest="ini_path", + action="store", + default=None, + help="path to a .bandit file that supplies command line arguments", ) parser.add_argument( - '--ini', dest='ini_path', action='store', default=None, - help='path to a .bandit file that supplies command line arguments' + "--exit-zero", + action="store_true", + dest="exit_zero", + default=False, + help="exit with 0, " 
"even with results found", ) - parser.add_argument('--exit-zero', action='store_true', dest='exit_zero', - default=False, help='exit with 0, ' - 'even with results found') - python_ver = sys.version.replace('\n', '') + python_ver = sys.version.replace("\n", "") parser.add_argument( - '--version', action='version', - version='%(prog)s {version}\n python version = {python}'.format( - version=bandit.__version__, python=python_ver) + "--version", + action="version", + version="%(prog)s {version}\n python version = {python}".format( + version=bandit.__version__, python=python_ver + ), ) parser.set_defaults(debug=False) @@ -284,15 +373,17 @@ def main(): parser.set_defaults(quiet=False) parser.set_defaults(ignore_nosec=False) - plugin_info = ["%s\t%s" % (a[0], a[1].name) for a in - extension_mgr.plugins_by_id.items()] + plugin_info = [ + f"{a[0]}\t{a[1].name}" for a in extension_mgr.plugins_by_id.items() + ] blacklist_info = [] for a in extension_mgr.blacklist.items(): for b in a[1]: - blacklist_info.append('%s\t%s' % (b['id'], b['name'])) + blacklist_info.append("{}\t{}".format(b["id"], b["name"])) - plugin_list = '\n\t'.join(sorted(set(plugin_info + blacklist_info))) - dedent_text = textwrap.dedent(''' + plugin_list = "\n\t".join(sorted(set(plugin_info + blacklist_info))) + dedent_text = textwrap.dedent( + """ CUSTOM FORMATTING ----------------- @@ -319,13 +410,14 @@ def main(): The following tests were discovered and loaded: ----------------------------------------------- - ''') - parser.epilog = dedent_text + "\t{0}".format(plugin_list) + """ + ) + parser.epilog = dedent_text + f"\t{plugin_list}" # setup work - parse arguments, and initialize BanditManager args = parser.parse_args() # Check if `--msg-template` is not present without custom formatter - if args.output_format != 'custom' and args.msg_template is not None: + if args.output_format != "custom" and args.msg_template is not None: parser.error("--msg-template can only be used with --format=custom") # Check if 
confidence or severity level have been specified with strings @@ -362,125 +454,143 @@ def main(): if ini_options: # prefer command line, then ini file args.excluded_paths = _log_option_source( - parser.get_default('excluded_paths'), + parser.get_default("excluded_paths"), args.excluded_paths, - ini_options.get('exclude'), - 'excluded paths') + ini_options.get("exclude"), + "excluded paths", + ) args.skips = _log_option_source( - parser.get_default('skips'), + parser.get_default("skips"), args.skips, - ini_options.get('skips'), - 'skipped tests') + ini_options.get("skips"), + "skipped tests", + ) args.tests = _log_option_source( - parser.get_default('tests'), + parser.get_default("tests"), args.tests, - ini_options.get('tests'), - 'selected tests') + ini_options.get("tests"), + "selected tests", + ) - ini_targets = ini_options.get('targets') + ini_targets = ini_options.get("targets") if ini_targets: - ini_targets = ini_targets.split(',') + ini_targets = ini_targets.split(",") args.targets = _log_option_source( - parser.get_default('targets'), + parser.get_default("targets"), args.targets, ini_targets, - 'selected targets') + "selected targets", + ) # TODO(tmcpeak): any other useful options to pass from .bandit? 
args.recursive = _log_option_source( - parser.get_default('recursive'), + parser.get_default("recursive"), args.recursive, - ini_options.get('recursive'), - 'recursive scan') + ini_options.get("recursive"), + "recursive scan", + ) args.agg_type = _log_option_source( - parser.get_default('agg_type'), + parser.get_default("agg_type"), args.agg_type, - ini_options.get('aggregate'), - 'aggregate output type') + ini_options.get("aggregate"), + "aggregate output type", + ) args.context_lines = _log_option_source( - parser.get_default('context_lines'), + parser.get_default("context_lines"), args.context_lines, - ini_options.get('number'), - 'max code lines output for issue') + ini_options.get("number"), + "max code lines output for issue", + ) args.profile = _log_option_source( - parser.get_default('profile'), + parser.get_default("profile"), args.profile, - ini_options.get('profile'), - 'profile') + ini_options.get("profile"), + "profile", + ) args.severity = _log_option_source( - parser.get_default('severity'), + parser.get_default("severity"), args.severity, - ini_options.get('level'), - 'severity level') + ini_options.get("level"), + "severity level", + ) args.confidence = _log_option_source( - parser.get_default('confidence'), + parser.get_default("confidence"), args.confidence, - ini_options.get('confidence'), - 'confidence level') + ini_options.get("confidence"), + "confidence level", + ) args.output_format = _log_option_source( - parser.get_default('output_format'), + parser.get_default("output_format"), args.output_format, - ini_options.get('format'), - 'output format') + ini_options.get("format"), + "output format", + ) args.msg_template = _log_option_source( - parser.get_default('msg_template'), + parser.get_default("msg_template"), args.msg_template, - ini_options.get('msg-template'), - 'output message template') + ini_options.get("msg-template"), + "output message template", + ) args.output_file = _log_option_source( - parser.get_default('output_file'), + 
parser.get_default("output_file"), args.output_file, - ini_options.get('output'), - 'output file') + ini_options.get("output"), + "output file", + ) args.verbose = _log_option_source( - parser.get_default('verbose'), + parser.get_default("verbose"), args.verbose, - ini_options.get('verbose'), - 'output extra information') + ini_options.get("verbose"), + "output extra information", + ) args.debug = _log_option_source( - parser.get_default('debug'), + parser.get_default("debug"), args.debug, - ini_options.get('debug'), - 'debug mode') + ini_options.get("debug"), + "debug mode", + ) args.quiet = _log_option_source( - parser.get_default('quiet'), + parser.get_default("quiet"), args.quiet, - ini_options.get('quiet'), - 'silent mode') + ini_options.get("quiet"), + "silent mode", + ) args.ignore_nosec = _log_option_source( - parser.get_default('ignore_nosec'), + parser.get_default("ignore_nosec"), args.ignore_nosec, - ini_options.get('ignore-nosec'), - 'do not skip lines with # nosec') + ini_options.get("ignore-nosec"), + "do not skip lines with # nosec", + ) args.baseline = _log_option_source( - parser.get_default('baseline'), + parser.get_default("baseline"), args.baseline, - ini_options.get('baseline'), - 'path of a baseline report') + ini_options.get("baseline"), + "path of a baseline report", + ) if not args.targets: LOG.error("No targets found in CLI or ini files, exiting.") sys.exit(2) # if the log format string was set in the options, reinitialize - if b_conf.get_option('log_format'): - log_format = b_conf.get_option('log_format') + if b_conf.get_option("log_format"): + log_format = b_conf.get_option("log_format") _init_logger(log_level=logging.DEBUG, log_format=log_format) if args.quiet: @@ -490,45 +600,56 @@ def main(): profile = _get_profile(b_conf, args.profile, args.config_file) _log_info(args, profile) - profile['include'].update(args.tests.split(',') if args.tests else []) - profile['exclude'].update(args.skips.split(',') if args.skips else []) + 
profile["include"].update(args.tests.split(",") if args.tests else []) + profile["exclude"].update(args.skips.split(",") if args.skips else []) extension_mgr.validate_profile(profile) except (utils.ProfileNotFound, ValueError) as e: LOG.error(e) sys.exit(2) - b_mgr = b_manager.BanditManager(b_conf, args.agg_type, args.debug, - profile=profile, verbose=args.verbose, - quiet=args.quiet, - ignore_nosec=args.ignore_nosec) + b_mgr = b_manager.BanditManager( + b_conf, + args.agg_type, + args.debug, + profile=profile, + verbose=args.verbose, + quiet=args.quiet, + ignore_nosec=args.ignore_nosec, + ) if args.baseline is not None: try: with open(args.baseline) as bl: data = bl.read() b_mgr.populate_baseline(data) - except IOError: + except OSError: LOG.warning("Could not open baseline report: %s", args.baseline) sys.exit(2) if args.output_format not in baseline_formatters: - LOG.warning('Baseline must be used with one of the following ' - 'formats: ' + str(baseline_formatters)) + LOG.warning( + "Baseline must be used with one of the following " + "formats: " + str(baseline_formatters) + ) sys.exit(2) if args.output_format != "json": if args.config_file: LOG.info("using config: %s", args.config_file) - LOG.info("running on Python %d.%d.%d", sys.version_info.major, - sys.version_info.minor, sys.version_info.micro) + LOG.info( + "running on Python %d.%d.%d", + sys.version_info.major, + sys.version_info.minor, + sys.version_info.micro, + ) # initiate file discovery step within Bandit Manager b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths) if not b_mgr.b_ts.tests: - LOG.error('No tests would be run, please check the profile.') + LOG.error("No tests would be run, please check the profile.") sys.exit(2) # initiate execution of tests within Bandit Manager @@ -539,19 +660,23 @@ def main(): # trigger output of results by Bandit Manager sev_level = constants.RANKING[args.severity - 1] conf_level = constants.RANKING[args.confidence - 1] - 
b_mgr.output_results(args.context_lines, - sev_level, - conf_level, - args.output_file, - args.output_format, - args.msg_template) - - if (b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0 - and not args.exit_zero): + b_mgr.output_results( + args.context_lines, + sev_level, + conf_level, + args.output_file, + args.output_format, + args.msg_template, + ) + + if ( + b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0 + and not args.exit_zero + ): sys.exit(1) else: sys.exit(0) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/bandit/core/__init__.py b/bandit/core/__init__.py index ed9639d6f..2efdc4dc2 100644 --- a/bandit/core/__init__.py +++ b/bandit/core/__init__.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - from bandit.core import config # noqa from bandit.core import context # noqa from bandit.core import manager # noqa diff --git a/bandit/core/blacklisting.py b/bandit/core/blacklisting.py index 81cac5687..6e015176e 100644 --- a/bandit/core/blacklisting.py +++ b/bandit/core/blacklisting.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2016 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - import ast import fnmatch @@ -12,9 +10,12 @@ def report_issue(check, name): return issue.Issue( - severity=check.get('level', 'MEDIUM'), confidence='HIGH', - text=check['message'].replace('{name}', name), - ident=name, test_id=check.get("id", 'LEGACY')) + severity=check.get("level", "MEDIUM"), + confidence="HIGH", + text=check["message"].replace("{name}", name), + ident=name, + test_id=check.get("id", "LEGACY"), + ) def blacklist(context, config): @@ -30,9 +31,9 @@ def blacklist(context, config): blacklists = config node_type = context.node.__class__.__name__ - if node_type == 'Call': + if node_type == "Call": func = context.node.func - if isinstance(func, ast.Name) and func.id == '__import__': + if isinstance(func, ast.Name) and func.id == "__import__": if len(context.node.args): if isinstance(context.node.args[0], ast.Str): name = context.node.args[0].s @@ -50,13 +51,13 @@ def blacklist(context, config): if context.call_args_count > 0: name = context.call_args[0] else: - name = context.call_keywords['name'] + name = context.call_keywords["name"] for check in blacklists[node_type]: - for qn in check['qualnames']: + for qn in check["qualnames"]: if name is not None and fnmatch.fnmatch(name, qn): return report_issue(check, name) - if node_type.startswith('Import'): + if node_type.startswith("Import"): prefix = "" if node_type == "ImportFrom": if context.node.module is not None: @@ -64,6 +65,6 @@ def blacklist(context, config): for check in blacklists[node_type]: for name in context.node.names: - for qn in check['qualnames']: + for qn in check["qualnames"]: if (prefix + name.name).startswith(qn): return report_issue(check, name.name) diff --git a/bandit/core/config.py b/bandit/core/config.py index be42537d2..7dbf0a8bb 100644 --- a/bandit/core/config.py +++ b/bandit/core/config.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - import logging import yaml @@ -16,31 +14,33 @@ LOG = logging.getLogger(__name__) -class BanditConfig(object): +class BanditConfig: def __init__(self, config_file=None): - '''Attempt to initialize a config dictionary from a yaml file. + """Attempt to initialize a config dictionary from a yaml file. Error out if loading the yaml file fails for any reason. :param config_file: The Bandit yaml config file :raises bandit.utils.ConfigError: If the config is invalid or unreadable. - ''' + """ self.config_file = config_file self._config = {} if config_file: try: - f = open(config_file, 'r') - except IOError: - raise utils.ConfigError("Could not read config file.", - config_file) + f = open(config_file) + except OSError: + raise utils.ConfigError( + "Could not read config file.", config_file + ) - if config_file.endswith('.toml'): + if config_file.endswith(".toml"): import toml + try: with f: - self._config = toml.load(f)['tool']['bandit'] + self._config = toml.load(f)["tool"]["bandit"] except toml.TomlDecodeError as err: LOG.error(err) raise utils.ConfigError("Error parsing file.", config_file) @@ -62,21 +62,21 @@ def __init__(self, config_file=None): else: # use sane defaults - self._config['plugin_name_pattern'] = '*.py' - self._config['include'] = ['*.py', '*.pyw'] + self._config["plugin_name_pattern"] = "*.py" + self._config["include"] = ["*.py", "*.pyw"] self._init_settings() def get_option(self, option_string): - '''Returns the option from the config specified by the option_string. + """Returns the option from the config specified by the option_string. '.' can be used to denote levels, for example to retrieve the options from the 'a' profile you can use 'profiles.a' :param option_string: The string specifying the option to retrieve :return: The object specified by the option_string, or None if it can't be found. 
- ''' - option_levels = option_string.split('.') + """ + option_levels = option_string.split(".") cur_item = self._config for level in option_levels: if cur_item and (level in cur_item): @@ -94,151 +94,164 @@ def get_setting(self, setting_name): @property def config(self): - '''Property to return the config dictionary + """Property to return the config dictionary :return: Config dictionary - ''' + """ return self._config def _init_settings(self): - '''This function calls a set of other functions (one per setting) + """This function calls a set of other functions (one per setting) This function calls a set of other functions (one per setting) to build out the _settings dictionary. Each other function will set values from the config (if set), otherwise use defaults (from constants if possible). :return: - - ''' + """ self._settings = {} self._init_plugin_name_pattern() def _init_plugin_name_pattern(self): - '''Sets settings['plugin_name_pattern'] from default or config file.''' + """Sets settings['plugin_name_pattern'] from default or config file.""" plugin_name_pattern = constants.plugin_name_pattern - if self.get_option('plugin_name_pattern'): - plugin_name_pattern = self.get_option('plugin_name_pattern') - self._settings['plugin_name_pattern'] = plugin_name_pattern + if self.get_option("plugin_name_pattern"): + plugin_name_pattern = self.get_option("plugin_name_pattern") + self._settings["plugin_name_pattern"] = plugin_name_pattern def convert_legacy_config(self): updated_profiles = self.convert_names_to_ids() bad_calls, bad_imports = self.convert_legacy_blacklist_data() if updated_profiles: - self.convert_legacy_blacklist_tests(updated_profiles, - bad_calls, bad_imports) - self._config['profiles'] = updated_profiles + self.convert_legacy_blacklist_tests( + updated_profiles, bad_calls, bad_imports + ) + self._config["profiles"] = updated_profiles def convert_names_to_ids(self): - '''Convert test names to IDs, unknown names are left unchanged.''' + """Convert test 
names to IDs, unknown names are left unchanged.""" extman = extension_loader.MANAGER updated_profiles = {} - for name, profile in (self.get_option('profiles') or {}).items(): + for name, profile in (self.get_option("profiles") or {}).items(): # NOTE(tkelsey): can't use default of get() because value is # sometimes explicity 'None', for example when the list if given in # yaml but not populated with any values. - include = set((extman.get_plugin_id(i) or i) - for i in (profile.get('include') or [])) - exclude = set((extman.get_plugin_id(i) or i) - for i in (profile.get('exclude') or [])) - updated_profiles[name] = {'include': include, 'exclude': exclude} + include = { + (extman.get_plugin_id(i) or i) + for i in (profile.get("include") or []) + } + exclude = { + (extman.get_plugin_id(i) or i) + for i in (profile.get("exclude") or []) + } + updated_profiles[name] = {"include": include, "exclude": exclude} return updated_profiles def convert_legacy_blacklist_data(self): - '''Detect legacy blacklist data and convert it to new format.''' + """Detect legacy blacklist data and convert it to new format.""" bad_calls_list = [] bad_imports_list = [] - bad_calls = self.get_option('blacklist_calls') or {} - bad_calls = bad_calls.get('bad_name_sets', {}) + bad_calls = self.get_option("blacklist_calls") or {} + bad_calls = bad_calls.get("bad_name_sets", {}) for item in bad_calls: for key, val in item.items(): - val['name'] = key - val['message'] = val['message'].replace('{func}', '{name}') + val["name"] = key + val["message"] = val["message"].replace("{func}", "{name}") bad_calls_list.append(val) - bad_imports = self.get_option('blacklist_imports') or {} - bad_imports = bad_imports.get('bad_import_sets', {}) + bad_imports = self.get_option("blacklist_imports") or {} + bad_imports = bad_imports.get("bad_import_sets", {}) for item in bad_imports: for key, val in item.items(): - val['name'] = key - val['message'] = val['message'].replace('{module}', '{name}') - val['qualnames'] = 
val['imports'] - del val['imports'] + val["name"] = key + val["message"] = val["message"].replace("{module}", "{name}") + val["qualnames"] = val["imports"] + del val["imports"] bad_imports_list.append(val) if bad_imports_list or bad_calls_list: - LOG.warning('Legacy blacklist data found in config, overriding ' - 'data plugins') + LOG.warning( + "Legacy blacklist data found in config, overriding " + "data plugins" + ) return bad_calls_list, bad_imports_list @staticmethod def convert_legacy_blacklist_tests(profiles, bad_imports, bad_calls): - '''Detect old blacklist tests, convert to use new builtin.''' + """Detect old blacklist tests, convert to use new builtin.""" + def _clean_set(name, data): if name in data: data.remove(name) - data.add('B001') + data.add("B001") for name, profile in profiles.items(): blacklist = {} - include = profile['include'] - exclude = profile['exclude'] + include = profile["include"] + exclude = profile["exclude"] - name = 'blacklist_calls' + name = "blacklist_calls" if name in include and name not in exclude: - blacklist.setdefault('Call', []).extend(bad_calls) + blacklist.setdefault("Call", []).extend(bad_calls) _clean_set(name, include) _clean_set(name, exclude) - name = 'blacklist_imports' + name = "blacklist_imports" if name in include and name not in exclude: - blacklist.setdefault('Import', []).extend(bad_imports) - blacklist.setdefault('ImportFrom', []).extend(bad_imports) - blacklist.setdefault('Call', []).extend(bad_imports) + blacklist.setdefault("Import", []).extend(bad_imports) + blacklist.setdefault("ImportFrom", []).extend(bad_imports) + blacklist.setdefault("Call", []).extend(bad_imports) _clean_set(name, include) _clean_set(name, exclude) - _clean_set('blacklist_import_func', include) - _clean_set('blacklist_import_func', exclude) + _clean_set("blacklist_import_func", include) + _clean_set("blacklist_import_func", exclude) # This can happen with a legacy config that includes # blacklist_calls but exclude blacklist_imports 
for example - if 'B001' in include and 'B001' in exclude: - exclude.remove('B001') + if "B001" in include and "B001" in exclude: + exclude.remove("B001") - profile['blacklist'] = blacklist + profile["blacklist"] = blacklist def validate(self, path): - '''Validate the config data.''' + """Validate the config data.""" legacy = False - message = ("Config file has an include or exclude reference " - "to legacy test '{0}' but no configuration data for " - "it. Configuration data is required for this test. " - "Please consider switching to the new config file " - "format, the tool 'bandit-config-generator' can help " - "you with this.") + message = ( + "Config file has an include or exclude reference " + "to legacy test '{0}' but no configuration data for " + "it. Configuration data is required for this test. " + "Please consider switching to the new config file " + "format, the tool 'bandit-config-generator' can help " + "you with this." + ) def _test(key, block, exclude, include): if key in exclude or key in include: if self._config.get(block) is None: raise utils.ConfigError(message.format(key), path) - if 'profiles' in self._config: + if "profiles" in self._config: legacy = True - for profile in self._config['profiles'].values(): - inc = profile.get('include') or set() - exc = profile.get('exclude') or set() + for profile in self._config["profiles"].values(): + inc = profile.get("include") or set() + exc = profile.get("exclude") or set() - _test('blacklist_imports', 'blacklist_imports', inc, exc) - _test('blacklist_import_func', 'blacklist_imports', inc, exc) - _test('blacklist_calls', 'blacklist_calls', inc, exc) + _test("blacklist_imports", "blacklist_imports", inc, exc) + _test("blacklist_import_func", "blacklist_imports", inc, exc) + _test("blacklist_calls", "blacklist_calls", inc, exc) # show deprecation message if legacy: - LOG.warning("Config file '%s' contains deprecated legacy config " - "data. Please consider upgrading to the new config " - "format. 
The tool 'bandit-config-generator' can help " - "you with this. Support for legacy configs will be " - "removed in a future bandit version.", path) + LOG.warning( + "Config file '%s' contains deprecated legacy config " + "data. Please consider upgrading to the new config " + "format. The tool 'bandit-config-generator' can help " + "you with this. Support for legacy configs will be " + "removed in a future bandit version.", + path, + ) diff --git a/bandit/core/constants.py b/bandit/core/constants.py index d6864557c..4019cab55 100644 --- a/bandit/core/constants.py +++ b/bandit/core/constants.py @@ -1,24 +1,22 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - # default plugin name pattern -plugin_name_pattern = '*.py' +plugin_name_pattern = "*.py" # default progress increment progress_increment = 50 -RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH'] -RANKING_VALUES = {'UNDEFINED': 1, 'LOW': 3, 'MEDIUM': 5, 'HIGH': 10} -CRITERIA = [('SEVERITY', 'UNDEFINED'), ('CONFIDENCE', 'UNDEFINED')] +RANKING = ["UNDEFINED", "LOW", "MEDIUM", "HIGH"] +RANKING_VALUES = {"UNDEFINED": 1, "LOW": 3, "MEDIUM": 5, "HIGH": 10} +CRITERIA = [("SEVERITY", "UNDEFINED"), ("CONFIDENCE", "UNDEFINED")] # add each ranking to globals, to allow direct access in module name space for rank in RANKING: globals()[rank] = rank -CONFIDENCE_DEFAULT = 'UNDEFINED' +CONFIDENCE_DEFAULT = "UNDEFINED" # A list of values Python considers to be False. # These can be useful in tests to check if a value is True or False. @@ -26,10 +24,10 @@ # These are only useful when we have a constant in code. If we # have a variable we cannot determine if False. 
# See https://docs.python.org/3/library/stdtypes.html#truth-value-testing -FALSE_VALUES = [None, False, 'False', 0, 0.0, 0j, '', (), [], {}] +FALSE_VALUES = [None, False, "False", 0, 0.0, 0j, "", (), [], {}] # override with "log_format" option in config file -log_format_string = '[%(module)s]\t%(levelname)s\t%(message)s' +log_format_string = "[%(module)s]\t%(levelname)s\t%(message)s" # Directories to exclude by default EXCLUDE = ( diff --git a/bandit/core/context.py b/bandit/core/context.py index 139deb55e..a0b2fadf1 100644 --- a/bandit/core/context.py +++ b/bandit/core/context.py @@ -1,28 +1,26 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import ast from bandit.core import utils -class Context(object): +class Context: def __init__(self, context_object=None): - '''Initialize the class with a context, empty dict otherwise + """Initialize the class with a context, empty dict otherwise :param context_object: The context object to create class from :return: - - ''' + """ if context_object is not None: self._context = context_object else: self._context = dict() def __repr__(self): - '''Generate representation of object for printing / interactive use + """Generate representation of object for printing / interactive use Most likely only interested in non-default properties, so we return the string version of _context. 
@@ -35,19 +33,19 @@ def __repr__(self): 'import_aliases': {}, 'qualname': 'socket.socket'}> :return: A string representation of the object - ''' + """ return "" % self._context @property def call_args(self): - '''Get a list of function args + """Get a list of function args :return: A list of function args - ''' + """ args = [] - if 'call' in self._context and hasattr(self._context['call'], 'args'): - for arg in self._context['call'].args: - if hasattr(arg, 'attr'): + if "call" in self._context and hasattr(self._context["call"], "args"): + for arg in self._context["call"].args: + if hasattr(arg, "attr"): args.append(arg.attr) else: args.append(self._get_literal_value(arg)) @@ -55,42 +53,43 @@ def call_args(self): @property def call_args_count(self): - '''Get the number of args a function call has + """Get the number of args a function call has :return: The number of args a function call has or None - ''' - if 'call' in self._context and hasattr(self._context['call'], 'args'): - return len(self._context['call'].args) + """ + if "call" in self._context and hasattr(self._context["call"], "args"): + return len(self._context["call"].args) else: return None @property def call_function_name(self): - '''Get the name (not FQ) of a function call + """Get the name (not FQ) of a function call :return: The name (not FQ) of a function call - ''' - return self._context.get('name') + """ + return self._context.get("name") @property def call_function_name_qual(self): - '''Get the FQ name of a function call + """Get the FQ name of a function call :return: The FQ name of a function call - ''' - return self._context.get('qualname') + """ + return self._context.get("qualname") @property def call_keywords(self): - '''Get a dictionary of keyword parameters + """Get a dictionary of keyword parameters :return: A dictionary of keyword parameters for a call as strings - ''' - if ('call' in self._context and - hasattr(self._context['call'], 'keywords')): + """ + if "call" in self._context and 
hasattr( + self._context["call"], "keywords" + ): return_dict = {} - for li in self._context['call'].keywords: - if hasattr(li.value, 'attr'): + for li in self._context["call"].keywords: + if hasattr(li.value, "attr"): return_dict[li.arg] = li.value.attr else: return_dict[li.arg] = self._get_literal_value(li.value) @@ -100,31 +99,31 @@ def call_keywords(self): @property def node(self): - '''Get the raw AST node associated with the context + """Get the raw AST node associated with the context :return: The raw AST node associated with the context - ''' - return self._context.get('node') + """ + return self._context.get("node") @property def string_val(self): - '''Get the value of a standalone unicode or string object + """Get the value of a standalone unicode or string object :return: value of a standalone unicode or string object - ''' - return self._context.get('str') + """ + return self._context.get("str") @property def bytes_val(self): - '''Get the value of a standalone bytes object (py3 only) + """Get the value of a standalone bytes object (py3 only) :return: value of a standalone bytes object - ''' - return self._context.get('bytes') + """ + return self._context.get("bytes") @property def string_val_as_escaped_bytes(self): - '''Get escaped value of the object. + """Get escaped value of the object. Turn the value of a string or bytes object into byte sequence with unknown, control, and \\ characters escaped. @@ -133,11 +132,11 @@ def string_val_as_escaped_bytes(self): potentially badly encoded string in the code. 
:return: sequence of printable ascii bytes representing original string - ''' + """ val = self.string_val if val is not None: # it's any of str or unicode in py2, or str in py3 - return val.encode('unicode_escape') + return val.encode("unicode_escape") val = self.bytes_val if val is not None: @@ -147,34 +146,38 @@ def string_val_as_escaped_bytes(self): @property def statement(self): - '''Get the raw AST for the current statement + """Get the raw AST for the current statement :return: The raw AST for the current statement - ''' - return self._context.get('statement') + """ + return self._context.get("statement") @property def function_def_defaults_qual(self): - '''Get a list of fully qualified default values in a function def + """Get a list of fully qualified default values in a function def :return: List of defaults - ''' + """ defaults = [] - if ('node' in self._context and - hasattr(self._context['node'], 'args') and - hasattr(self._context['node'].args, 'defaults')): - for default in self._context['node'].args.defaults: - defaults.append(utils.get_qual_attr( - default, - self._context['import_aliases'])) + if ( + "node" in self._context + and hasattr(self._context["node"], "args") + and hasattr(self._context["node"].args, "defaults") + ): + for default in self._context["node"].args.defaults: + defaults.append( + utils.get_qual_attr( + default, self._context["import_aliases"] + ) + ) return defaults def _get_literal_value(self, literal): - '''Utility function to turn AST literals into native Python types + """Utility function to turn AST literals into native Python types :param literal: The AST literal to convert :return: The value of the AST literal - ''' + """ if isinstance(literal, ast.Num): literal_value = literal.n @@ -221,23 +224,23 @@ def _get_literal_value(self, literal): return literal_value def get_call_arg_value(self, argument_name): - '''Gets the value of a named argument in a function call. + """Gets the value of a named argument in a function call. 
:return: named argument value - ''' + """ kwd_values = self.call_keywords if kwd_values is not None and argument_name in kwd_values: return kwd_values[argument_name] def check_call_arg_value(self, argument_name, argument_values=None): - '''Checks for a value of a named argument in a function call. + """Checks for a value of a named argument in a function call. Returns none if the specified argument is not found. :param argument_name: A string - name of the argument to look for :param argument_values: the value, or list of values to test against :return: Boolean True if argument found and matched, False if found and not matched, None if argument not found at all - ''' + """ arg_value = self.get_call_arg_value(argument_name) if arg_value is not None: if not isinstance(argument_values, list): @@ -253,62 +256,62 @@ def check_call_arg_value(self, argument_name, argument_values=None): return None def get_lineno_for_call_arg(self, argument_name): - '''Get the line number for a specific named argument + """Get the line number for a specific named argument In case the call is split over multiple lines, get the correct one for the argument. 
:param argument_name: A string - name of the argument to look for :return: Integer - the line number of the found argument, or -1 - ''' - if hasattr(self.node, 'keywords'): + """ + if hasattr(self.node, "keywords"): for key in self.node.keywords: if key.arg == argument_name: return key.value.lineno def get_call_arg_at_position(self, position_num): - '''Returns positional argument at the specified position (if it exists) + """Returns positional argument at the specified position (if it exists) :param position_num: The index of the argument to return the value for :return: Value of the argument at the specified position if it exists - ''' + """ max_args = self.call_args_count if max_args and position_num < max_args: return self._get_literal_value( - self._context['call'].args[position_num] + self._context["call"].args[position_num] ) else: return None def is_module_being_imported(self, module): - '''Check for the specified module is currently being imported + """Check for the specified module is currently being imported :param module: The module name to look for :return: True if the module is found, False otherwise - ''' - return self._context.get('module') == module + """ + return self._context.get("module") == module def is_module_imported_exact(self, module): - '''Check if a specified module has been imported; only exact matches. + """Check if a specified module has been imported; only exact matches. :param module: The module name to look for :return: True if the module is found, False otherwise - ''' - return module in self._context.get('imports', []) + """ + return module in self._context.get("imports", []) def is_module_imported_like(self, module): - '''Check if a specified module has been imported + """Check if a specified module has been imported Check if a specified module has been imported; specified module exists as part of any import statement. 
:param module: The module name to look for :return: True if the module is found, False otherwise - ''' - if 'imports' in self._context: - for imp in self._context['imports']: + """ + if "imports" in self._context: + for imp in self._context["imports"]: if module in imp: return True return False @property def filename(self): - return self._context.get('filename') + return self._context.get("filename") diff --git a/bandit/core/docs_utils.py b/bandit/core/docs_utils.py index f41722652..6733ad03a 100644 --- a/bandit/core/docs_utils.py +++ b/bandit/core/docs_utils.py @@ -1,11 +1,9 @@ -# -*- coding:utf-8 -*- # # Copyright 2016 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - # where our docs are hosted -BASE_URL = 'https://bandit.readthedocs.io/en/latest/' +BASE_URL = "https://bandit.readthedocs.io/en/latest/" def get_url(bid): @@ -16,29 +14,42 @@ def get_url(bid): info = extension_loader.MANAGER.plugins_by_id.get(bid) if info is not None: - return '%splugins/%s_%s.html' % (BASE_URL, bid.lower(), - info.plugin.__name__) + return "{}plugins/{}_{}.html".format( + BASE_URL, + bid.lower(), + info.plugin.__name__, + ) info = extension_loader.MANAGER.blacklist_by_id.get(bid) if info is not None: - template = 'blacklists/blacklist_{kind}.html#{id}-{name}' - info['name'] = info['name'].replace('_', '-') + template = "blacklists/blacklist_{kind}.html#{id}-{name}" + info["name"] = info["name"].replace("_", "-") - if info['id'].startswith('B3'): # B3XX + if info["id"].startswith("B3"): # B3XX # Some of the links are combined, so we have exception cases - if info['id'] in ['B304', 'B305']: + if info["id"] in ["B304", "B305"]: info = info.copy() - info['id'] = 'b304-b305' - info['name'] = 'ciphers-and-modes' - elif info['id'] in ['B313', 'B314', 'B315', 'B316', 'B317', - 'B318', 'B319', 'B320']: + info["id"] = "b304-b305" + info["name"] = "ciphers-and-modes" + elif info["id"] in [ + "B313", + "B314", + "B315", + "B316", + "B317", + "B318", + "B319", 
+ "B320", + ]: info = info.copy() - info['id'] = 'b313-b320' + info["id"] = "b313-b320" ext = template.format( - kind='calls', id=info['id'], name=info['name']) + kind="calls", id=info["id"], name=info["name"] + ) else: ext = template.format( - kind='imports', id=info['id'], name=info['name']) + kind="imports", id=info["id"], name=info["name"] + ) return BASE_URL + ext.lower() diff --git a/bandit/core/extension_loader.py b/bandit/core/extension_loader.py index 65b723bc2..7730ee8e4 100644 --- a/bandit/core/extension_loader.py +++ b/bandit/core/extension_loader.py @@ -1,9 +1,5 @@ -# -*- coding:utf-8 -*- # # SPDX-License-Identifier: Apache-2.0 - -from __future__ import print_function - import sys from stevedore import extension @@ -11,15 +7,16 @@ from bandit.core import utils -class Manager(object): +class Manager: # These IDs are for bandit built in tests - builtin = [ - 'B001' # Built in blacklist test - ] - - def __init__(self, formatters_namespace='bandit.formatters', - plugins_namespace='bandit.plugins', - blacklists_namespace='bandit.blacklists'): + builtin = ["B001"] # Built in blacklist test + + def __init__( + self, + formatters_namespace="bandit.formatters", + plugins_namespace="bandit.plugins", + blacklists_namespace="bandit.blacklists", + ): # Cache the extension managers, loaded extensions, and extension names self.load_formatters(formatters_namespace) self.load_plugins(plugins_namespace) @@ -30,7 +27,7 @@ def load_formatters(self, formatters_namespace): namespace=formatters_namespace, invoke_on_load=False, verify_requirements=False, - ) + ) self.formatters = list(self.formatters_mgr) self.formatter_names = self.formatters_mgr.names() @@ -39,13 +36,15 @@ def load_plugins(self, plugins_namespace): namespace=plugins_namespace, invoke_on_load=False, verify_requirements=False, - ) + ) def test_has_id(plugin): if not hasattr(plugin.plugin, "_test_id"): # logger not setup yet, so using print - print("WARNING: Test '%s' has no ID, skipping." 
% plugin.name, - file=sys.stderr) + print( + "WARNING: Test '%s' has no ID, skipping." % plugin.name, + file=sys.stderr, + ) return False return True @@ -64,7 +63,7 @@ def load_blacklists(self, blacklist_namespace): namespace=blacklist_namespace, invoke_on_load=False, verify_requirements=False, - ) + ) self.blacklist = {} blacklist = list(self.blacklists_mgr) for item in blacklist: @@ -76,29 +75,31 @@ def load_blacklists(self, blacklist_namespace): self.blacklist_by_name = {} for val in self.blacklist.values(): for b in val: - self.blacklist_by_id[b['id']] = b - self.blacklist_by_name[b['name']] = b + self.blacklist_by_id[b["id"]] = b + self.blacklist_by_name[b["name"]] = b def validate_profile(self, profile): - '''Validate that everything in the configured profiles looks good.''' - for inc in profile['include']: + """Validate that everything in the configured profiles looks good.""" + for inc in profile["include"]: if not self.check_id(inc): - raise ValueError('Unknown test found in profile: %s' % inc) + raise ValueError("Unknown test found in profile: %s" % inc) - for exc in profile['exclude']: + for exc in profile["exclude"]: if not self.check_id(exc): - raise ValueError('Unknown test found in profile: %s' % exc) + raise ValueError("Unknown test found in profile: %s" % exc) - union = set(profile['include']) & set(profile['exclude']) + union = set(profile["include"]) & set(profile["exclude"]) if len(union) > 0: - raise ValueError('Non-exclusive include/exclude test sets: %s' % - union) + raise ValueError( + "Non-exclusive include/exclude test sets: %s" % union + ) def check_id(self, test): return ( - test in self.plugins_by_id or - test in self.blacklist_by_id or - test in self.builtin) + test in self.plugins_by_id + or test in self.blacklist_by_id + or test in self.builtin + ) # Using entry-points and pkg_resources *can* be expensive. 
So let's load these diff --git a/bandit/core/issue.py b/bandit/core/issue.py index 98b1d0bb2..ef997b6d0 100644 --- a/bandit/core/issue.py +++ b/bandit/core/issue.py @@ -1,24 +1,27 @@ -# -*- coding:utf-8 -*- # # Copyright 2015 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - -from __future__ import division -from __future__ import unicode_literals - import linecache from bandit.core import constants -class Issue(object): - def __init__(self, severity, confidence=constants.CONFIDENCE_DEFAULT, - text="", ident=None, lineno=None, test_id="", col_offset=0): +class Issue: + def __init__( + self, + severity, + confidence=constants.CONFIDENCE_DEFAULT, + text="", + ident=None, + lineno=None, + test_id="", + col_offset=0, + ): self.severity = severity self.confidence = confidence if isinstance(text, bytes): - text = text.decode('utf-8') + text = text.decode("utf-8") self.text = text self.ident = ident self.fname = "" @@ -29,18 +32,33 @@ def __init__(self, severity, confidence=constants.CONFIDENCE_DEFAULT, self.linerange = [] def __str__(self): - return ("Issue: '%s' from %s:%s: Severity: %s Confidence: " - "%s at %s:%i") % (self.text, self.test_id, - (self.ident or self.test), self.severity, - self.confidence, self.fname, self.lineno) + return ( + "Issue: '%s' from %s:%s: Severity: %s Confidence: " "%s at %s:%i" + ) % ( + self.text, + self.test_id, + (self.ident or self.test), + self.severity, + self.confidence, + self.fname, + self.lineno, + ) def __eq__(self, other): # if the issue text, severity, confidence, and filename match, it's # the same issue from our perspective - match_types = ['text', 'severity', 'confidence', 'fname', 'test', - 'test_id'] - return all(getattr(self, field) == getattr(other, field) - for field in match_types) + match_types = [ + "text", + "severity", + "confidence", + "fname", + "test", + "test_id", + ] + return all( + getattr(self, field) == getattr(other, field) + for field in match_types + ) def __ne__(self, 
other): return not self.__eq__(other) @@ -49,7 +67,7 @@ def __hash__(self): return id(self) def filter(self, severity, confidence): - '''Utility to filter on confidence and severity + """Utility to filter on confidence and severity This function determines whether an issue should be included by comparing the severity and confidence rating of the issue to minimum @@ -64,18 +82,19 @@ def filter(self, severity, confidence): :param confidence: Confidence threshold :return: True/False depending on whether issue meets threshold - ''' + """ rank = constants.RANKING - return (rank.index(self.severity) >= rank.index(severity) and - rank.index(self.confidence) >= rank.index(confidence)) + return rank.index(self.severity) >= rank.index( + severity + ) and rank.index(self.confidence) >= rank.index(confidence) def get_code(self, max_lines=3, tabbed=False): - '''Gets lines of code from a file the generated this issue. + """Gets lines of code from a file the generated this issue. :param max_lines: Max lines of context to return :param tabbed: Use tabbing in the output :return: strings of code - ''' + """ lines = [] max_lines = max(max_lines, 1) lmin = max(1, self.lineno - max_lines // 2) @@ -86,29 +105,29 @@ def get_code(self, max_lines=3, tabbed=False): text = linecache.getline(self.fname, line) if isinstance(text, bytes): - text = text.decode('utf-8') + text = text.decode("utf-8") if not len(text): break lines.append(tmplt % (line, text)) - return ''.join(lines) + return "".join(lines) def as_dict(self, with_code=True): - '''Convert the issue to a dict of values for outputting.''' + """Convert the issue to a dict of values for outputting.""" out = { - 'filename': self.fname, - 'test_name': self.test, - 'test_id': self.test_id, - 'issue_severity': self.severity, - 'issue_confidence': self.confidence, - 'issue_text': self.text.encode('utf-8').decode('utf-8'), - 'line_number': self.lineno, - 'line_range': self.linerange, - 'col_offset': self.col_offset - } + "filename": 
self.fname, + "test_name": self.test, + "test_id": self.test_id, + "issue_severity": self.severity, + "issue_confidence": self.confidence, + "issue_text": self.text.encode("utf-8").decode("utf-8"), + "line_number": self.lineno, + "line_range": self.linerange, + "col_offset": self.col_offset, + } if with_code: - out['code'] = self.get_code() + out["code"] = self.get_code() return out def from_dict(self, data, with_code=True): diff --git a/bandit/core/manager.py b/bandit/core/manager.py index 466670c8a..0febb8b53 100644 --- a/bandit/core/manager.py +++ b/bandit/core/manager.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import collections import fnmatch import json @@ -25,13 +23,21 @@ LOG = logging.getLogger(__name__) -class BanditManager(object): +class BanditManager: scope = [] - def __init__(self, config, agg_type, debug=False, verbose=False, - quiet=False, profile=None, ignore_nosec=False): - '''Get logger, config, AST handler, and result store ready + def __init__( + self, + config, + agg_type, + debug=False, + verbose=False, + quiet=False, + profile=None, + ignore_nosec=False, + ): + """Get logger, config, AST handler, and result store ready :param config: config options object :type config: bandit.core.BanditConfig @@ -42,7 +48,7 @@ def __init__(self, config, agg_type, debug=False, verbose=False, :param profile_name: Optional name of profile to use (from cmd line) :param ignore_nosec: Whether to ignore #nosec or not :return: - ''' + """ self.debug = debug self.verbose = verbose self.quiet = quiet @@ -69,23 +75,23 @@ def get_skipped(self): # "skip" is a tuple of name and reason, decode just the name for skip in self.skipped: if isinstance(skip[0], bytes): - ret.append((skip[0].decode('utf-8'), skip[1])) + ret.append((skip[0].decode("utf-8"), skip[1])) else: ret.append(skip) return ret - def get_issue_list(self, - sev_level=b_constants.LOW, - 
conf_level=b_constants.LOW): + def get_issue_list( + self, sev_level=b_constants.LOW, conf_level=b_constants.LOW + ): return self.filter_results(sev_level, conf_level) def populate_baseline(self, data): - '''Populate a baseline set of issues from a JSON report + """Populate a baseline set of issues from a JSON report This will populate a list of baseline issues discovered from a previous run of bandit. Later this baseline can be used to filter out the result set, see filter_results. - ''' + """ items = [] try: jdata = json.loads(data) @@ -95,7 +101,7 @@ def populate_baseline(self, data): self.baseline = items def filter_results(self, sev_filter, conf_filter): - '''Returns a list of results filtered by the baseline + """Returns a list of results filtered by the baseline This works by checking the number of results returned from each file we process. If the number of results is different to the number reported @@ -105,10 +111,11 @@ def filter_results(self, sev_filter, conf_filter): :param sev_filter: severity level filter to apply :param conf_filter: confidence level filter to apply - ''' + """ - results = [i for i in self.results if - i.filter(sev_filter, conf_filter)] + results = [ + i for i in self.results if i.filter(sev_filter, conf_filter) + ] if not self.baseline: return results @@ -118,19 +125,27 @@ def filter_results(self, sev_filter, conf_filter): # candidate issues return _find_candidate_matches(unmatched, results) - def results_count(self, sev_filter=b_constants.LOW, - conf_filter=b_constants.LOW): - '''Return the count of results + def results_count( + self, sev_filter=b_constants.LOW, conf_filter=b_constants.LOW + ): + """Return the count of results :param sev_filter: Severity level to filter lower :param conf_filter: Confidence level to filter :return: Number of results in the set - ''' + """ return len(self.get_issue_list(sev_filter, conf_filter)) - def output_results(self, lines, sev_level, conf_level, output_file, - output_format, template=None): - 
'''Outputs results from the result store + def output_results( + self, + lines, + sev_level, + conf_level, + output_file, + output_format, + template=None, + ): + """Outputs results from the result store :param lines: How many surrounding lines to show per result :param sev_level: Which severity levels to show (LOW, MEDIUM, HIGH) @@ -141,45 +156,57 @@ def output_results(self, lines, sev_level, conf_level, output_file, (default: {abspath}:{line}: {test_id}[bandit]: {severity}: {msg}) :return: - - ''' + """ try: formatters_mgr = extension_loader.MANAGER.formatters_mgr if output_format not in formatters_mgr: - output_format = 'screen' if sys.stdout.isatty() else 'txt' + output_format = "screen" if sys.stdout.isatty() else "txt" formatter = formatters_mgr[output_format] report_func = formatter.plugin - if output_format == 'custom': - report_func(self, fileobj=output_file, sev_level=sev_level, - conf_level=conf_level, template=template) + if output_format == "custom": + report_func( + self, + fileobj=output_file, + sev_level=sev_level, + conf_level=conf_level, + template=template, + ) else: - report_func(self, fileobj=output_file, sev_level=sev_level, - conf_level=conf_level, lines=lines) + report_func( + self, + fileobj=output_file, + sev_level=sev_level, + conf_level=conf_level, + lines=lines, + ) except Exception as e: - raise RuntimeError("Unable to output report using '%s' formatter: " - "%s" % (output_format, str(e))) + raise RuntimeError( + "Unable to output report using '%s' formatter: " + "%s" % (output_format, str(e)) + ) - def discover_files(self, targets, recursive=False, excluded_paths=''): - '''Add tests directly and from a directory to the test set + def discover_files(self, targets, recursive=False, excluded_paths=""): + """Add tests directly and from a directory to the test set :param targets: The command line list of files and directories :param recursive: True/False - whether to add all files from dirs :return: - ''' + """ # We'll mantain a list of 
files which are added, and ones which have # been explicitly excluded files_list = set() excluded_files = set() - excluded_path_globs = self.b_conf.get_option('exclude_dirs') or [] - included_globs = self.b_conf.get_option('include') or ['*.py'] + excluded_path_globs = self.b_conf.get_option("exclude_dirs") or [] + included_globs = self.b_conf.get_option("include") or ["*.py"] # if there are command line provided exclusions add them to the list if excluded_paths: - for path in excluded_paths.split(','): + for path in excluded_paths.split(","): if os.path.isdir(path): - path = os.path.join(path, '*') + path = os.path.join(path, "*") excluded_path_globs.append(path) @@ -191,21 +218,27 @@ def discover_files(self, targets, recursive=False, excluded_paths=''): new_files, newly_excluded = _get_files_from_dir( fname, included_globs=included_globs, - excluded_path_strings=excluded_path_globs + excluded_path_strings=excluded_path_globs, ) files_list.update(new_files) excluded_files.update(newly_excluded) else: - LOG.warning("Skipping directory (%s), use -r flag to " - "scan contents", fname) + LOG.warning( + "Skipping directory (%s), use -r flag to " + "scan contents", + fname, + ) else: # if the user explicitly mentions a file on command line, # we'll scan it, regardless of whether it's in the included # file types list - if _is_file_included(fname, included_globs, - excluded_path_globs, - enforce_glob=False): + if _is_file_included( + fname, + included_globs, + excluded_path_globs, + enforce_glob=False, + ): files_list.add(fname) else: excluded_files.add(fname) @@ -214,10 +247,10 @@ def discover_files(self, targets, recursive=False, excluded_paths=''): self.excluded_files = sorted(excluded_files) def run_tests(self): - '''Runs through all files in the scope + """Runs through all files in the scope :return: - - ''' + """ self._show_progress("%s [" % len(self.files_list)) # if we have problems with a file, we'll remove it from the files_list @@ -232,13 +265,13 @@ def 
run_tests(self): if count % self.progress == 0: self._show_progress("%s.. " % count, flush=True) try: - if fname == '-': - sys.stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0) - self._parse_file('', sys.stdin, new_files_list) + if fname == "-": + sys.stdin = os.fdopen(sys.stdin.fileno(), "rb", 0) + self._parse_file("", sys.stdin, new_files_list) else: - with open(fname, 'rb') as fdata: + with open(fname, "rb") as fdata: self._parse_file(fname, fdata, new_files_list) - except IOError as e: + except OSError as e: self.skipped.append((fname, e.strerror)) new_files_list.remove(fname) @@ -251,7 +284,7 @@ def run_tests(self): self.metrics.aggregate() def _show_progress(self, message, flush=False): - '''Show progress on stderr + """Show progress on stderr Write progress message to stderr, if number of files warrants it and log level is high enough. @@ -259,9 +292,11 @@ def _show_progress(self, message, flush=False): :param message: The message to write to stderr :param flush: Whether to flush stderr after writing the message :return: - ''' - if len(self.files_list) > self.progress and \ - LOG.getEffectiveLevel() <= logging.INFO: + """ + if ( + len(self.files_list) > self.progress + and LOG.getEffectiveLevel() <= logging.INFO + ): sys.stderr.write(message) if flush: sys.stderr.flush() @@ -279,52 +314,65 @@ def _parse_file(self, fname, fdata, new_files_list): try: fdata.seek(0) tokens = tokenize.tokenize(fdata.readline) - nosec_lines = set( - lineno for toktype, tokval, (lineno, _), _, _ in tokens - if toktype == tokenize.COMMENT and - '#nosec' in tokval or '# nosec' in tokval) + nosec_lines = { + lineno + for toktype, tokval, (lineno, _), _, _ in tokens + if toktype == tokenize.COMMENT + and "#nosec" in tokval + or "# nosec" in tokval + } except tokenize.TokenError: nosec_lines = set() score = self._execute_ast_visitor(fname, data, nosec_lines) self.scores.append(score) - self.metrics.count_issues([score, ]) + self.metrics.count_issues( + [ + score, + ] + ) except 
KeyboardInterrupt: sys.exit(2) except SyntaxError: - self.skipped.append((fname, - "syntax error while parsing AST from file")) + self.skipped.append( + (fname, "syntax error while parsing AST from file") + ) new_files_list.remove(fname) except Exception as e: - LOG.error("Exception occurred when executing tests against " - "%s. Run \"bandit --debug %s\" to see the full " - "traceback.", fname, fname) - self.skipped.append((fname, 'exception while scanning file')) + LOG.error( + "Exception occurred when executing tests against " + '%s. Run "bandit --debug %s" to see the full ' + "traceback.", + fname, + fname, + ) + self.skipped.append((fname, "exception while scanning file")) new_files_list.remove(fname) LOG.debug(" Exception string: %s", e) LOG.debug(" Exception traceback: %s", traceback.format_exc()) def _execute_ast_visitor(self, fname, data, nosec_lines): - '''Execute AST parse on each file + """Execute AST parse on each file :param fname: The name of the file being parsed :param data: Original file contents :param lines: The lines of code to process :return: The accumulated test score - ''' + """ score = [] - res = b_node_visitor.BanditNodeVisitor(fname, self.b_ma, - self.b_ts, self.debug, - nosec_lines, self.metrics) + res = b_node_visitor.BanditNodeVisitor( + fname, self.b_ma, self.b_ts, self.debug, nosec_lines, self.metrics + ) score = res.process(data) self.results.extend(res.tester.results) return score -def _get_files_from_dir(files_dir, included_globs=None, - excluded_path_strings=None): +def _get_files_from_dir( + files_dir, included_globs=None, excluded_path_strings=None +): if not included_globs: - included_globs = ['*.py'] + included_globs = ["*.py"] if not excluded_path_strings: excluded_path_strings = [] @@ -342,9 +390,10 @@ def _get_files_from_dir(files_dir, included_globs=None, return files_list, excluded_files -def _is_file_included(path, included_globs, excluded_path_strings, - enforce_glob=True): - '''Determine if a file should be included 
based on filename +def _is_file_included( + path, included_globs, excluded_path_strings, enforce_glob=True +): + """Determine if a file should be included based on filename This utility function determines if a file should be included based on the file name, a list of parsed extensions, excluded paths, and a flag @@ -356,14 +405,15 @@ def _is_file_included(path, included_globs, excluded_path_strings, should not include files :param enforce_glob: Can set to false to bypass extension check :return: Boolean indicating whether a file should be included - ''' + """ return_value = False # if this is matches a glob of files we look at, and it isn't in an # excluded path if _matches_glob_list(path, included_globs) or not enforce_glob: - if (not _matches_glob_list(path, excluded_path_strings) and - not any(x in path for x in excluded_path_strings)): + if not _matches_glob_list(path, excluded_path_strings) and not any( + x in path for x in excluded_path_strings + ): return_value = True return return_value @@ -405,7 +455,8 @@ def _find_candidate_matches(unmatched_issues, results_list): issue_candidates = collections.OrderedDict() for unmatched in unmatched_issues: - issue_candidates[unmatched] = ([i for i in results_list if - unmatched == i]) + issue_candidates[unmatched] = [ + i for i in results_list if unmatched == i + ] return issue_candidates diff --git a/bandit/core/meta_ast.py b/bandit/core/meta_ast.py index 2b2cf3ecc..bf94540c6 100644 --- a/bandit/core/meta_ast.py +++ b/bandit/core/meta_ast.py @@ -1,10 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - - import collections import logging @@ -12,7 +9,7 @@ LOG = logging.getLogger(__name__) -class BanditMetaAst(object): +class BanditMetaAst: nodes = collections.OrderedDict() @@ -20,25 +17,27 @@ def __init__(self): pass def add_node(self, node, parent_id, depth): - '''Add a node to the AST node collection + """Add a node to the AST node collection :param node: The AST node to add :param parent_id: The ID of the node's parent :param depth: The depth of the node :return: - - ''' + """ node_id = hex(id(node)) - LOG.debug('adding node : %s [%s]', node_id, depth) + LOG.debug("adding node : %s [%s]", node_id, depth) self.nodes[node_id] = { - 'raw': node, 'parent_id': parent_id, 'depth': depth + "raw": node, + "parent_id": parent_id, + "depth": depth, } def __str__(self): - '''Dumps a listing of all of the nodes + """Dumps a listing of all of the nodes Dumps a listing of all of the nodes for debugging purposes :return: - - ''' + """ tmpstr = "" for k, v in self.nodes.items(): tmpstr += "Node: %s\n" % k diff --git a/bandit/core/metrics.py b/bandit/core/metrics.py index ec26e234a..14d50d27b 100644 --- a/bandit/core/metrics.py +++ b/bandit/core/metrics.py @@ -1,15 +1,13 @@ -# -*- coding:utf-8 -*- # # Copyright 2015 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import collections from bandit.core import constants -class Metrics(object): +class Metrics: """Bandit metric gathering. 
This class is a singleton used to gather and process metrics collected when @@ -21,12 +19,12 @@ class Metrics(object): def __init__(self): self.data = dict() - self.data['_totals'] = {'loc': 0, 'nosec': 0} + self.data["_totals"] = {"loc": 0, "nosec": 0} # initialize 0 totals for criteria and rank; this will be reset later for rank in constants.RANKING: for criteria in constants.CRITERIA: - self.data['_totals']['{0}.{1}'.format(criteria[0], rank)] = 0 + self.data["_totals"][f"{criteria[0]}.{rank}"] = 0 def begin(self, fname): """Begin a new metric block. @@ -35,7 +33,7 @@ def begin(self, fname): :param fname: the metrics unique name, normally the file name. """ - self.data[fname] = {'loc': 0, 'nosec': 0} + self.data[fname] = {"loc": 0, "nosec": 0} self.current = self.data[fname] def note_nosec(self, num=1): @@ -45,7 +43,7 @@ def note_nosec(self, num=1): :param num: number of nosecs seen, defaults to 1 """ - self.current['nosec'] += num + self.current["nosec"] += num def count_locs(self, lines): """Count lines of code. 
@@ -55,11 +53,12 @@ def count_locs(self, lines): :param lines: lines in the file to process """ + def proc(line): tmp = line.strip() - return bool(tmp and not tmp.startswith(b'#')) + return bool(tmp and not tmp.startswith(b"#")) - self.current['loc'] += sum(proc(line) for line in lines) + self.current["loc"] += sum(proc(line) for line in lines) def count_issues(self, scores): self.current.update(self._get_issue_counts(scores)) @@ -69,7 +68,7 @@ def aggregate(self): c = collections.Counter() for fname in self.data: c.update(self.data[fname]) - self.data['_totals'] = dict(c) + self.data["_totals"] = dict(c) @staticmethod def _get_issue_counts(scores): @@ -82,12 +81,11 @@ def _get_issue_counts(scores): for score in scores: for (criteria, _) in constants.CRITERIA: for i, rank in enumerate(constants.RANKING): - label = '{0}.{1}'.format(criteria, rank) + label = f"{criteria}.{rank}" if label not in issue_counts: issue_counts[label] = 0 count = ( - score[criteria][i] / - constants.RANKING_VALUES[rank] + score[criteria][i] / constants.RANKING_VALUES[rank] ) issue_counts[label] += count return issue_counts diff --git a/bandit/core/node_visitor.py b/bandit/core/node_visitor.py index eba93e0d7..6acf587e4 100644 --- a/bandit/core/node_visitor.py +++ b/bandit/core/node_visitor.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - import ast import logging import operator @@ -16,15 +14,14 @@ LOG = logging.getLogger(__name__) -class BanditNodeVisitor(object): - def __init__(self, fname, metaast, testset, - debug, nosec_lines, metrics): +class BanditNodeVisitor: + def __init__(self, fname, metaast, testset, debug, nosec_lines, metrics): self.debug = debug self.nosec_lines = nosec_lines self.seen = 0 self.scores = { - 'SEVERITY': [0] * len(constants.RANKING), - 'CONFIDENCE': [0] * len(constants.RANKING) + "SEVERITY": [0] * len(constants.RANKING), + "CONFIDENCE": [0] * len(constants.RANKING), } self.depth = 0 self.fname = fname @@ -33,91 +30,93 @@ def __init__(self, fname, metaast, testset, self.imports = set() self.import_aliases = {} self.tester = b_tester.BanditTester( - self.testset, self.debug, nosec_lines) + self.testset, self.debug, nosec_lines + ) # in some cases we can't determine a qualified name try: self.namespace = b_utils.get_module_qualname_from_path(fname) except b_utils.InvalidModulePath: - LOG.info('Unable to find qualified name for module: %s', - self.fname) + LOG.info( + "Unable to find qualified name for module: %s", self.fname + ) self.namespace = "" - LOG.debug('Module qualified name: %s', self.namespace) + LOG.debug("Module qualified name: %s", self.namespace) self.metrics = metrics def visit_ClassDef(self, node): - '''Visitor for AST ClassDef node + """Visitor for AST ClassDef node Add class name to current namespace for all descendants. :param node: Node being inspected :return: - - ''' + """ # For all child nodes, add this class name to current namespace self.namespace = b_utils.namespace_path_join(self.namespace, node.name) def visit_FunctionDef(self, node): - '''Visitor for AST FunctionDef nodes + """Visitor for AST FunctionDef nodes add relevant information about the node to the context for use in tests which inspect function definitions. Add the function name to the current namespace for all descendants. 
:param node: The node that is being inspected :return: - - ''' + """ - self.context['function'] = node - qualname = self.namespace + '.' + b_utils.get_func_name(node) - name = qualname.split('.')[-1] + self.context["function"] = node + qualname = self.namespace + "." + b_utils.get_func_name(node) + name = qualname.split(".")[-1] - self.context['qualname'] = qualname - self.context['name'] = name + self.context["qualname"] = qualname + self.context["name"] = name # For all child nodes and any tests run, add this function name to # current namespace self.namespace = b_utils.namespace_path_join(self.namespace, name) - self.update_scores(self.tester.run_tests(self.context, 'FunctionDef')) + self.update_scores(self.tester.run_tests(self.context, "FunctionDef")) def visit_Call(self, node): - '''Visitor for AST Call nodes + """Visitor for AST Call nodes add relevant information about the node to the context for use in tests which inspect function calls. :param node: The node that is being inspected :return: - - ''' + """ - self.context['call'] = node + self.context["call"] = node qualname = b_utils.get_call_name(node, self.import_aliases) - name = qualname.split('.')[-1] + name = qualname.split(".")[-1] - self.context['qualname'] = qualname - self.context['name'] = name + self.context["qualname"] = qualname + self.context["name"] = name - self.update_scores(self.tester.run_tests(self.context, 'Call')) + self.update_scores(self.tester.run_tests(self.context, "Call")) def visit_Import(self, node): - '''Visitor for AST Import nodes + """Visitor for AST Import nodes add relevant information about node to the context for use in tests which inspect imports. 
:param node: The node that is being inspected :return: - - ''' + """ for nodename in node.names: if nodename.asname: self.import_aliases[nodename.asname] = nodename.name self.imports.add(nodename.name) - self.context['module'] = nodename.name - self.update_scores(self.tester.run_tests(self.context, 'Import')) + self.context["module"] = nodename.name + self.update_scores(self.tester.run_tests(self.context, "Import")) def visit_ImportFrom(self, node): - '''Visitor for AST ImportFrom nodes + """Visitor for AST ImportFrom nodes add relevant information about node to the context for use in tests which inspect imports. :param node: The node that is being inspected :return: - - ''' + """ module = node.module if module is None: return self.visit_Import(node) @@ -135,15 +134,16 @@ def visit_ImportFrom(self, node): # Even if import is not aliased we need an entry that maps # name to module.name. For example, with 'from a import b' # b should be aliased to the qualified name a.b - self.import_aliases[nodename.name] = (module + '.' + - nodename.name) + self.import_aliases[nodename.name] = ( + module + "." + nodename.name + ) self.imports.add(module + "." + nodename.name) - self.context['module'] = module - self.context['name'] = nodename.name - self.update_scores(self.tester.run_tests(self.context, 'ImportFrom')) + self.context["module"] = module + self.context["name"] = nodename.name + self.update_scores(self.tester.run_tests(self.context, "ImportFrom")) def visit_Constant(self, node): - '''Visitor for AST Constant nodes + """Visitor for AST Constant nodes call the appropriate method for the node type. 
this maintains compatibility with <3.6 and 3.8+ @@ -153,75 +153,76 @@ def visit_Constant(self, node): :param node: The node that is being inspected :return: - - ''' + """ if isinstance(node.value, str): self.visit_Str(node) elif isinstance(node.value, bytes): self.visit_Bytes(node) def visit_Str(self, node): - '''Visitor for AST String nodes + """Visitor for AST String nodes add relevant information about node to the context for use in tests which inspect strings. :param node: The node that is being inspected :return: - - ''' - self.context['str'] = node.s + """ + self.context["str"] = node.s if not isinstance(node._bandit_parent, ast.Expr): # docstring - self.context['linerange'] = b_utils.linerange_fix( + self.context["linerange"] = b_utils.linerange_fix( node._bandit_parent ) - self.update_scores(self.tester.run_tests(self.context, 'Str')) + self.update_scores(self.tester.run_tests(self.context, "Str")) def visit_Bytes(self, node): - '''Visitor for AST Bytes nodes + """Visitor for AST Bytes nodes add relevant information about node to the context for use in tests which inspect strings. 
:param node: The node that is being inspected :return: - - ''' - self.context['bytes'] = node.s + """ + self.context["bytes"] = node.s if not isinstance(node._bandit_parent, ast.Expr): # docstring - self.context['linerange'] = b_utils.linerange_fix( + self.context["linerange"] = b_utils.linerange_fix( node._bandit_parent ) - self.update_scores(self.tester.run_tests(self.context, 'Bytes')) + self.update_scores(self.tester.run_tests(self.context, "Bytes")) def pre_visit(self, node): self.context = {} - self.context['imports'] = self.imports - self.context['import_aliases'] = self.import_aliases + self.context["imports"] = self.imports + self.context["import_aliases"] = self.import_aliases if self.debug: LOG.debug(ast.dump(node)) - self.metaast.add_node(node, '', self.depth) + self.metaast.add_node(node, "", self.depth) - if hasattr(node, 'lineno'): - self.context['lineno'] = node.lineno + if hasattr(node, "lineno"): + self.context["lineno"] = node.lineno if node.lineno in self.nosec_lines: LOG.debug("skipped, nosec") self.metrics.note_nosec() return False - if hasattr(node, 'col_offset'): - self.context['col_offset'] = node.col_offset + if hasattr(node, "col_offset"): + self.context["col_offset"] = node.col_offset - self.context['node'] = node - self.context['linerange'] = b_utils.linerange_fix(node) - self.context['filename'] = self.fname + self.context["node"] = node + self.context["linerange"] = b_utils.linerange_fix(node) + self.context["filename"] = self.fname self.seen += 1 - LOG.debug("entering: %s %s [%s]", hex(id(node)), type(node), - self.depth) + LOG.debug( + "entering: %s %s [%s]", hex(id(node)), type(node), self.depth + ) self.depth += 1 LOG.debug(self.context) return True def visit(self, node): name = node.__class__.__name__ - method = 'visit_' + name + method = "visit_" + name visitor = getattr(self, method, None) if visitor is not None: if self.debug: @@ -266,26 +267,26 @@ def generic_visit(self, node): self.post_visit(value) def update_scores(self, 
scores): - '''Score updater + """Score updater Since we moved from a single score value to a map of scores per severity, this is needed to update the stored list. :param score: The score list to update our scores with - ''' + """ # we'll end up with something like: # SEVERITY: {0, 0, 0, 10} where 10 is weighted by finding and level for score_type in self.scores: - self.scores[score_type] = list(map( - operator.add, self.scores[score_type], scores[score_type] - )) + self.scores[score_type] = list( + map(operator.add, self.scores[score_type], scores[score_type]) + ) def process(self, data): - '''Main process loop + """Main process loop Build and process the AST :param lines: lines code to process :return score: the aggregated score for the current file - ''' + """ f_ast = ast.parse(data) self.generic_visit(f_ast) return self.scores diff --git a/bandit/core/test_properties.py b/bandit/core/test_properties.py index be328a38f..cf969952f 100644 --- a/bandit/core/test_properties.py +++ b/bandit/core/test_properties.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import logging from bandit.core import utils @@ -12,25 +10,27 @@ def checks(*args): - '''Decorator function to set checks to be run.''' + """Decorator function to set checks to be run.""" + def wrapper(func): if not hasattr(func, "_checks"): func._checks = [] func._checks.extend(utils.check_ast_node(a) for a in args) - LOG.debug('checks() decorator executed') - LOG.debug(' func._checks: %s', func._checks) + LOG.debug("checks() decorator executed") + LOG.debug(" func._checks: %s", func._checks) return func + return wrapper def takes_config(*args): - '''Test function takes config + """Test function takes config Use of this delegate before a test function indicates that it should be passed data from the config file. Passing a name parameter allows aliasing tests and thus sharing config options. 
- ''' + """ name = "" def _takes_config(func): @@ -47,14 +47,16 @@ def _takes_config(func): def test_id(id_val): - '''Test function identifier + """Test function identifier Use this decorator before a test function indicates its simple ID - ''' + """ + def _has_id(func): if not hasattr(func, "_test_id"): func._test_id = id_val return func + return _has_id @@ -65,11 +67,13 @@ def accepts_baseline(*args): with baseline results. Specifically this means it has a way to display candidate results and know when it should do so. """ + def wrapper(func): - if not hasattr(func, '_accepts_baseline'): + if not hasattr(func, "_accepts_baseline"): func._accepts_baseline = True - LOG.debug('accepts_baseline() decorator executed on %s', func.__name__) + LOG.debug("accepts_baseline() decorator executed on %s", func.__name__) return func + return wrapper(args[0]) diff --git a/bandit/core/test_set.py b/bandit/core/test_set.py index 2a472a0a8..8fa2ef236 100644 --- a/bandit/core/test_set.py +++ b/bandit/core/test_set.py @@ -1,14 +1,10 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - - import importlib import logging - from bandit.core import blacklisting from bandit.core import extension_loader @@ -16,14 +12,15 @@ LOG = logging.getLogger(__name__) -class BanditTestSet(object): +class BanditTestSet: def __init__(self, config, profile=None): if not profile: profile = {} extman = extension_loader.MANAGER filtering = self._get_filter(config, profile) - self.plugins = [p for p in extman.plugins - if p.plugin._test_id in filtering] + self.plugins = [ + p for p in extman.plugins if p.plugin._test_id in filtering + ] self.plugins.extend(self._load_builtins(filtering, profile)) self._load_tests(config, self.plugins) @@ -31,26 +28,26 @@ def __init__(self, config, profile=None): def _get_filter(config, profile): extman = extension_loader.MANAGER - inc = set(profile.get('include', [])) - exc = set(profile.get('exclude', [])) + inc = set(profile.get("include", [])) + exc = set(profile.get("exclude", [])) all_blacklist_tests = set() for _, tests in extman.blacklist.items(): - all_blacklist_tests.update(t['id'] for t in tests) + all_blacklist_tests.update(t["id"] for t in tests) # this block is purely for backwards compatibility, the rules are as # follows: # B001,B401 means B401 # B401 means B401 # B001 means all blacklist tests - if 'B001' in inc: + if "B001" in inc: if not inc.intersection(all_blacklist_tests): inc.update(all_blacklist_tests) - inc.discard('B001') - if 'B001' in exc: + inc.discard("B001") + if "B001" in exc: if not exc.intersection(all_blacklist_tests): exc.update(all_blacklist_tests) - exc.discard('B001') + exc.discard("B001") if inc: filtered = inc @@ -61,19 +58,19 @@ def _get_filter(config, profile): return filtered - exc def _load_builtins(self, filtering, profile): - '''loads up builtin functions, so they can be filtered.''' + """loads up builtin functions, so they can be filtered.""" - class Wrapper(object): + class Wrapper: def __init__(self, name, plugin): self.name = name self.plugin = 
plugin extman = extension_loader.MANAGER - blacklist = profile.get('blacklist') + blacklist = profile.get("blacklist") if not blacklist: # not overridden by legacy data blacklist = {} for node, tests in extman.blacklist.items(): - values = [t for t in tests if t['id'] in filtering] + values = [t for t in tests if t["id"] in filtering] if values: blacklist[node] = values @@ -87,13 +84,13 @@ def __init__(self, name, plugin): blacklisting.blacklist._checks = blacklist.keys() blacklisting.blacklist._config = blacklist - return [Wrapper('blacklist', blacklisting.blacklist)] + return [Wrapper("blacklist", blacklisting.blacklist)] def _load_tests(self, config, plugins): - '''Builds a dict mapping tests to node types.''' + """Builds a dict mapping tests to node types.""" self.tests = {} for plugin in plugins: - if hasattr(plugin.plugin, '_takes_config'): + if hasattr(plugin.plugin, "_takes_config"): # TODO(??): config could come from profile ... cfg = config.get_option(plugin.plugin._takes_config) if cfg is None: @@ -102,13 +99,17 @@ def _load_tests(self, config, plugins): plugin.plugin._config = cfg for check in plugin.plugin._checks: self.tests.setdefault(check, []).append(plugin.plugin) - LOG.debug('added function %s (%s) targeting %s', - plugin.name, plugin.plugin._test_id, check) + LOG.debug( + "added function %s (%s) targeting %s", + plugin.name, + plugin.plugin._test_id, + check, + ) def get_tests(self, checktype): - '''Returns all tests that are of type checktype + """Returns all tests that are of type checktype :param checktype: The type of test to filter on :return: A list of tests which are of the specified type - ''' + """ return self.tests.get(checktype) or [] diff --git a/bandit/core/tester.py b/bandit/core/tester.py index fc46655a1..322320919 100644 --- a/bandit/core/tester.py +++ b/bandit/core/tester.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - import copy import logging import warnings @@ -16,7 +14,7 @@ LOG = logging.getLogger(__name__) -class BanditTester(object): +class BanditTester: def __init__(self, testset, debug, nosec_lines): self.results = [] self.testset = testset @@ -25,7 +23,7 @@ def __init__(self, testset, debug, nosec_lines): self.nosec_lines = nosec_lines def run_tests(self, raw_context, checktype): - '''Runs all tests for a certain type of check, for example + """Runs all tests for a certain type of check, for example Runs all tests for a certain type of check, for example 'functions' store results in results. @@ -34,11 +32,11 @@ def run_tests(self, raw_context, checktype): :param checktype: The type of checks to run :param nosec_lines: Lines which should be skipped because of nosec :return: a score based on the number and type of test results - ''' + """ scores = { - 'SEVERITY': [0] * len(constants.RANKING), - 'CONFIDENCE': [0] * len(constants.RANKING) + "SEVERITY": [0] * len(constants.RANKING), + "CONFIDENCE": [0] * len(constants.RANKING), } tests = self.testset.get_tests(checktype) @@ -48,25 +46,27 @@ def run_tests(self, raw_context, checktype): temp_context = copy.copy(raw_context) context = b_context.Context(temp_context) try: - if hasattr(test, '_config'): + if hasattr(test, "_config"): result = test(context, test._config) else: result = test(context) # if we have a result, record it and update scores - if (result is not None and - result.lineno not in self.nosec_lines and - temp_context['lineno'] not in self.nosec_lines): - - if isinstance(temp_context['filename'], bytes): - result.fname = temp_context['filename'].decode('utf-8') + if ( + result is not None + and result.lineno not in self.nosec_lines + and temp_context["lineno"] not in self.nosec_lines + ): + + if isinstance(temp_context["filename"], bytes): + result.fname = temp_context["filename"].decode("utf-8") else: - result.fname = temp_context['filename'] + result.fname = 
temp_context["filename"] if result.lineno is None: - result.lineno = temp_context['lineno'] - result.linerange = temp_context['linerange'] - result.col_offset = temp_context['col_offset'] + result.lineno = temp_context["lineno"] + result.linerange = temp_context["linerange"] + result.col_offset = temp_context["col_offset"] result.test = name if result.test_id == "": result.test_id = test._test_id @@ -76,10 +76,10 @@ def run_tests(self, raw_context, checktype): LOG.debug("Issue identified by %s: %s", name, result) sev = constants.RANKING.index(result.severity) val = constants.RANKING_VALUES[result.severity] - scores['SEVERITY'][sev] += val + scores["SEVERITY"][sev] += val con = constants.RANKING.index(result.confidence) val = constants.RANKING_VALUES[result.confidence] - scores['CONFIDENCE'][con] += val + scores["CONFIDENCE"][con] += val except Exception as e: self.report_error(name, context, e) @@ -93,10 +93,11 @@ def report_error(test, context, error): what = "Bandit internal error running: " what += "%s " % test what += "on file %s at line %i: " % ( - context._context['filename'], - context._context['lineno'] + context._context["filename"], + context._context["lineno"], ) what += str(error) import traceback + what += traceback.format_exc() LOG.error(what) diff --git a/bandit/core/utils.py b/bandit/core/utils.py index 7a0a3f275..c05011e25 100644 --- a/bandit/core/utils.py +++ b/bandit/core/utils.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import ast import logging import os.path @@ -21,7 +19,7 @@ def _get_attr_qual_name(node, aliases): - '''Get a the full name for the attribute node. + """Get a the full name for the attribute node. 
This will resolve a pseudo-qualified name for the attribute rooted at node as long as all the deeper nodes are Names or @@ -34,13 +32,13 @@ def _get_attr_qual_name(node, aliases): :param node: AST Name or Attribute node :param aliases: Import aliases dictionary :returns: Qualified name referred to by the attribute or name. - ''' + """ if isinstance(node, ast.Name): if node.id in aliases: return aliases[node.id] return node.id elif isinstance(node, ast.Attribute): - name = '%s.%s' % (_get_attr_qual_name(node.value, aliases), node.attr) + name = f"{_get_attr_qual_name(node.value, aliases)}.{node.attr}" if name in aliases: return aliases[name] return name @@ -50,9 +48,9 @@ def _get_attr_qual_name(node, aliases): def get_call_name(node, aliases): if isinstance(node.func, ast.Name): - if deepgetattr(node, 'func.id') in aliases: - return aliases[deepgetattr(node, 'func.id')] - return deepgetattr(node, 'func.id') + if deepgetattr(node, "func.id") in aliases: + return aliases[deepgetattr(node, "func.id")] + return deepgetattr(node, "func.id") elif isinstance(node.func, ast.Attribute): return _get_attr_qual_name(node.func, aliases) else: @@ -67,24 +65,24 @@ def get_qual_attr(node, aliases): prefix = "" if isinstance(node, ast.Attribute): try: - val = deepgetattr(node, 'value.id') + val = deepgetattr(node, "value.id") if val in aliases: prefix = aliases[val] else: - prefix = deepgetattr(node, 'value.id') + prefix = deepgetattr(node, "value.id") except Exception: # NOTE(tkelsey): degrade gracefully when we can't get the fully # qualified name for an attr, just return its base name. 
pass - return "%s.%s" % (prefix, node.attr) + return f"{prefix}.{node.attr}" else: return "" # TODO(tkelsey): process other node types def deepgetattr(obj, attr): """Recurses through an attribute chain to get the ultimate value.""" - for key in attr.split('.'): + for key in attr.split("."): obj = getattr(obj, key) return obj @@ -95,30 +93,35 @@ class InvalidModulePath(Exception): class ConfigError(Exception): """Raised when the config file fails validation.""" + def __init__(self, message, config_file): self.config_file = config_file - self.message = "{0} : {1}".format(config_file, message) - super(ConfigError, self).__init__(self.message) + self.message = f"{config_file} : {message}" + super().__init__(self.message) class ProfileNotFound(Exception): """Raised when chosen profile cannot be found.""" + def __init__(self, config_file, profile): self.config_file = config_file self.profile = profile - message = 'Unable to find profile (%s) in config file: %s' % ( - self.profile, self.config_file) - super(ProfileNotFound, self).__init__(message) + message = "Unable to find profile ({}) in config file: {}".format( + self.profile, + self.config_file, + ) + super().__init__(message) -def warnings_formatter(message, category=UserWarning, filename='', lineno=-1, - line=''): - '''Monkey patch for warnings.warn to suppress cruft output.''' - return "{0}\n".format(message) +def warnings_formatter( + message, category=UserWarning, filename="", lineno=-1, line="" +): + """Monkey patch for warnings.warn to suppress cruft output.""" + return f"{message}\n" def get_module_qualname_from_path(path): - '''Get the module's qualified name by analysis of the path. + """Get the module's qualified name by analysis of the path. Resolve the absolute pathname and eliminate symlinks. This could result in an incorrect name if symlinks are used to restructure the python lib @@ -132,27 +135,29 @@ def get_module_qualname_from_path(path): :param: Path to module file. 
Relative paths will be resolved relative to current working directory. :return: fully qualified module name - ''' + """ (head, tail) = os.path.split(path) - if head == '' or tail == '': - raise InvalidModulePath('Invalid python file path: "%s"' - ' Missing path or file name' % (path)) + if head == "" or tail == "": + raise InvalidModulePath( + 'Invalid python file path: "%s"' + " Missing path or file name" % (path) + ) qname = [os.path.splitext(tail)[0]] - while head not in ['/', '.', '']: - if os.path.isfile(os.path.join(head, '__init__.py')): + while head not in ["/", ".", ""]: + if os.path.isfile(os.path.join(head, "__init__.py")): (head, tail) = os.path.split(head) qname.insert(0, tail) else: break - qualname = '.'.join(qname) + qualname = ".".join(qname) return qualname def namespace_path_join(base, name): - '''Extend the current namespace path with an additional name + """Extend the current namespace path with an additional name Take a namespace path (i.e., package.module.class) and extends it with an additional name (i.e., package.module.class.subclass). @@ -162,12 +167,12 @@ def namespace_path_join(base, name): :param name: (String) The new name to append to the base path. :returns: (String) A new namespace path resulting from combination of base and name. - ''' - return '%s.%s' % (base, name) + """ + return f"{base}.{name}" def namespace_path_split(path): - '''Split the namespace path into a pair (head, tail). + """Split the namespace path into a pair (head, tail). Tail will be the last namespace path component and head will be everything leading up to that in the path. This is similar to @@ -176,12 +181,12 @@ def namespace_path_split(path): :param path: (String) A namespace path. :returns: (String, String) A tuple where the first component is the base path and the second is the last path component. 
- ''' - return tuple(path.rsplit('.', 1)) + """ + return tuple(path.rsplit(".", 1)) def escaped_bytes_representation(b): - '''PY3 bytes need escaping for comparison with other strings. + """PY3 bytes need escaping for comparison with other strings. In practice it turns control characters into acceptable codepoints then encodes them into bytes again to turn unprintable bytes into printable @@ -189,14 +194,13 @@ def escaped_bytes_representation(b): This is safe to do for the whole range 0..255 and result matches unicode_escape on a unicode string. - ''' - return b.decode('unicode_escape').encode('unicode_escape') + """ + return b.decode("unicode_escape").encode("unicode_escape") def linerange(node): """Get line number range from a node.""" - strip = {"body": None, "orelse": None, - "handlers": None, "finalbody": None} + strip = {"body": None, "orelse": None, "handlers": None, "finalbody": None} for key in strip.keys(): if hasattr(node, key): strip[key] = getattr(node, key) @@ -205,7 +209,7 @@ def linerange(node): lines_min = 9999999999 lines_max = -1 for n in ast.walk(node): - if hasattr(n, 'lineno'): + if hasattr(n, "lineno"): lines_min = min(lines_min, n.lineno) lines_max = max(lines_max, n.lineno) @@ -222,8 +226,8 @@ def linerange_fix(node): """Try and work around a known Python bug with multi-line strings.""" # deal with multiline strings lineno behavior (Python issue #16806) lines = linerange(node) - if hasattr(node, '_bandit_sibling') and hasattr( - node._bandit_sibling, 'lineno' + if hasattr(node, "_bandit_sibling") and hasattr( + node._bandit_sibling, "lineno" ): start = min(lines) delta = node._bandit_sibling.lineno - start @@ -233,7 +237,7 @@ def linerange_fix(node): def concat_string(node, stop=None): - '''Builds a string from a ast.BinOp chain. + """Builds a string from a ast.BinOp chain. This will build a string from a series of ast.Str nodes wrapped in ast.BinOp nodes. Something like "a" + "b" + "c" or "a %s" % val etc. 
@@ -242,17 +246,20 @@ def concat_string(node, stop=None): :param node: (ast.Str or ast.BinOp) The node to process :param stop: (ast.Str or ast.BinOp) Optional base node to stop at :returns: (Tuple) the root node of the expression, the string value - ''' + """ + def _get(node, bits, stop=None): if node != stop: bits.append( _get(node.left, bits, stop) if isinstance(node.left, ast.BinOp) - else node.left) + else node.left + ) bits.append( _get(node.right, bits, stop) if isinstance(node.right, ast.BinOp) - else node.right) + else node.right + ) bits = [node] while isinstance(node._bandit_parent, ast.BinOp): @@ -263,7 +270,7 @@ def _get(node, bits, stop=None): def get_called_name(node): - '''Get a function name from an ast.Call node. + """Get a function name from an ast.Call node. An ast.Call node representing a method call with present differently to one wrapping a function call: thing.call() vs call(). This helper will grab the @@ -271,7 +278,7 @@ def get_called_name(node): :param node: (ast.Call) the call node :returns: (String) the function name - ''' + """ func = node.func try: return func.attr if isinstance(func, ast.Attribute) else func.id @@ -280,11 +287,11 @@ def get_called_name(node): def get_path_for_function(f): - '''Get the path of the file where the function is defined. + """Get the path of the file where the function is defined. 
:returns: the path, or None if one could not be found or f is not a real function - ''' + """ if hasattr(f, "__module__"): module_name = f.__module__ @@ -306,17 +313,19 @@ def parse_ini_file(f_loc): config = configparser.ConfigParser() try: config.read(f_loc) - return {k: v for k, v in config.items('bandit')} + return {k: v for k, v in config.items("bandit")} except (configparser.Error, KeyError, TypeError): - LOG.warning("Unable to parse config file %s or missing [bandit] " - "section", f_loc) + LOG.warning( + "Unable to parse config file %s or missing [bandit] " "section", + f_loc, + ) return None def check_ast_node(name): - 'Check if the given name is that of a valid AST node.' + "Check if the given name is that of a valid AST node." try: node = getattr(ast, name) if issubclass(node, ast.AST): diff --git a/bandit/formatters/csv.py b/bandit/formatters/csv.py index 14e18c597..f0dbc08bf 100644 --- a/bandit/formatters/csv.py +++ b/bandit/formatters/csv.py @@ -1,7 +1,5 @@ -# -*- coding:utf-8 -*- # # SPDX-License-Identifier: Apache-2.0 - r""" ============= CSV Formatter @@ -27,8 +25,6 @@ """ # Necessary for this formatter to work when imported on Python 2. Importing # the standard library's csv module conflicts with the name of this module. 
-from __future__ import absolute_import - import csv import logging import sys @@ -39,36 +35,40 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): - '''Prints issues in CSV format + """Prints issues in CSV format :param manager: the bandit manager object :param fileobj: The output file object, which may be sys.stdout :param sev_level: Filtering severity level :param conf_level: Filtering confidence level :param lines: Number of lines to report, -1 for all - ''' + """ - results = manager.get_issue_list(sev_level=sev_level, - conf_level=conf_level) + results = manager.get_issue_list( + sev_level=sev_level, conf_level=conf_level + ) with fileobj: - fieldnames = ['filename', - 'test_name', - 'test_id', - 'issue_severity', - 'issue_confidence', - 'issue_text', - 'line_number', - 'col_offset', - 'line_range', - 'more_info'] - - writer = csv.DictWriter(fileobj, fieldnames=fieldnames, - extrasaction='ignore') + fieldnames = [ + "filename", + "test_name", + "test_id", + "issue_severity", + "issue_confidence", + "issue_text", + "line_number", + "col_offset", + "line_range", + "more_info", + ] + + writer = csv.DictWriter( + fileobj, fieldnames=fieldnames, extrasaction="ignore" + ) writer.writeheader() for result in results: r = result.as_dict(with_code=False) - r['more_info'] = docs_utils.get_url(r['test_id']) + r["more_info"] = docs_utils.get_url(r["test_id"]) writer.writerow(r) if fileobj.name != sys.stdout.name: diff --git a/bandit/formatters/custom.py b/bandit/formatters/custom.py index 17740c5c1..fb9e8c34a 100644 --- a/bandit/formatters/custom.py +++ b/bandit/formatters/custom.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright (c) 2017 Hewlett Packard Enterprise # # SPDX-License-Identifier: Apache-2.0 - """ ================ Custom Formatter @@ -24,7 +22,6 @@ .. 
versionadded:: 1.5.0 """ - import logging import os import re @@ -39,6 +36,7 @@ class SafeMapper(dict): """Safe mapper to handle format key errors""" + @classmethod # To prevent PEP8 warnings in the test suite def __missing__(cls, key): return "{%s}" % key @@ -57,13 +55,13 @@ def report(manager, fileobj, sev_level, conf_level, template=None): {test_id}[bandit]: {severity}: {msg}') """ - machine_output = {'results': [], 'errors': []} + machine_output = {"results": [], "errors": []} for (fname, reason) in manager.get_skipped(): - machine_output['errors'].append({'filename': fname, - 'reason': reason}) + machine_output["errors"].append({"filename": fname, "reason": reason}) - results = manager.get_issue_list(sev_level=sev_level, - conf_level=conf_level) + results = manager.get_issue_list( + sev_level=sev_level, conf_level=conf_level + ) msg_template = template if template is None: @@ -71,21 +69,19 @@ def report(manager, fileobj, sev_level, conf_level, template=None): # Dictionary of non-terminal tags that will be expanded tag_mapper = { - 'abspath': lambda issue: os.path.abspath(issue.fname), - 'relpath': lambda issue: os.path.relpath(issue.fname), - 'line': lambda issue: issue.lineno, - 'col': lambda issue: issue.col_offset, - 'test_id': lambda issue: issue.test_id, - 'severity': lambda issue: issue.severity, - 'msg': lambda issue: issue.text, - 'confidence': lambda issue: issue.confidence, - 'range': lambda issue: issue.linerange + "abspath": lambda issue: os.path.abspath(issue.fname), + "relpath": lambda issue: os.path.relpath(issue.fname), + "line": lambda issue: issue.lineno, + "col": lambda issue: issue.col_offset, + "test_id": lambda issue: issue.test_id, + "severity": lambda issue: issue.severity, + "msg": lambda issue: issue.text, + "confidence": lambda issue: issue.confidence, + "range": lambda issue: issue.linerange, } # Create dictionary with tag sets to speed up search for similar tags - tag_sim_dict = dict( - [(tag, set(tag)) for tag, _ in 
tag_mapper.items()] - ) + tag_sim_dict = {tag: set(tag) for tag, _ in tag_mapper.items()} # Parse the format_string template and check the validity of tags try: @@ -104,8 +100,9 @@ def report(manager, fileobj, sev_level, conf_level, template=None): sys.exit(2) def get_similar_tag(tag): - similarity_list = [(len(set(tag) & t_set), t) - for t, t_set in tag_sim_dict.items()] + similarity_list = [ + (len(set(tag) & t_set), t) for t, t_set in tag_sim_dict.items() + ] return sorted(similarity_list)[-1][1] tag_blacklist = [] @@ -115,7 +112,9 @@ def get_similar_tag(tag): similar_tag = get_similar_tag(tag) LOG.warning( "Tag '%s' was not recognized and will be skipped, " - "did you mean to use '%s'?", tag, similar_tag + "did you mean to use '%s'?", + tag, + similar_tag, ) tag_blacklist += [tag] @@ -124,8 +123,8 @@ def get_similar_tag(tag): for literal_text, field_name, fmt_spec, conversion in parsed_template_orig: if literal_text: # if there is '{' or '}', double it to prevent expansion - literal_text = re.sub('{', '{{', literal_text) - literal_text = re.sub('}', '}}', literal_text) + literal_text = re.sub("{", "{{", literal_text) + literal_text = re.sub("}", "}}", literal_text) msg_parsed_template_list.append(literal_text) if field_name is not None: @@ -134,16 +133,20 @@ def get_similar_tag(tag): continue # Append the fmt_spec part params = [field_name, fmt_spec, conversion] - markers = ['', ':', '!'] + markers = ["", ":", "!"] msg_parsed_template_list.append( - ['{'] + - ["%s" % (m + p) if p else '' - for m, p in zip(markers, params)] + - ['}'] + ["{"] + + [ + "%s" % (m + p) if p else "" + for m, p in zip(markers, params) + ] + + ["}"] ) - msg_parsed_template = "".join([item for lst in msg_parsed_template_list - for item in lst]) + "\n" + msg_parsed_template = ( + "".join([item for lst in msg_parsed_template_list for item in lst]) + + "\n" + ) with fileobj: for defect in results: evaluated_tags = SafeMapper( diff --git a/bandit/formatters/html.py 
b/bandit/formatters/html.py index 3c15aad2f..93262b4ef 100644 --- a/bandit/formatters/html.py +++ b/bandit/formatters/html.py @@ -2,7 +2,6 @@ # Copyright (c) 2015 Hewlett Packard Enterprise # # SPDX-License-Identifier: Apache-2.0 - r""" ============== HTML formatter @@ -140,11 +139,9 @@ .. versionadded:: 0.14.0 """ -from __future__ import absolute_import - -from html import escape as html_escape import logging import sys +from html import escape as html_escape from bandit.core import docs_utils from bandit.core import test_properties @@ -165,7 +162,7 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): :param lines: Number of lines to report, -1 for all """ - header_block = u""" + header_block = """ @@ -241,7 +238,7 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): """ - report_block = u""" + report_block = """ {metrics} {skipped} @@ -255,7 +252,7 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): """ - issue_block = u""" + issue_block = """
{test_name}: {test_text}
@@ -271,7 +268,7 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1):
""" - code_block = u""" + code_block = """
 {code}
@@ -279,7 +276,7 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1):
 
""" - candidate_block = u""" + candidate_block = """

Candidates: @@ -287,7 +284,7 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1):
""" - candidate_issue = u""" + candidate_issue = """
{code}
@@ -295,7 +292,7 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1):
""" - skipped_block = u""" + skipped_block = """
@@ -305,7 +302,7 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1):
""" - metrics_block = u""" + metrics_block = """
@@ -323,54 +320,61 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): baseline = not isinstance(issues, list) # build the skipped string to insert in the report - skipped_str = ''.join('%s reason: %s
' % (fname, reason) - for fname, reason in manager.get_skipped()) + skipped_str = "".join( + f"{fname} reason: {reason}
" + for fname, reason in manager.get_skipped() + ) if skipped_str: skipped_text = skipped_block.format(files_list=skipped_str) else: - skipped_text = '' + skipped_text = "" # build the results string to insert in the report - results_str = '' + results_str = "" for index, issue in enumerate(issues): if not baseline or len(issues[issue]) == 1: - candidates = '' - safe_code = html_escape(issue.get_code(lines, True). - strip('\n').lstrip(' ')) + candidates = "" + safe_code = html_escape( + issue.get_code(lines, True).strip("\n").lstrip(" ") + ) code = code_block.format(code=safe_code) else: - candidates_str = '' - code = '' + candidates_str = "" + code = "" for candidate in issues[issue]: - candidate_code = html_escape(candidate.get_code(lines, True). - strip('\n').lstrip(' ')) + candidate_code = html_escape( + candidate.get_code(lines, True).strip("\n").lstrip(" ") + ) candidates_str += candidate_issue.format(code=candidate_code) candidates = candidate_block.format(candidate_list=candidates_str) url = docs_utils.get_url(issue.test_id) - results_str += issue_block.format(issue_no=index, - issue_class='issue-sev-{}'. 
- format(issue.severity.lower()), - test_name=issue.test, - test_id=issue.test_id, - test_text=issue.text, - severity=issue.severity, - confidence=issue.confidence, - path=issue.fname, code=code, - candidates=candidates, - url=url, - line_number=issue.lineno) + results_str += issue_block.format( + issue_no=index, + issue_class=f"issue-sev-{issue.severity.lower()}", + test_name=issue.test, + test_id=issue.test_id, + test_text=issue.text, + severity=issue.severity, + confidence=issue.confidence, + path=issue.fname, + code=code, + candidates=candidates, + url=url, + line_number=issue.lineno, + ) # build the metrics string to insert in the report metrics_summary = metrics_block.format( - loc=manager.metrics.data['_totals']['loc'], - nosec=manager.metrics.data['_totals']['nosec']) + loc=manager.metrics.data["_totals"]["loc"], + nosec=manager.metrics.data["_totals"]["nosec"], + ) # build the report and output it - report_contents = report_block.format(metrics=metrics_summary, - skipped=skipped_text, - results=results_str) + report_contents = report_block.format( + metrics=metrics_summary, skipped=skipped_text, results=results_str + ) with fileobj: wrapped_file = utils.wrap_file_object(fileobj) diff --git a/bandit/formatters/json.py b/bandit/formatters/json.py index 4c5e18720..cebe8310f 100644 --- a/bandit/formatters/json.py +++ b/bandit/formatters/json.py @@ -1,7 +1,5 @@ -# -*- coding:utf-8 -*- # # SPDX-License-Identifier: Apache-2.0 - r""" ============== JSON formatter @@ -67,8 +65,6 @@ """ # Necessary so we can import the standard library json module while continuing # to name this file json.py. 
(Python 2 only) -from __future__ import absolute_import - import datetime import json import logging @@ -83,22 +79,22 @@ @test_properties.accepts_baseline def report(manager, fileobj, sev_level, conf_level, lines=-1): - '''''Prints issues in JSON format + """''Prints issues in JSON format :param manager: the bandit manager object :param fileobj: The output file object, which may be sys.stdout :param sev_level: Filtering severity level :param conf_level: Filtering confidence level :param lines: Number of lines to report, -1 for all - ''' + """ - machine_output = {'results': [], 'errors': []} + machine_output = {"results": [], "errors": []} for (fname, reason) in manager.get_skipped(): - machine_output['errors'].append({'filename': fname, - 'reason': reason}) + machine_output["errors"].append({"filename": fname, "reason": reason}) - results = manager.get_issue_list(sev_level=sev_level, - conf_level=conf_level) + results = manager.get_issue_list( + sev_level=sev_level, conf_level=conf_level + ) baseline = not isinstance(results, list) @@ -106,34 +102,37 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): collector = [] for r in results: d = r.as_dict() - d['more_info'] = docs_utils.get_url(d['test_id']) + d["more_info"] = docs_utils.get_url(d["test_id"]) if len(results[r]) > 1: - d['candidates'] = [c.as_dict() for c in results[r]] + d["candidates"] = [c.as_dict() for c in results[r]] collector.append(d) else: collector = [r.as_dict() for r in results] for elem in collector: - elem['more_info'] = docs_utils.get_url(elem['test_id']) + elem["more_info"] = docs_utils.get_url(elem["test_id"]) itemgetter = operator.itemgetter - if manager.agg_type == 'vuln': - machine_output['results'] = sorted(collector, - key=itemgetter('test_name')) + if manager.agg_type == "vuln": + machine_output["results"] = sorted( + collector, key=itemgetter("test_name") + ) else: - machine_output['results'] = sorted(collector, - key=itemgetter('filename')) + machine_output["results"] 
= sorted( + collector, key=itemgetter("filename") + ) - machine_output['metrics'] = manager.metrics.data + machine_output["metrics"] = manager.metrics.data # timezone agnostic format TS_FORMAT = "%Y-%m-%dT%H:%M:%SZ" time_string = datetime.datetime.utcnow().strftime(TS_FORMAT) - machine_output['generated_at'] = time_string + machine_output["generated_at"] = time_string - result = json.dumps(machine_output, sort_keys=True, - indent=2, separators=(',', ': ')) + result = json.dumps( + machine_output, sort_keys=True, indent=2, separators=(",", ": ") + ) with fileobj: fileobj.write(result) diff --git a/bandit/formatters/screen.py b/bandit/formatters/screen.py index d501dff54..0573b24ac 100644 --- a/bandit/formatters/screen.py +++ b/bandit/formatters/screen.py @@ -1,8 +1,6 @@ # Copyright (c) 2015 Hewlett Packard Enterprise -# -*- coding:utf-8 -*- # # SPDX-License-Identifier: Apache-2.0 - r""" ================ Screen formatter @@ -27,9 +25,6 @@ .. versionadded:: 0.9.0 """ - -from __future__ import print_function - import datetime import logging import sys @@ -38,7 +33,7 @@ from bandit.core import docs_utils from bandit.core import test_properties -IS_WIN_PLATFORM = sys.platform.startswith('win32') +IS_WIN_PLATFORM = sys.platform.startswith("win32") COLORAMA = False # This fixes terminal colors not displaying properly on Windows systems. 
@@ -56,28 +51,31 @@ LOG = logging.getLogger(__name__) COLOR = { - 'DEFAULT': '\033[0m', - 'HEADER': '\033[95m', - 'LOW': '\033[94m', - 'MEDIUM': '\033[93m', - 'HIGH': '\033[91m', + "DEFAULT": "\033[0m", + "HEADER": "\033[95m", + "LOW": "\033[94m", + "MEDIUM": "\033[93m", + "HIGH": "\033[91m", } def header(text, *args): - return u'%s%s%s' % (COLOR['HEADER'], (text % args), COLOR['DEFAULT']) + return "{}{}{}".format(COLOR["HEADER"], (text % args), COLOR["DEFAULT"]) def get_verbose_details(manager): bits = [] - bits.append(header(u'Files in scope (%i):', len(manager.files_list))) - tpl = u"\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})" - bits.extend([tpl % (item, sum(score['SEVERITY']), sum(score['CONFIDENCE'])) - for (item, score) - in zip(manager.files_list, manager.scores)]) - bits.append(header(u'Files excluded (%i):', len(manager.excluded_files))) - bits.extend([u"\t%s" % fname for fname in manager.excluded_files]) - return '\n'.join([str(bit) for bit in bits]) + bits.append(header("Files in scope (%i):", len(manager.files_list))) + tpl = "\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})" + bits.extend( + [ + tpl % (item, sum(score["SEVERITY"]), sum(score["CONFIDENCE"])) + for (item, score) in zip(manager.files_list, manager.scores) + ] + ) + bits.append(header("Files excluded (%i):", len(manager.excluded_files))) + bits.extend(["\t%s" % fname for fname in manager.excluded_files]) + return "\n".join([str(bit) for bit in bits]) def get_metrics(manager): @@ -86,45 +84,68 @@ def get_metrics(manager): for (criteria, _) in constants.CRITERIA: bits.append("\tTotal issues (by %s):" % (criteria.lower())) for rank in constants.RANKING: - bits.append("\t\t%s: %s" % ( - rank.capitalize(), - manager.metrics.data['_totals']['%s.%s' % (criteria, rank)])) - return '\n'.join([str(bit) for bit in bits]) - - -def _output_issue_str(issue, indent, show_lineno=True, show_code=True, - lines=-1): + bits.append( + "\t\t%s: %s" + % ( + rank.capitalize(), + 
manager.metrics.data["_totals"][f"{criteria}.{rank}"], + ) + ) + return "\n".join([str(bit) for bit in bits]) + + +def _output_issue_str( + issue, indent, show_lineno=True, show_code=True, lines=-1 +): # returns a list of lines that should be added to the existing lines list bits = [] - bits.append("%s%s>> Issue: [%s:%s] %s" % ( - indent, COLOR[issue.severity], issue.test_id, issue.test, issue.text)) - - bits.append("%s Severity: %s Confidence: %s" % ( - indent, issue.severity.capitalize(), issue.confidence.capitalize())) - - bits.append("%s Location: %s:%s:%s" % ( - indent, issue.fname, - issue.lineno if show_lineno else "", - issue.col_offset if show_lineno else "")) - - bits.append("%s More Info: %s%s" % ( - indent, docs_utils.get_url(issue.test_id), COLOR['DEFAULT'])) + bits.append( + "%s%s>> Issue: [%s:%s] %s" + % ( + indent, + COLOR[issue.severity], + issue.test_id, + issue.test, + issue.text, + ) + ) + + bits.append( + "%s Severity: %s Confidence: %s" + % (indent, issue.severity.capitalize(), issue.confidence.capitalize()) + ) + + bits.append( + "%s Location: %s:%s:%s" + % ( + indent, + issue.fname, + issue.lineno if show_lineno else "", + issue.col_offset if show_lineno else "", + ) + ) + + bits.append( + "%s More Info: %s%s" + % (indent, docs_utils.get_url(issue.test_id), COLOR["DEFAULT"]) + ) if show_code: - bits.extend([indent + line for line in - issue.get_code(lines, True).split('\n')]) + bits.extend( + [indent + line for line in issue.get_code(lines, True).split("\n")] + ) - return '\n'.join([bit for bit in bits]) + return "\n".join([bit for bit in bits]) def get_results(manager, sev_level, conf_level, lines): bits = [] issues = manager.get_issue_list(sev_level, conf_level) baseline = not isinstance(issues, list) - candidate_indent = ' ' * 10 + candidate_indent = " " * 10 if not len(issues): - return u"\tNo issues identified." + return "\tNo issues identified." 
for issue in issues: # if not a baseline or only one candidate we know the issue @@ -133,24 +154,26 @@ def get_results(manager, sev_level, conf_level, lines): # otherwise show the finding and the candidates else: - bits.append(_output_issue_str(issue, "", - show_lineno=False, - show_code=False)) + bits.append( + _output_issue_str( + issue, "", show_lineno=False, show_code=False + ) + ) - bits.append(u'\n-- Candidate Issues --') + bits.append("\n-- Candidate Issues --") for candidate in issues[issue]: - bits.append(_output_issue_str(candidate, - candidate_indent, - lines=lines)) - bits.append('\n') - bits.append(u'-' * 50) + bits.append( + _output_issue_str(candidate, candidate_indent, lines=lines) + ) + bits.append("\n") + bits.append("-" * 50) - return '\n'.join([bit for bit in bits]) + return "\n".join([bit for bit in bits]) def do_print(bits): # needed so we can mock this stuff - print('\n'.join([bit for bit in bits])) + print("\n".join([bit for bit in bits])) @test_properties.accepts_baseline @@ -179,11 +202,15 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): bits.append(header("\nTest results:")) bits.append(get_results(manager, sev_level, conf_level, lines)) bits.append(header("\nCode scanned:")) - bits.append('\tTotal lines of code: %i' % - (manager.metrics.data['_totals']['loc'])) + bits.append( + "\tTotal lines of code: %i" + % (manager.metrics.data["_totals"]["loc"]) + ) - bits.append('\tTotal lines skipped (#nosec): %i' % - (manager.metrics.data['_totals']['nosec'])) + bits.append( + "\tTotal lines skipped (#nosec): %i" + % (manager.metrics.data["_totals"]["nosec"]) + ) bits.append(get_metrics(manager)) skipped = manager.get_skipped() @@ -192,8 +219,11 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): do_print(bits) if fileobj.name != sys.stdout.name: - LOG.info("Screen formatter output was not written to file: %s, " - "consider '-f txt'", fileobj.name) + LOG.info( + "Screen formatter output was not written to file: %s, " 
+ "consider '-f txt'", + fileobj.name, + ) if IS_WIN_PLATFORM and COLORAMA: colorama.deinit() diff --git a/bandit/formatters/text.py b/bandit/formatters/text.py index 1c77a31a4..ec8e0c8bf 100644 --- a/bandit/formatters/text.py +++ b/bandit/formatters/text.py @@ -1,8 +1,6 @@ # Copyright (c) 2015 Hewlett Packard Enterprise -# -*- coding:utf-8 -*- # # SPDX-License-Identifier: Apache-2.0 - r""" ============== Text Formatter @@ -27,9 +25,6 @@ .. versionadded:: 0.9.0 """ - -from __future__ import print_function - import datetime import logging import sys @@ -44,14 +39,17 @@ def get_verbose_details(manager): bits = [] - bits.append(u'Files in scope (%i):' % len(manager.files_list)) - tpl = u"\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})" - bits.extend([tpl % (item, sum(score['SEVERITY']), sum(score['CONFIDENCE'])) - for (item, score) - in zip(manager.files_list, manager.scores)]) - bits.append(u'Files excluded (%i):' % len(manager.excluded_files)) - bits.extend([u"\t%s" % fname for fname in manager.excluded_files]) - return '\n'.join([bit for bit in bits]) + bits.append("Files in scope (%i):" % len(manager.files_list)) + tpl = "\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})" + bits.extend( + [ + tpl % (item, sum(score["SEVERITY"]), sum(score["CONFIDENCE"])) + for (item, score) in zip(manager.files_list, manager.scores) + ] + ) + bits.append("Files excluded (%i):" % len(manager.excluded_files)) + bits.extend(["\t%s" % fname for fname in manager.excluded_files]) + return "\n".join([bit for bit in bits]) def get_metrics(manager): @@ -60,44 +58,59 @@ def get_metrics(manager): for (criteria, _) in constants.CRITERIA: bits.append("\tTotal issues (by %s):" % (criteria.lower())) for rank in constants.RANKING: - bits.append("\t\t%s: %s" % ( - rank.capitalize(), - manager.metrics.data['_totals']['%s.%s' % (criteria, rank)])) - return '\n'.join([bit for bit in bits]) - - -def _output_issue_str(issue, indent, show_lineno=True, show_code=True, - lines=-1): + bits.append( + "\t\t%s: %s" + 
% ( + rank.capitalize(), + manager.metrics.data["_totals"][f"{criteria}.{rank}"], + ) + ) + return "\n".join([bit for bit in bits]) + + +def _output_issue_str( + issue, indent, show_lineno=True, show_code=True, lines=-1 +): # returns a list of lines that should be added to the existing lines list bits = [] - bits.append("%s>> Issue: [%s:%s] %s" % ( - indent, issue.test_id, issue.test, issue.text)) - - bits.append("%s Severity: %s Confidence: %s" % ( - indent, issue.severity.capitalize(), issue.confidence.capitalize())) - - bits.append("%s Location: %s:%s:%s" % ( - indent, issue.fname, issue.lineno if show_lineno else "", - issue.col_offset if show_lineno else "")) - - bits.append("%s More Info: %s" % ( - indent, docs_utils.get_url(issue.test_id))) + bits.append( + "%s>> Issue: [%s:%s] %s" + % (indent, issue.test_id, issue.test, issue.text) + ) + + bits.append( + "%s Severity: %s Confidence: %s" + % (indent, issue.severity.capitalize(), issue.confidence.capitalize()) + ) + + bits.append( + "%s Location: %s:%s:%s" + % ( + indent, + issue.fname, + issue.lineno if show_lineno else "", + issue.col_offset if show_lineno else "", + ) + ) + + bits.append(f"{indent} More Info: {docs_utils.get_url(issue.test_id)}") if show_code: - bits.extend([indent + line for line in - issue.get_code(lines, True).split('\n')]) + bits.extend( + [indent + line for line in issue.get_code(lines, True).split("\n")] + ) - return '\n'.join([bit for bit in bits]) + return "\n".join([bit for bit in bits]) def get_results(manager, sev_level, conf_level, lines): bits = [] issues = manager.get_issue_list(sev_level, conf_level) baseline = not isinstance(issues, list) - candidate_indent = ' ' * 10 + candidate_indent = " " * 10 if not len(issues): - return u"\tNo issues identified." + return "\tNo issues identified." 
for issue in issues: # if not a baseline or only one candidate we know the issue @@ -106,18 +119,20 @@ def get_results(manager, sev_level, conf_level, lines): # otherwise show the finding and the candidates else: - bits.append(_output_issue_str(issue, "", - show_lineno=False, - show_code=False)) + bits.append( + _output_issue_str( + issue, "", show_lineno=False, show_code=False + ) + ) - bits.append(u'\n-- Candidate Issues --') + bits.append("\n-- Candidate Issues --") for candidate in issues[issue]: - bits.append(_output_issue_str(candidate, - candidate_indent, - lines=lines)) - bits.append('\n') - bits.append(u'-' * 50) - return '\n'.join([bit for bit in bits]) + bits.append( + _output_issue_str(candidate, candidate_indent, lines=lines) + ) + bits.append("\n") + bits.append("-" * 50) + return "\n".join([bit for bit in bits]) @test_properties.accepts_baseline @@ -142,17 +157,21 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): bits.append("\nTest results:") bits.append(get_results(manager, sev_level, conf_level, lines)) bits.append("\nCode scanned:") - bits.append('\tTotal lines of code: %i' % - (manager.metrics.data['_totals']['loc'])) + bits.append( + "\tTotal lines of code: %i" + % (manager.metrics.data["_totals"]["loc"]) + ) - bits.append('\tTotal lines skipped (#nosec): %i' % - (manager.metrics.data['_totals']['nosec'])) + bits.append( + "\tTotal lines skipped (#nosec): %i" + % (manager.metrics.data["_totals"]["nosec"]) + ) skipped = manager.get_skipped() bits.append(get_metrics(manager)) bits.append("Files skipped (%i):" % len(skipped)) bits.extend(["\t%s (%s)" % skip for skip in skipped]) - result = '\n'.join([bit for bit in bits]) + '\n' + result = "\n".join([bit for bit in bits]) + "\n" with fileobj: wrapped_file = utils.wrap_file_object(fileobj) diff --git a/bandit/formatters/utils.py b/bandit/formatters/utils.py index eee762f12..834892a26 100644 --- a/bandit/formatters/utils.py +++ b/bandit/formatters/utils.py @@ -2,7 +2,6 @@ # # 
SPDX-License-Identifier: Apache-2.0 """Utility functions for formatting plugins for Bandit.""" - import io diff --git a/bandit/formatters/xml.py b/bandit/formatters/xml.py index 522571595..f3c2bc2ed 100644 --- a/bandit/formatters/xml.py +++ b/bandit/formatters/xml.py @@ -1,7 +1,5 @@ -# -*- coding:utf-8 -*- # # SPDX-License-Identifier: Apache-2.0 - r""" ============= XML Formatter @@ -29,8 +27,6 @@ """ # This future import is necessary here due to the xml import below on Python # 2.7 -from __future__ import absolute_import - import logging import sys from xml.etree import cElementTree as ET @@ -41,41 +37,51 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): - '''Prints issues in XML format + """Prints issues in XML format :param manager: the bandit manager object :param fileobj: The output file object, which may be sys.stdout :param sev_level: Filtering severity level :param conf_level: Filtering confidence level :param lines: Number of lines to report, -1 for all - ''' + """ issues = manager.get_issue_list(sev_level=sev_level, conf_level=conf_level) - root = ET.Element('testsuite', name='bandit', tests=str(len(issues))) + root = ET.Element("testsuite", name="bandit", tests=str(len(issues))) for issue in issues: test = issue.test - testcase = ET.SubElement(root, 'testcase', - classname=issue.fname, name=test) - - text = 'Test ID: %s Severity: %s Confidence: %s\n%s\nLocation %s:%s' - text = text % (issue.test_id, issue.severity, issue.confidence, - issue.text, issue.fname, issue.lineno) - ET.SubElement(testcase, 'error', - more_info=docs_utils.get_url(issue.test_id), - type=issue.severity, - message=issue.text).text = text + testcase = ET.SubElement( + root, "testcase", classname=issue.fname, name=test + ) + + text = "Test ID: %s Severity: %s Confidence: %s\n%s\nLocation %s:%s" + text = text % ( + issue.test_id, + issue.severity, + issue.confidence, + issue.text, + issue.fname, + issue.lineno, + ) + ET.SubElement( + testcase, + "error", + 
more_info=docs_utils.get_url(issue.test_id), + type=issue.severity, + message=issue.text, + ).text = text tree = ET.ElementTree(root) if fileobj.name == sys.stdout.name: fileobj = sys.stdout.buffer - elif fileobj.mode == 'w': + elif fileobj.mode == "w": fileobj.close() fileobj = open(fileobj.name, "wb") with fileobj: - tree.write(fileobj, encoding='utf-8', xml_declaration=True) + tree.write(fileobj, encoding="utf-8", xml_declaration=True) if fileobj.name != sys.stdout.name: LOG.info("XML output written to file: %s", fileobj.name) diff --git a/bandit/formatters/yaml.py b/bandit/formatters/yaml.py index 4efa5fbd6..bfd1e46ce 100644 --- a/bandit/formatters/yaml.py +++ b/bandit/formatters/yaml.py @@ -1,7 +1,6 @@ # Copyright (c) 2017 VMware, Inc. # # SPDX-License-Identifier: Apache-2.0 - r""" ============== YAML Formatter @@ -59,8 +58,6 @@ """ # Necessary for this formatter to work when imported on Python 2. Importing # the standard library's yaml module conflicts with the name of this module. 
-from __future__ import absolute_import - import datetime import logging import operator @@ -74,46 +71,49 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1): - '''Prints issues in YAML format + """Prints issues in YAML format :param manager: the bandit manager object :param fileobj: The output file object, which may be sys.stdout :param sev_level: Filtering severity level :param conf_level: Filtering confidence level :param lines: Number of lines to report, -1 for all - ''' + """ - machine_output = {'results': [], 'errors': []} + machine_output = {"results": [], "errors": []} for (fname, reason) in manager.get_skipped(): - machine_output['errors'].append({'filename': fname, 'reason': reason}) + machine_output["errors"].append({"filename": fname, "reason": reason}) - results = manager.get_issue_list(sev_level=sev_level, - conf_level=conf_level) + results = manager.get_issue_list( + sev_level=sev_level, conf_level=conf_level + ) collector = [r.as_dict() for r in results] for elem in collector: - elem['more_info'] = docs_utils.get_url(elem['test_id']) + elem["more_info"] = docs_utils.get_url(elem["test_id"]) itemgetter = operator.itemgetter - if manager.agg_type == 'vuln': - machine_output['results'] = sorted(collector, - key=itemgetter('test_name')) + if manager.agg_type == "vuln": + machine_output["results"] = sorted( + collector, key=itemgetter("test_name") + ) else: - machine_output['results'] = sorted(collector, - key=itemgetter('filename')) + machine_output["results"] = sorted( + collector, key=itemgetter("filename") + ) - machine_output['metrics'] = manager.metrics.data + machine_output["metrics"] = manager.metrics.data - for result in machine_output['results']: - if 'code' in result: - code = result['code'].replace('\n', '\\n') - result['code'] = code + for result in machine_output["results"]: + if "code" in result: + code = result["code"].replace("\n", "\\n") + result["code"] = code # timezone agnostic format TS_FORMAT = "%Y-%m-%dT%H:%M:%SZ" 
time_string = datetime.datetime.utcnow().strftime(TS_FORMAT) - machine_output['generated_at'] = time_string + machine_output["generated_at"] = time_string yaml.safe_dump(machine_output, fileobj, default_flow_style=False) diff --git a/bandit/plugins/app_debug.py b/bandit/plugins/app_debug.py index a53f9801f..ae66459f3 100644 --- a/bandit/plugins/app_debug.py +++ b/bandit/plugins/app_debug.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2015 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ====================================================== B201: Test for use of flask app with debug set to true @@ -38,22 +36,21 @@ .. versionadded:: 0.15.0 """ # noqa: E501 - import bandit from bandit.core import test_properties as test -@test.test_id('B201') -@test.checks('Call') +@test.test_id("B201") +@test.checks("Call") def flask_debug_true(context): - if context.is_module_imported_like('flask'): - if context.call_function_name_qual.endswith('.run'): - if context.check_call_arg_value('debug', 'True'): + if context.is_module_imported_like("flask"): + if context.call_function_name_qual.endswith(".run"): + if context.check_call_arg_value("debug", "True"): return bandit.Issue( severity=bandit.HIGH, confidence=bandit.MEDIUM, text="A Flask app appears to be run with debug=True, " - "which exposes the Werkzeug debugger and allows " - "the execution of arbitrary code.", - lineno=context.get_lineno_for_call_arg('debug'), + "which exposes the Werkzeug debugger and allows " + "the execution of arbitrary code.", + lineno=context.get_lineno_for_call_arg("debug"), ) diff --git a/bandit/plugins/asserts.py b/bandit/plugins/asserts.py index 038936184..7057873eb 100644 --- a/bandit/plugins/asserts.py +++ b/bandit/plugins/asserts.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - r""" ============================ B101: Test for use of assert @@ -56,21 +54,23 @@ def gen_config(name): - if name == 'assert_used': - return {'skips': []} + if name == "assert_used": + return {"skips": []} @test.takes_config -@test.test_id('B101') -@test.checks('Assert') +@test.test_id("B101") +@test.checks("Assert") def assert_used(context, config): - for skip in config.get('skips', []): + for skip in config.get("skips", []): if fnmatch.fnmatch(context.filename, skip): return None return bandit.Issue( severity=bandit.LOW, confidence=bandit.HIGH, - text=("Use of assert detected. The enclosed code " - "will be removed when compiling to optimised byte code.") + text=( + "Use of assert detected. The enclosed code " + "will be removed when compiling to optimised byte code." + ), ) diff --git a/bandit/plugins/crypto_request_no_cert_validation.py b/bandit/plugins/crypto_request_no_cert_validation.py index f44cc7c9e..eed10ecf5 100644 --- a/bandit/plugins/crypto_request_no_cert_validation.py +++ b/bandit/plugins/crypto_request_no_cert_validation.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ============================================= B501: Test for missing certificate validation @@ -40,23 +38,24 @@ .. 
versionadded:: 0.9.0 """ - import bandit from bandit.core import test_properties as test -@test.checks('Call') -@test.test_id('B501') +@test.checks("Call") +@test.test_id("B501") def request_with_no_cert_validation(context): - http_verbs = ('get', 'options', 'head', 'post', 'put', 'patch', 'delete') - if ('requests' in context.call_function_name_qual and - context.call_function_name in http_verbs): - if context.check_call_arg_value('verify', 'False'): + http_verbs = ("get", "options", "head", "post", "put", "patch", "delete") + if ( + "requests" in context.call_function_name_qual + and context.call_function_name in http_verbs + ): + if context.check_call_arg_value("verify", "False"): issue = bandit.Issue( severity=bandit.HIGH, confidence=bandit.HIGH, text="Requests call with verify=False disabling SSL " - "certificate checks, security issue.", - lineno=context.get_lineno_for_call_arg('verify'), + "certificate checks, security issue.", + lineno=context.get_lineno_for_call_arg("verify"), ) return issue diff --git a/bandit/plugins/django_sql_injection.py b/bandit/plugins/django_sql_injection.py index 524a3ee87..457bf8251 100644 --- a/bandit/plugins/django_sql_injection.py +++ b/bandit/plugins/django_sql_injection.py @@ -1,10 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright (C) 2018 [Victor Torre](https://github.com/ehooo) # # SPDX-License-Identifier: Apache-2.0 - - import ast import bandit @@ -19,8 +16,8 @@ def keywords2dict(keywords): return kwargs -@test.checks('Call') -@test.test_id('B610') +@test.checks("Call") +@test.test_id("B610") def django_extra_used(context): """**B610: Potential SQL injection on extra function** @@ -33,24 +30,24 @@ def django_extra_used(context): """ description = "Use of extra potential SQL attack vector." 
- if context.call_function_name == 'extra': + if context.call_function_name == "extra": kwargs = keywords2dict(context.node.keywords) args = context.node.args if args: if len(args) >= 1: - kwargs['select'] = args[0] + kwargs["select"] = args[0] if len(args) >= 2: - kwargs['where'] = args[1] + kwargs["where"] = args[1] if len(args) >= 3: - kwargs['params'] = args[2] + kwargs["params"] = args[2] if len(args) >= 4: - kwargs['tables'] = args[3] + kwargs["tables"] = args[3] if len(args) >= 5: - kwargs['order_by'] = args[4] + kwargs["order_by"] = args[4] if len(args) >= 6: - kwargs['select_params'] = args[5] + kwargs["select_params"] = args[5] insecure = False - for key in ['where', 'tables']: + for key in ["where", "tables"]: if key in kwargs: if isinstance(kwargs[key], ast.List): for val in kwargs[key].elts: @@ -60,14 +57,14 @@ def django_extra_used(context): else: insecure = True break - if not insecure and 'select' in kwargs: - if isinstance(kwargs['select'], ast.Dict): - for k in kwargs['select'].keys: + if not insecure and "select" in kwargs: + if isinstance(kwargs["select"], ast.Dict): + for k in kwargs["select"].keys: if not isinstance(k, ast.Str): insecure = True break if not insecure: - for v in kwargs['select'].values: + for v in kwargs["select"].values: if not isinstance(v, ast.Str): insecure = True break @@ -78,12 +75,12 @@ def django_extra_used(context): return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.MEDIUM, - text=description + text=description, ) -@test.checks('Call') -@test.test_id('B611') +@test.checks("Call") +@test.test_id("B611") def django_rawsql_used(context): """**B611: Potential SQL injection on RawSQL function** @@ -96,12 +93,12 @@ def django_rawsql_used(context): """ description = "Use of RawSQL potential SQL attack vector." 
- if context.is_module_imported_like('django.db.models'): - if context.call_function_name == 'RawSQL': + if context.is_module_imported_like("django.db.models"): + if context.call_function_name == "RawSQL": sql = context.node.args[0] if not isinstance(sql, ast.Str): return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.MEDIUM, - text=description + text=description, ) diff --git a/bandit/plugins/django_xss.py b/bandit/plugins/django_xss.py index fd8bc635e..952d16fae 100644 --- a/bandit/plugins/django_xss.py +++ b/bandit/plugins/django_xss.py @@ -2,15 +2,13 @@ # Copyright 2018 Victor Torre # # SPDX-License-Identifier: Apache-2.0 - - import ast import bandit from bandit.core import test_properties as test -class DeepAssignation(object): +class DeepAssignation: def __init__(self, var_name, ignore_nodes=None): self.var_name = var_name self.ignore_nodes = ignore_nodes @@ -44,7 +42,7 @@ def is_assigned(self, node): assigned = self.is_assigned_in(node.body) elif isinstance(node, ast.With): for withitem in node.items: - var_id = getattr(withitem.optional_vars, 'id', None) + var_id = getattr(withitem.optional_vars, "id", None) if var_id == self.var_name.id: assigned = node else: @@ -98,8 +96,7 @@ def evaluate_var(xss_var, parent, until, ignore_nodes=None): if isinstance(to, ast.Str): secure = True elif isinstance(to, ast.Name): - secure = evaluate_var(to, parent, - to.lineno, ignore_nodes) + secure = evaluate_var(to, parent, to.lineno, ignore_nodes) elif isinstance(to, ast.Call): secure = evaluate_call(to, parent, ignore_nodes) elif isinstance(to, (list, tuple)): @@ -108,8 +105,9 @@ def evaluate_var(xss_var, parent, until, ignore_nodes=None): if isinstance(some_to, ast.Str): num_secure += 1 elif isinstance(some_to, ast.Name): - if evaluate_var(some_to, parent, - node.lineno, ignore_nodes): + if evaluate_var( + some_to, parent, node.lineno, ignore_nodes + ): num_secure += 1 else: break @@ -130,7 +128,7 @@ def evaluate_call(call, parent, ignore_nodes=None): secure = 
False evaluate = False if isinstance(call, ast.Call) and isinstance(call.func, ast.Attribute): - if isinstance(call.func.value, ast.Str) and call.func.attr == 'format': + if isinstance(call.func.value, ast.Str) and call.func.attr == "format": evaluate = True if call.keywords: evaluate = False # TODO(??) get support for this @@ -152,7 +150,8 @@ def evaluate_call(call, parent, ignore_nodes=None): else: break elif isinstance(arg, ast.Starred) and isinstance( - arg.value, (ast.List, ast.Tuple)): + arg.value, (ast.List, ast.Tuple) + ): args.extend(arg.value.elts) num_secure += 1 else: @@ -174,7 +173,7 @@ def transform2call(var): new_call.lineno = var.lineno new_call.func = ast.Attribute() new_call.func.value = var.left - new_call.func.attr = 'format' + new_call.func.attr = "format" if isinstance(var.right, ast.Tuple): new_call.args = var.right.elts else: @@ -220,14 +219,12 @@ def check_risk(node): if not secure: return bandit.Issue( - severity=bandit.MEDIUM, - confidence=bandit.HIGH, - text=description + severity=bandit.MEDIUM, confidence=bandit.HIGH, text=description ) -@test.checks('Call') -@test.test_id('B703') +@test.checks("Call") +@test.test_id("B703") def django_mark_safe(context): """**B703: Potential XSS on mark_safe function** @@ -243,13 +240,13 @@ def django_mark_safe(context): .. versionadded:: 1.5.0 """ - if context.is_module_imported_like('django.utils.safestring'): + if context.is_module_imported_like("django.utils.safestring"): affected_functions = [ - 'mark_safe', - 'SafeText', - 'SafeUnicode', - 'SafeString', - 'SafeBytes' + "mark_safe", + "SafeText", + "SafeUnicode", + "SafeString", + "SafeBytes", ] if context.call_function_name in affected_functions: xss = context.node.args[0] diff --git a/bandit/plugins/exec.py b/bandit/plugins/exec.py index ee5dfbba2..d42918887 100644 --- a/bandit/plugins/exec.py +++ b/bandit/plugins/exec.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - r""" ============================== B102: Test for the use of exec @@ -31,7 +29,6 @@ .. versionadded:: 0.9.0 """ - import bandit from bandit.core import test_properties as test @@ -40,12 +37,12 @@ def exec_issue(): return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.HIGH, - text="Use of exec detected." + text="Use of exec detected.", ) -@test.checks('Call') -@test.test_id('B102') +@test.checks("Call") +@test.test_id("B102") def exec_used(context): - if context.call_function_name_qual == 'exec': + if context.call_function_name_qual == "exec": return exec_issue() diff --git a/bandit/plugins/general_bad_file_permissions.py b/bandit/plugins/general_bad_file_permissions.py index 66893dd00..03b20f72b 100644 --- a/bandit/plugins/general_bad_file_permissions.py +++ b/bandit/plugins/general_bad_file_permissions.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ================================================== B103: Test for setting permissive file permissions @@ -46,22 +44,24 @@ .. 
versionadded:: 0.9.0 """ # noqa: E501 - import stat import bandit from bandit.core import test_properties as test -@test.checks('Call') -@test.test_id('B103') +@test.checks("Call") +@test.test_id("B103") def set_bad_file_permissions(context): - if 'chmod' in context.call_function_name: + if "chmod" in context.call_function_name: if context.call_args_count == 2: mode = context.get_call_arg_at_position(1) - if (mode is not None and isinstance(mode, int) and - (mode & stat.S_IWOTH or mode & stat.S_IXGRP)): + if ( + mode is not None + and isinstance(mode, int) + and (mode & stat.S_IWOTH or mode & stat.S_IXGRP) + ): # world writable is an HIGH, group executable is a MEDIUM if mode & stat.S_IWOTH: sev_level = bandit.HIGH @@ -70,10 +70,10 @@ def set_bad_file_permissions(context): filename = context.get_call_arg_at_position(0) if filename is None: - filename = 'NOT PARSED' + filename = "NOT PARSED" return bandit.Issue( severity=sev_level, confidence=bandit.HIGH, - text="Chmod setting a permissive mask %s on file (%s)." % - (oct(mode), filename) + text="Chmod setting a permissive mask %s on file (%s)." + % (oct(mode), filename), ) diff --git a/bandit/plugins/general_bind_all_interfaces.py b/bandit/plugins/general_bind_all_interfaces.py index 1971aa540..ffdd02c04 100644 --- a/bandit/plugins/general_bind_all_interfaces.py +++ b/bandit/plugins/general_bind_all_interfaces.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ======================================== B104: Test for binding to all interfaces @@ -32,17 +30,16 @@ .. 
versionadded:: 0.9.0 """ - import bandit from bandit.core import test_properties as test -@test.checks('Str') -@test.test_id('B104') +@test.checks("Str") +@test.test_id("B104") def hardcoded_bind_all_interfaces(context): - if context.string_val == '0.0.0.0': + if context.string_val == "0.0.0.0": return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.MEDIUM, - text="Possible binding to all interfaces." + text="Possible binding to all interfaces.", ) diff --git a/bandit/plugins/general_hardcoded_password.py b/bandit/plugins/general_hardcoded_password.py index 4a3db14a1..2450adccd 100644 --- a/bandit/plugins/general_hardcoded_password.py +++ b/bandit/plugins/general_hardcoded_password.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import ast import re @@ -13,8 +11,7 @@ RE_WORDS = "(pas+wo?r?d|pass(phrase)?|pwd|token|secrete?)" RE_CANDIDATES = re.compile( - '(^{0}$|_{0}_|^{0}_|_{0}$)'.format(RE_WORDS), - re.IGNORECASE + "(^{0}$|_{0}_|^{0}_|_{0}$)".format(RE_WORDS), re.IGNORECASE ) @@ -22,11 +19,12 @@ def _report(value): return bandit.Issue( severity=bandit.LOW, confidence=bandit.MEDIUM, - text=("Possible hardcoded password: '%s'" % value)) + text=("Possible hardcoded password: '%s'" % value), + ) -@test.checks('Str') -@test.test_id('B105') +@test.checks("Str") +@test.test_id("B105") def hardcoded_password_string(context): """**B105: Test for use of hard-coded password strings** @@ -80,22 +78,26 @@ def hardcoded_password_string(context): if isinstance(targ, ast.Name) and RE_CANDIDATES.search(targ.id): return _report(node.s) - elif (isinstance(node._bandit_parent, ast.Subscript) - and RE_CANDIDATES.search(node.s)): + elif isinstance( + node._bandit_parent, ast.Subscript + ) and RE_CANDIDATES.search(node.s): # Py39+: looks for "dict[candidate]='some_string'" # subscript -> index -> string assign = node._bandit_parent._bandit_parent - if isinstance(assign, ast.Assign) 
and isinstance(assign.value, - ast.Str): + if isinstance(assign, ast.Assign) and isinstance( + assign.value, ast.Str + ): return _report(assign.value.s) - elif (isinstance(node._bandit_parent, ast.Index) - and RE_CANDIDATES.search(node.s)): + elif isinstance(node._bandit_parent, ast.Index) and RE_CANDIDATES.search( + node.s + ): # looks for "dict[candidate]='some_string'" # assign -> subscript -> index -> string assign = node._bandit_parent._bandit_parent._bandit_parent - if isinstance(assign, ast.Assign) and isinstance(assign.value, - ast.Str): + if isinstance(assign, ast.Assign) and isinstance( + assign.value, ast.Str + ): return _report(assign.value.s) elif isinstance(node._bandit_parent, ast.Compare): @@ -107,8 +109,8 @@ def hardcoded_password_string(context): return _report(comp.comparators[0].s) -@test.checks('Call') -@test.test_id('B106') +@test.checks("Call") +@test.test_id("B106") def hardcoded_password_funcarg(context): """**B106: Test for use of hard-coded password function arguments** @@ -158,8 +160,8 @@ def hardcoded_password_funcarg(context): return _report(kw.value.s) -@test.checks('FunctionDef') -@test.test_id('B107') +@test.checks("FunctionDef") +@test.test_id("B107") def hardcoded_password_default(context): """**B107: Test for use of hard-coded password argument defaults** @@ -207,8 +209,9 @@ def hardcoded_password_default(context): # looks for "def function(candidate='some_string')" # this pads the list of default values with "None" if nothing is given - defs = [None] * (len(context.node.args.args) - - len(context.node.args.defaults)) + defs = [None] * ( + len(context.node.args.args) - len(context.node.args.defaults) + ) defs.extend(context.node.args.defaults) # go through all (param, value)s and look for candidates diff --git a/bandit/plugins/general_hardcoded_tmp.py b/bandit/plugins/general_hardcoded_tmp.py index f01e80a41..bf42f7fab 100644 --- a/bandit/plugins/general_hardcoded_tmp.py +++ b/bandit/plugins/general_hardcoded_tmp.py @@ -1,9 +1,7 
@@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" =================================================== B108: Test for insecure usage of tmp file/directory @@ -49,28 +47,27 @@ .. versionadded:: 0.9.0 """ # noqa: E501 - import bandit from bandit.core import test_properties as test def gen_config(name): - if name == 'hardcoded_tmp_directory': - return {'tmp_dirs': ['/tmp', '/var/tmp', '/dev/shm']} + if name == "hardcoded_tmp_directory": + return {"tmp_dirs": ["/tmp", "/var/tmp", "/dev/shm"]} @test.takes_config -@test.checks('Str') -@test.test_id('B108') +@test.checks("Str") +@test.test_id("B108") def hardcoded_tmp_directory(context, config): - if config is not None and 'tmp_dirs' in config: - tmp_dirs = config['tmp_dirs'] + if config is not None and "tmp_dirs" in config: + tmp_dirs = config["tmp_dirs"] else: - tmp_dirs = ['/tmp', '/var/tmp', '/dev/shm'] + tmp_dirs = ["/tmp", "/var/tmp", "/dev/shm"] if any(context.string_val.startswith(s) for s in tmp_dirs): return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.MEDIUM, - text="Probable insecure usage of temp file/directory." + text="Probable insecure usage of temp file/directory.", ) diff --git a/bandit/plugins/hashlib_new_insecure_functions.py b/bandit/plugins/hashlib_new_insecure_functions.py index f40fc6a41..3d3e7ac98 100644 --- a/bandit/plugins/hashlib_new_insecure_functions.py +++ b/bandit/plugins/hashlib_new_insecure_functions.py @@ -1,7 +1,5 @@ -# -*- coding:utf-8 -*- # # SPDX-License-Identifier: Apache-2.0 - r""" ============================================================================ B324: Test use of insecure md4, md5, or sha1 hash functions in hashlib.new() @@ -29,23 +27,26 @@ .. 
versionadded:: 1.5.0 """ - import bandit from bandit.core import test_properties as test -@test.test_id('B324') -@test.checks('Call') +@test.test_id("B324") +@test.checks("Call") def hashlib_new(context): if isinstance(context.call_function_name_qual, str): - qualname_list = context.call_function_name_qual.split('.') + qualname_list = context.call_function_name_qual.split(".") func = qualname_list[-1] - if 'hashlib' in qualname_list and func == 'new': + if "hashlib" in qualname_list and func == "new": args = context.call_args keywords = context.call_keywords - name = args[0] if args else keywords['name'] - if (isinstance(name, str) and - name.lower() in ('md4', 'md5', 'sha', 'sha1')): + name = args[0] if args else keywords["name"] + if isinstance(name, str) and name.lower() in ( + "md4", + "md5", + "sha", + "sha1", + ): return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.HIGH, diff --git a/bandit/plugins/injection_paramiko.py b/bandit/plugins/injection_paramiko.py index 4d26804b9..270f46b26 100644 --- a/bandit/plugins/injection_paramiko.py +++ b/bandit/plugins/injection_paramiko.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ============================================== B601: Test for shell injection within Paramiko @@ -37,19 +35,22 @@ .. versionadded:: 0.12.0 """ - import bandit from bandit.core import test_properties as test -@test.checks('Call') -@test.test_id('B601') +@test.checks("Call") +@test.test_id("B601") def paramiko_calls(context): - issue_text = ('Possible shell injection via Paramiko call, check inputs ' - 'are properly sanitized.') - for module in ['paramiko']: + issue_text = ( + "Possible shell injection via Paramiko call, check inputs " + "are properly sanitized." 
+ ) + for module in ["paramiko"]: if context.is_module_imported_like(module): - if context.call_function_name in ['exec_command']: - return bandit.Issue(severity=bandit.MEDIUM, - confidence=bandit.MEDIUM, - text=issue_text) + if context.call_function_name in ["exec_command"]: + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.MEDIUM, + text=issue_text, + ) diff --git a/bandit/plugins/injection_shell.py b/bandit/plugins/injection_shell.py index a99cfd700..62d14e99b 100644 --- a/bandit/plugins/injection_shell.py +++ b/bandit/plugins/injection_shell.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import ast import re @@ -12,7 +10,7 @@ # yuck, regex: starts with a windows drive letter (eg C:) # or one of our path delimeter characters (/, \, .) -full_path_match = re.compile(r'^(?:[A-Za-z](?=\:)|[\\\/\.])') +full_path_match = re.compile(r"^(?:[A-Za-z](?=\:)|[\\\/\.])") def _evaluate_shell_call(context): @@ -25,61 +23,62 @@ def _evaluate_shell_call(context): def gen_config(name): - if name == 'shell_injection': + if name == "shell_injection": return { # Start a process using the subprocess module, or one of its # wrappers. - 'subprocess': - ['subprocess.Popen', - 'subprocess.call', - 'subprocess.check_call', - 'subprocess.check_output', - 'subprocess.run'], - + "subprocess": [ + "subprocess.Popen", + "subprocess.call", + "subprocess.check_call", + "subprocess.check_output", + "subprocess.run", + ], # Start a process with a function vulnerable to shell injection. 
- 'shell': - ['os.system', - 'os.popen', - 'os.popen2', - 'os.popen3', - 'os.popen4', - 'popen2.popen2', - 'popen2.popen3', - 'popen2.popen4', - 'popen2.Popen3', - 'popen2.Popen4', - 'commands.getoutput', - 'commands.getstatusoutput'], - + "shell": [ + "os.system", + "os.popen", + "os.popen2", + "os.popen3", + "os.popen4", + "popen2.popen2", + "popen2.popen3", + "popen2.popen4", + "popen2.Popen3", + "popen2.Popen4", + "commands.getoutput", + "commands.getstatusoutput", + ], # Start a process with a function that is not vulnerable to shell # injection. - 'no_shell': - ['os.execl', - 'os.execle', - 'os.execlp', - 'os.execlpe', - 'os.execv', - 'os.execve', - 'os.execvp', - 'os.execvpe', - 'os.spawnl', - 'os.spawnle', - 'os.spawnlp', - 'os.spawnlpe', - 'os.spawnv', - 'os.spawnve', - 'os.spawnvp', - 'os.spawnvpe', - 'os.startfile'] - } + "no_shell": [ + "os.execl", + "os.execle", + "os.execlp", + "os.execlpe", + "os.execv", + "os.execve", + "os.execvp", + "os.execvpe", + "os.spawnl", + "os.spawnle", + "os.spawnlp", + "os.spawnlpe", + "os.spawnv", + "os.spawnve", + "os.spawnvp", + "os.spawnvpe", + "os.startfile", + ], + } def has_shell(context): keywords = context.node.keywords result = False - if 'shell' in context.call_keywords: + if "shell" in context.call_keywords: for key in keywords: - if key.arg == 'shell': + if key.arg == "shell": val = key.value if isinstance(val, ast.Num): result = bool(val.n) @@ -87,7 +86,7 @@ def has_shell(context): result = bool(val.elts) elif isinstance(val, ast.Dict): result = bool(val.keys) - elif isinstance(val, ast.Name) and val.id in ['False', 'None']: + elif isinstance(val, ast.Name) and val.id in ["False", "None"]: result = False elif isinstance(val, ast.NameConstant): result = val.value @@ -96,9 +95,9 @@ def has_shell(context): return result -@test.takes_config('shell_injection') -@test.checks('Call') -@test.test_id('B602') +@test.takes_config("shell_injection") +@test.checks("Call") +@test.test_id("B602") def 
subprocess_popen_with_shell_equals_true(context, config): """**B602: Test for use of popen with shell equals true** @@ -190,7 +189,7 @@ def subprocess_popen_with_shell_equals_true(context, config): .. versionadded:: 0.9.0 """ # noqa: E501 - if config and context.call_function_name_qual in config['subprocess']: + if config and context.call_function_name_qual in config["subprocess"]: if has_shell(context): if len(context.call_args) > 0: sev = _evaluate_shell_call(context) @@ -198,24 +197,24 @@ def subprocess_popen_with_shell_equals_true(context, config): return bandit.Issue( severity=bandit.LOW, confidence=bandit.HIGH, - text='subprocess call with shell=True seems safe, but ' - 'may be changed in the future, consider ' - 'rewriting without shell', - lineno=context.get_lineno_for_call_arg('shell'), + text="subprocess call with shell=True seems safe, but " + "may be changed in the future, consider " + "rewriting without shell", + lineno=context.get_lineno_for_call_arg("shell"), ) else: return bandit.Issue( severity=bandit.HIGH, confidence=bandit.HIGH, - text='subprocess call with shell=True identified, ' - 'security issue.', - lineno=context.get_lineno_for_call_arg('shell'), + text="subprocess call with shell=True identified, " + "security issue.", + lineno=context.get_lineno_for_call_arg("shell"), ) -@test.takes_config('shell_injection') -@test.checks('Call') -@test.test_id('B603') +@test.takes_config("shell_injection") +@test.checks("Call") +@test.test_id("B603") def subprocess_without_shell_equals_true(context, config): """**B603: Test for use of subprocess without shell equals true** @@ -281,20 +280,20 @@ def subprocess_without_shell_equals_true(context, config): .. 
versionadded:: 0.9.0 """ # noqa: E501 - if config and context.call_function_name_qual in config['subprocess']: + if config and context.call_function_name_qual in config["subprocess"]: if not has_shell(context): return bandit.Issue( severity=bandit.LOW, confidence=bandit.HIGH, - text='subprocess call - check for execution of untrusted ' - 'input.', - lineno=context.get_lineno_for_call_arg('shell'), + text="subprocess call - check for execution of untrusted " + "input.", + lineno=context.get_lineno_for_call_arg("shell"), ) -@test.takes_config('shell_injection') -@test.checks('Call') -@test.test_id('B604') +@test.takes_config("shell_injection") +@test.checks("Call") +@test.test_id("B604") def any_other_function_with_shell_equals_true(context, config): """**B604: Test for any function with shell equals true** @@ -359,20 +358,20 @@ def any_other_function_with_shell_equals_true(context, config): .. versionadded:: 0.9.0 """ # noqa: E501 - if config and context.call_function_name_qual not in config['subprocess']: + if config and context.call_function_name_qual not in config["subprocess"]: if has_shell(context): return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.LOW, - text='Function call with shell=True parameter identified, ' - 'possible security issue.', - lineno=context.get_lineno_for_call_arg('shell'), - ) + text="Function call with shell=True parameter identified, " + "possible security issue.", + lineno=context.get_lineno_for_call_arg("shell"), + ) -@test.takes_config('shell_injection') -@test.checks('Call') -@test.test_id('B605') +@test.takes_config("shell_injection") +@test.checks("Call") +@test.test_id("B605") def start_process_with_a_shell(context, config): """**B605: Test for starting a process with a shell** @@ -443,29 +442,29 @@ def start_process_with_a_shell(context, config): .. 
versionadded:: 0.10.0 """ # noqa: E501 - if config and context.call_function_name_qual in config['shell']: + if config and context.call_function_name_qual in config["shell"]: if len(context.call_args) > 0: sev = _evaluate_shell_call(context) if sev == bandit.LOW: return bandit.Issue( severity=bandit.LOW, confidence=bandit.HIGH, - text='Starting a process with a shell: ' - 'Seems safe, but may be changed in the future, ' - 'consider rewriting without shell' + text="Starting a process with a shell: " + "Seems safe, but may be changed in the future, " + "consider rewriting without shell", ) else: return bandit.Issue( severity=bandit.HIGH, confidence=bandit.HIGH, - text='Starting a process with a shell, possible injection' - ' detected, security issue.' + text="Starting a process with a shell, possible injection" + " detected, security issue.", ) -@test.takes_config('shell_injection') -@test.checks('Call') -@test.test_id('B606') +@test.takes_config("shell_injection") +@test.checks("Call") +@test.test_id("B606") def start_process_with_no_shell(context, config): """**B606: Test for starting a process with no shell** @@ -542,17 +541,17 @@ def start_process_with_no_shell(context, config): .. versionadded:: 0.10.0 """ # noqa: E501 - if config and context.call_function_name_qual in config['no_shell']: + if config and context.call_function_name_qual in config["no_shell"]: return bandit.Issue( severity=bandit.LOW, confidence=bandit.MEDIUM, - text='Starting a process without a shell.' 
+ text="Starting a process without a shell.", ) -@test.takes_config('shell_injection') -@test.checks('Call') -@test.test_id('B607') +@test.takes_config("shell_injection") +@test.checks("Call") +@test.test_id("B607") def start_process_with_partial_path(context, config): """**B607: Test for starting a process with a partial path** @@ -627,9 +626,11 @@ def start_process_with_partial_path(context, config): """ if config and len(context.call_args): - if(context.call_function_name_qual in config['subprocess'] or - context.call_function_name_qual in config['shell'] or - context.call_function_name_qual in config['no_shell']): + if ( + context.call_function_name_qual in config["subprocess"] + or context.call_function_name_qual in config["shell"] + or context.call_function_name_qual in config["no_shell"] + ): node = context.node.args[0] # some calls take an arg list, check the first part @@ -641,5 +642,5 @@ def start_process_with_partial_path(context, config): return bandit.Issue( severity=bandit.LOW, confidence=bandit.HIGH, - text='Starting a process with a partial executable path' + text="Starting a process with a partial executable path", ) diff --git a/bandit/plugins/injection_sql.py b/bandit/plugins/injection_sql.py index 582ec1763..7f7b4ccf5 100644 --- a/bandit/plugins/injection_sql.py +++ b/bandit/plugins/injection_sql.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ============================ B608: Test for SQL injection @@ -50,7 +48,6 @@ .. 
versionadded:: 0.9.0 """ # noqa: E501 - import ast import re @@ -59,10 +56,10 @@ from bandit.core import utils SIMPLE_SQL_RE = re.compile( - r'(select\s.*from\s|' - r'delete\s+from\s|' - r'insert\s+into\s.*values\s|' - r'update\s.*set\s)', + r"(select\s.*from\s|" + r"delete\s+from\s|" + r"insert\s+into\s.*values\s|" + r"update\s.*set\s)", re.IGNORECASE | re.DOTALL, ) @@ -73,32 +70,35 @@ def _check_string(data): def _evaluate_ast(node): wrapper = None - statement = '' + statement = "" if isinstance(node._bandit_parent, ast.BinOp): out = utils.concat_string(node, node._bandit_parent) wrapper = out[0]._bandit_parent statement = out[1] - elif (isinstance(node._bandit_parent, ast.Attribute) - and node._bandit_parent.attr == 'format'): + elif ( + isinstance(node._bandit_parent, ast.Attribute) + and node._bandit_parent.attr == "format" + ): statement = node.s # Hierarchy for "".format() is Wrapper -> Call -> Attribute -> Str wrapper = node._bandit_parent._bandit_parent._bandit_parent - elif (hasattr(ast, 'JoinedStr') - and isinstance(node._bandit_parent, ast.JoinedStr)): + elif hasattr(ast, "JoinedStr") and isinstance( + node._bandit_parent, ast.JoinedStr + ): statement = node.s wrapper = node._bandit_parent._bandit_parent if isinstance(wrapper, ast.Call): # wrapped in "execute" call? - names = ['execute', 'executemany'] + names = ["execute", "executemany"] name = utils.get_called_name(wrapper) return (name in names, statement) else: return (False, statement) -@test.checks('Str') -@test.test_id('B608') +@test.checks("Str") +@test.test_id("B608") def hardcoded_sql_expressions(context): val = _evaluate_ast(context.node) if _check_string(val[1]): @@ -106,5 +106,5 @@ def hardcoded_sql_expressions(context): severity=bandit.MEDIUM, confidence=bandit.MEDIUM if val[0] else bandit.LOW, text="Possible SQL injection vector through string-based " - "query construction." 
+ "query construction.", ) diff --git a/bandit/plugins/injection_wildcard.py b/bandit/plugins/injection_wildcard.py index 0988129dc..0da922c48 100644 --- a/bandit/plugins/injection_wildcard.py +++ b/bandit/plugins/injection_wildcard.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ======================================== B609: Test for use of wildcard injection @@ -95,7 +93,6 @@ .. versionadded:: 0.9.0 """ - import bandit from bandit.core import test_properties as test from bandit.plugins import injection_shell # NOTE(tkelsey): shared config @@ -104,36 +101,37 @@ gen_config = injection_shell.gen_config -@test.takes_config('shell_injection') -@test.checks('Call') -@test.test_id('B609') +@test.takes_config("shell_injection") +@test.checks("Call") +@test.test_id("B609") def linux_commands_wildcard_injection(context, config): - if not ('shell' in config and 'subprocess' in config): + if not ("shell" in config and "subprocess" in config): return - vulnerable_funcs = ['chown', 'chmod', 'tar', 'rsync'] - if context.call_function_name_qual in config['shell'] or ( - context.call_function_name_qual in config['subprocess'] and - context.check_call_arg_value('shell', 'True')): + vulnerable_funcs = ["chown", "chmod", "tar", "rsync"] + if context.call_function_name_qual in config["shell"] or ( + context.call_function_name_qual in config["subprocess"] + and context.check_call_arg_value("shell", "True") + ): if context.call_args_count >= 1: call_argument = context.get_call_arg_at_position(0) - argument_string = '' + argument_string = "" if isinstance(call_argument, list): for li in call_argument: - argument_string = argument_string + ' %s' % li + argument_string = argument_string + " %s" % li elif isinstance(call_argument, str): argument_string = call_argument - if argument_string != '': + if argument_string != "": for vulnerable_func in vulnerable_funcs: - if( - vulnerable_func 
in argument_string and - '*' in argument_string + if ( + vulnerable_func in argument_string + and "*" in argument_string ): return bandit.Issue( severity=bandit.HIGH, confidence=bandit.MEDIUM, - text="Possible wildcard injection in call: %s" % - context.call_function_name_qual, - lineno=context.get_lineno_for_call_arg('shell'), + text="Possible wildcard injection in call: %s" + % context.call_function_name_qual, + lineno=context.get_lineno_for_call_arg("shell"), ) diff --git a/bandit/plugins/insecure_ssl_tls.py b/bandit/plugins/insecure_ssl_tls.py index 24e592880..c2d750839 100644 --- a/bandit/plugins/insecure_ssl_tls.py +++ b/bandit/plugins/insecure_ssl_tls.py @@ -1,32 +1,33 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import bandit from bandit.core import test_properties as test def get_bad_proto_versions(config): - return config['bad_protocol_versions'] + return config["bad_protocol_versions"] def gen_config(name): - if name == 'ssl_with_bad_version': - return {'bad_protocol_versions': - ['PROTOCOL_SSLv2', - 'SSLv2_METHOD', - 'SSLv23_METHOD', - 'PROTOCOL_SSLv3', # strict option - 'PROTOCOL_TLSv1', # strict option - 'SSLv3_METHOD', # strict option - 'TLSv1_METHOD']} # strict option + if name == "ssl_with_bad_version": + return { + "bad_protocol_versions": [ + "PROTOCOL_SSLv2", + "SSLv2_METHOD", + "SSLv23_METHOD", + "PROTOCOL_SSLv3", # strict option + "PROTOCOL_TLSv1", # strict option + "SSLv3_METHOD", # strict option + "TLSv1_METHOD", + ] + } # strict option @test.takes_config -@test.checks('Call') -@test.test_id('B502') +@test.checks("Call") +@test.test_id("B502") def ssl_with_bad_version(context, config): """**B502: Test for SSL use with bad version used** @@ -100,43 +101,47 @@ def ssl_with_bad_version(context, config): .. 
versionadded:: 0.9.0 """ bad_ssl_versions = get_bad_proto_versions(config) - if context.call_function_name_qual == 'ssl.wrap_socket': - if context.check_call_arg_value('ssl_version', bad_ssl_versions): + if context.call_function_name_qual == "ssl.wrap_socket": + if context.check_call_arg_value("ssl_version", bad_ssl_versions): return bandit.Issue( severity=bandit.HIGH, confidence=bandit.HIGH, text="ssl.wrap_socket call with insecure SSL/TLS protocol " - "version identified, security issue.", - lineno=context.get_lineno_for_call_arg('ssl_version'), + "version identified, security issue.", + lineno=context.get_lineno_for_call_arg("ssl_version"), ) - elif context.call_function_name_qual == 'pyOpenSSL.SSL.Context': - if context.check_call_arg_value('method', bad_ssl_versions): + elif context.call_function_name_qual == "pyOpenSSL.SSL.Context": + if context.check_call_arg_value("method", bad_ssl_versions): return bandit.Issue( severity=bandit.HIGH, confidence=bandit.HIGH, text="SSL.Context call with insecure SSL/TLS protocol " - "version identified, security issue.", - lineno=context.get_lineno_for_call_arg('method'), + "version identified, security issue.", + lineno=context.get_lineno_for_call_arg("method"), ) - elif (context.call_function_name_qual != 'ssl.wrap_socket' and - context.call_function_name_qual != 'pyOpenSSL.SSL.Context'): - if (context.check_call_arg_value('method', bad_ssl_versions) or - context.check_call_arg_value('ssl_version', bad_ssl_versions)): - lineno = (context.get_lineno_for_call_arg('method') or - context.get_lineno_for_call_arg('ssl_version')) + elif ( + context.call_function_name_qual != "ssl.wrap_socket" + and context.call_function_name_qual != "pyOpenSSL.SSL.Context" + ): + if context.check_call_arg_value( + "method", bad_ssl_versions + ) or context.check_call_arg_value("ssl_version", bad_ssl_versions): + lineno = context.get_lineno_for_call_arg( + "method" + ) or context.get_lineno_for_call_arg("ssl_version") return bandit.Issue( 
severity=bandit.MEDIUM, confidence=bandit.MEDIUM, text="Function call with insecure SSL/TLS protocol " - "identified, possible security issue.", + "identified, possible security issue.", lineno=lineno, ) @test.takes_config("ssl_with_bad_version") -@test.checks('FunctionDef') -@test.test_id('B503') +@test.checks("FunctionDef") +@test.test_id("B503") def ssl_with_bad_defaults(context, config): """**B503: Test for SSL use with bad defaults specified** @@ -186,13 +191,13 @@ def ssl_with_bad_defaults(context, config): severity=bandit.MEDIUM, confidence=bandit.MEDIUM, text="Function definition identified with insecure SSL/TLS " - "protocol version by default, possible security " - "issue." + "protocol version by default, possible security " + "issue.", ) -@test.checks('Call') -@test.test_id('B504') +@test.checks("Call") +@test.test_id("B504") def ssl_with_no_version(context): """**B504: Test for SSL use with no version specified** @@ -234,8 +239,8 @@ def ssl_with_no_version(context): .. versionadded:: 0.9.0 """ - if context.call_function_name_qual == 'ssl.wrap_socket': - if context.check_call_arg_value('ssl_version') is None: + if context.call_function_name_qual == "ssl.wrap_socket": + if context.check_call_arg_value("ssl_version") is None: # check_call_arg_value() returns False if the argument is found # but does not match the supplied value (or the default None). # It returns None if the arg_name passed doesn't exist. 
This @@ -244,7 +249,7 @@ def ssl_with_no_version(context): severity=bandit.LOW, confidence=bandit.MEDIUM, text="ssl.wrap_socket call with no SSL/TLS protocol version " - "specified, the default SSLv23 could be insecure, " - "possible security issue.", - lineno=context.get_lineno_for_call_arg('ssl_version'), + "specified, the default SSLv23 could be insecure, " + "possible security issue.", + lineno=context.get_lineno_for_call_arg("ssl_version"), ) diff --git a/bandit/plugins/jinja2_templates.py b/bandit/plugins/jinja2_templates.py index 5178cfe6b..d79600e9e 100644 --- a/bandit/plugins/jinja2_templates.py +++ b/bandit/plugins/jinja2_templates.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ========================================== B701: Test for not auto escaping in jinja2 @@ -59,55 +57,60 @@ .. versionadded:: 0.10.0 """ - import ast import bandit from bandit.core import test_properties as test -@test.checks('Call') -@test.test_id('B701') +@test.checks("Call") +@test.test_id("B701") def jinja2_autoescape_false(context): # check type just to be safe if isinstance(context.call_function_name_qual, str): - qualname_list = context.call_function_name_qual.split('.') + qualname_list = context.call_function_name_qual.split(".") func = qualname_list[-1] - if 'jinja2' in qualname_list and func == 'Environment': + if "jinja2" in qualname_list and func == "Environment": for node in ast.walk(context.node): if isinstance(node, ast.keyword): # definite autoescape = False - if (getattr(node, 'arg', None) == 'autoescape' and - (getattr(node.value, 'id', None) == 'False' or - getattr(node.value, 'value', None) is False)): + if getattr(node, "arg", None) == "autoescape" and ( + getattr(node.value, "id", None) == "False" + or getattr(node.value, "value", None) is False + ): return bandit.Issue( severity=bandit.HIGH, confidence=bandit.HIGH, text="Using jinja2 templates with 
autoescape=" - "False is dangerous and can lead to XSS. " - "Use autoescape=True or use the " - "select_autoescape function to mitigate XSS " - "vulnerabilities." + "False is dangerous and can lead to XSS. " + "Use autoescape=True or use the " + "select_autoescape function to mitigate XSS " + "vulnerabilities.", ) # found autoescape - if getattr(node, 'arg', None) == 'autoescape': - value = getattr(node, 'value', None) - if (getattr(value, 'id', None) == 'True' or - getattr(value, 'value', None) is True): + if getattr(node, "arg", None) == "autoescape": + value = getattr(node, "value", None) + if ( + getattr(value, "id", None) == "True" + or getattr(value, "value", None) is True + ): return # Check if select_autoescape function is used. - elif isinstance(value, ast.Call) and getattr( - value.func, 'id', None) == 'select_autoescape': + elif ( + isinstance(value, ast.Call) + and getattr(value.func, "id", None) + == "select_autoescape" + ): return else: return bandit.Issue( severity=bandit.HIGH, confidence=bandit.MEDIUM, text="Using jinja2 templates with autoescape=" - "False is dangerous and can lead to XSS. " - "Ensure autoescape=True or use the " - "select_autoescape function to mitigate " - "XSS vulnerabilities." + "False is dangerous and can lead to XSS. " + "Ensure autoescape=True or use the " + "select_autoescape function to mitigate " + "XSS vulnerabilities.", ) # We haven't found a keyword named autoescape, indicating default # behavior @@ -115,6 +118,6 @@ def jinja2_autoescape_false(context): severity=bandit.HIGH, confidence=bandit.HIGH, text="By default, jinja2 sets autoescape to False. Consider " - "using autoescape=True or use the select_autoescape " - "function to mitigate XSS vulnerabilities." 
+ "using autoescape=True or use the select_autoescape " + "function to mitigate XSS vulnerabilities.", ) diff --git a/bandit/plugins/mako_templates.py b/bandit/plugins/mako_templates.py index 8a7491338..e25b10a13 100644 --- a/bandit/plugins/mako_templates.py +++ b/bandit/plugins/mako_templates.py @@ -1,7 +1,5 @@ -# -*- coding:utf-8 -*- # # SPDX-License-Identifier: Apache-2.0 - r""" ==================================== B702: Test for use of mako templates @@ -39,27 +37,26 @@ .. versionadded:: 0.10.0 """ - import bandit from bandit.core import test_properties as test -@test.checks('Call') -@test.test_id('B702') +@test.checks("Call") +@test.test_id("B702") def use_of_mako_templates(context): # check type just to be safe if isinstance(context.call_function_name_qual, str): - qualname_list = context.call_function_name_qual.split('.') + qualname_list = context.call_function_name_qual.split(".") func = qualname_list[-1] - if 'mako' in qualname_list and func == 'Template': + if "mako" in qualname_list and func == "Template": # unlike Jinja2, mako does not have a template wide autoescape # feature and thus each variable must be carefully sanitized. return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.HIGH, text="Mako templates allow HTML/JS rendering by default and " - "are inherently open to XSS attacks. Ensure variables " - "in all templates are properly sanitized via the 'n', " - "'h' or 'x' flags (depending on context). For example, " - "to HTML escape the variable 'data' do ${ data |h }." + "are inherently open to XSS attacks. Ensure variables " + "in all templates are properly sanitized via the 'n', " + "'h' or 'x' flags (depending on context). 
For example, " + "to HTML escape the variable 'data' do ${ data |h }.", ) diff --git a/bandit/plugins/ssh_no_host_key_verification.py b/bandit/plugins/ssh_no_host_key_verification.py index c491c538b..88be94fe2 100644 --- a/bandit/plugins/ssh_no_host_key_verification.py +++ b/bandit/plugins/ssh_no_host_key_verification.py @@ -1,7 +1,6 @@ # Copyright (c) 2018 VMware, Inc. # # SPDX-License-Identifier: Apache-2.0 - r""" ========================================== B507: Test for missing host key validation @@ -32,24 +31,28 @@ .. versionadded:: 1.5.1 """ - import bandit from bandit.core import test_properties as test -@test.checks('Call') -@test.test_id('B507') +@test.checks("Call") +@test.test_id("B507") def ssh_no_host_key_verification(context): - if (context.is_module_imported_like('paramiko') and - context.call_function_name == 'set_missing_host_key_policy'): - if (context.call_args and - context.call_args[0] in ['AutoAddPolicy', 'WarningPolicy']): + if ( + context.is_module_imported_like("paramiko") + and context.call_function_name == "set_missing_host_key_policy" + ): + if context.call_args and context.call_args[0] in [ + "AutoAddPolicy", + "WarningPolicy", + ]: issue = bandit.Issue( severity=bandit.HIGH, confidence=bandit.MEDIUM, - text='Paramiko call with policy set to automatically trust ' - 'the unknown host key.', + text="Paramiko call with policy set to automatically trust " + "the unknown host key.", lineno=context.get_lineno_for_call_arg( - 'set_missing_host_key_policy'), + "set_missing_host_key_policy" + ), ) return issue diff --git a/bandit/plugins/try_except_continue.py b/bandit/plugins/try_except_continue.py index 264a23338..170f53c72 100644 --- a/bandit/plugins/try_except_continue.py +++ b/bandit/plugins/try_except_continue.py @@ -2,7 +2,6 @@ # Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - r""" ============================================= B112: Test for a continue in the except block @@ -70,7 +69,6 @@ class (or no type). To accommodate this, the test may be configured to ignore .. versionadded:: 1.0.0 """ - import ast import bandit @@ -78,23 +76,26 @@ class (or no type). To accommodate this, the test may be configured to ignore def gen_config(name): - if name == 'try_except_continue': - return {'check_typed_exception': False} + if name == "try_except_continue": + return {"check_typed_exception": False} @test.takes_config -@test.checks('ExceptHandler') -@test.test_id('B112') +@test.checks("ExceptHandler") +@test.test_id("B112") def try_except_continue(context, config): node = context.node if len(node.body) == 1: - if (not config['check_typed_exception'] and - node.type is not None and - getattr(node.type, 'id', None) != 'Exception'): + if ( + not config["check_typed_exception"] + and node.type is not None + and getattr(node.type, "id", None) != "Exception" + ): return if isinstance(node.body[0], ast.Continue): return bandit.Issue( severity=bandit.LOW, confidence=bandit.HIGH, - text=("Try, Except, Continue detected.")) + text=("Try, Except, Continue detected."), + ) diff --git a/bandit/plugins/try_except_pass.py b/bandit/plugins/try_except_pass.py index ae107ca2d..5aae7986d 100644 --- a/bandit/plugins/try_except_pass.py +++ b/bandit/plugins/try_except_pass.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - r""" ========================================= B110: Test for a pass in the except block @@ -69,7 +67,6 @@ class (or no type). To accommodate this, the test may be configured to ignore .. versionadded:: 0.13.0 """ - import ast import bandit @@ -77,24 +74,26 @@ class (or no type). 
To accommodate this, the test may be configured to ignore def gen_config(name): - if name == 'try_except_pass': - return {'check_typed_exception': False} + if name == "try_except_pass": + return {"check_typed_exception": False} @test.takes_config -@test.checks('ExceptHandler') -@test.test_id('B110') +@test.checks("ExceptHandler") +@test.test_id("B110") def try_except_pass(context, config): node = context.node if len(node.body) == 1: - if (not config['check_typed_exception'] and - node.type is not None and - getattr(node.type, 'id', None) != 'Exception'): + if ( + not config["check_typed_exception"] + and node.type is not None + and getattr(node.type, "id", None) != "Exception" + ): return if isinstance(node.body[0], ast.Pass): return bandit.Issue( severity=bandit.LOW, confidence=bandit.HIGH, - text=("Try, Except, Pass detected.") + text=("Try, Except, Pass detected."), ) diff --git a/bandit/plugins/weak_cryptographic_key.py b/bandit/plugins/weak_cryptographic_key.py index f529273a9..aafc1e45c 100644 --- a/bandit/plugins/weak_cryptographic_key.py +++ b/bandit/plugins/weak_cryptographic_key.py @@ -1,7 +1,6 @@ # Copyright (c) 2015 VMware, Inc. # # SPDX-License-Identifier: Apache-2.0 - r""" ========================================= B505: Test for weak cryptographic key use @@ -35,20 +34,19 @@ .. 
versionadded:: 0.14.0 """ - import bandit from bandit.core import test_properties as test def gen_config(name): - if name == 'weak_cryptographic_key': + if name == "weak_cryptographic_key": return { - 'weak_key_size_dsa_high': 1024, - 'weak_key_size_dsa_medium': 2048, - 'weak_key_size_rsa_high': 1024, - 'weak_key_size_rsa_medium': 2048, - 'weak_key_size_ec_high': 160, - 'weak_key_size_ec_medium': 224, + "weak_key_size_dsa_high": 1024, + "weak_key_size_dsa_medium": 2048, + "weak_key_size_rsa_high": 1024, + "weak_key_size_rsa_medium": 2048, + "weak_key_size_ec_high": 160, + "weak_key_size_ec_medium": 224, } @@ -58,12 +56,18 @@ def _classify_key_size(config, key_type, key_size): return key_sizes = { - 'DSA': [(config['weak_key_size_dsa_high'], bandit.HIGH), - (config['weak_key_size_dsa_medium'], bandit.MEDIUM)], - 'RSA': [(config['weak_key_size_rsa_high'], bandit.HIGH), - (config['weak_key_size_rsa_medium'], bandit.MEDIUM)], - 'EC': [(config['weak_key_size_ec_high'], bandit.HIGH), - (config['weak_key_size_ec_medium'], bandit.MEDIUM)], + "DSA": [ + (config["weak_key_size_dsa_high"], bandit.HIGH), + (config["weak_key_size_dsa_medium"], bandit.MEDIUM), + ], + "RSA": [ + (config["weak_key_size_rsa_high"], bandit.HIGH), + (config["weak_key_size_rsa_medium"], bandit.MEDIUM), + ], + "EC": [ + (config["weak_key_size_ec_high"], bandit.HIGH), + (config["weak_key_size_ec_medium"], bandit.MEDIUM), + ], } for size, level in key_sizes[key_type]: @@ -71,60 +75,68 @@ def _classify_key_size(config, key_type, key_size): return bandit.Issue( severity=level, confidence=bandit.HIGH, - text='%s key sizes below %d bits are considered breakable. ' % - (key_type, size)) + text="%s key sizes below %d bits are considered breakable. " + % (key_type, size), + ) def _weak_crypto_key_size_cryptography_io(context, config): func_key_type = { - 'cryptography.hazmat.primitives.asymmetric.dsa.' - 'generate_private_key': 'DSA', - 'cryptography.hazmat.primitives.asymmetric.rsa.' 
- 'generate_private_key': 'RSA', - 'cryptography.hazmat.primitives.asymmetric.ec.' - 'generate_private_key': 'EC', + "cryptography.hazmat.primitives.asymmetric.dsa." + "generate_private_key": "DSA", + "cryptography.hazmat.primitives.asymmetric.rsa." + "generate_private_key": "RSA", + "cryptography.hazmat.primitives.asymmetric.ec." + "generate_private_key": "EC", } arg_position = { - 'DSA': 0, - 'RSA': 1, - 'EC': 0, + "DSA": 0, + "RSA": 1, + "EC": 0, } key_type = func_key_type.get(context.call_function_name_qual) - if key_type in ['DSA', 'RSA']: - key_size = (context.get_call_arg_value('key_size') or - context.get_call_arg_at_position(arg_position[key_type]) or - 2048) + if key_type in ["DSA", "RSA"]: + key_size = ( + context.get_call_arg_value("key_size") + or context.get_call_arg_at_position(arg_position[key_type]) + or 2048 + ) return _classify_key_size(config, key_type, key_size) - elif key_type == 'EC': + elif key_type == "EC": curve_key_sizes = { - 'SECP192R1': 192, - 'SECT163K1': 163, - 'SECT163R2': 163, + "SECP192R1": 192, + "SECT163K1": 163, + "SECT163R2": 163, } - curve = (context.get_call_arg_value('curve') or - context.call_args[arg_position[key_type]]) + curve = ( + context.get_call_arg_value("curve") + or context.call_args[arg_position[key_type]] + ) key_size = curve_key_sizes[curve] if curve in curve_key_sizes else 224 return _classify_key_size(config, key_type, key_size) def _weak_crypto_key_size_pycrypto(context, config): func_key_type = { - 'Crypto.PublicKey.DSA.generate': 'DSA', - 'Crypto.PublicKey.RSA.generate': 'RSA', - 'Cryptodome.PublicKey.DSA.generate': 'DSA', - 'Cryptodome.PublicKey.RSA.generate': 'RSA', + "Crypto.PublicKey.DSA.generate": "DSA", + "Crypto.PublicKey.RSA.generate": "RSA", + "Cryptodome.PublicKey.DSA.generate": "DSA", + "Cryptodome.PublicKey.RSA.generate": "RSA", } key_type = func_key_type.get(context.call_function_name_qual) if key_type: - key_size = (context.get_call_arg_value('bits') or - context.get_call_arg_at_position(0) 
or - 2048) + key_size = ( + context.get_call_arg_value("bits") + or context.get_call_arg_at_position(0) + or 2048 + ) return _classify_key_size(config, key_type, key_size) @test.takes_config -@test.checks('Call') -@test.test_id('B505') +@test.checks("Call") +@test.test_id("B505") def weak_cryptographic_key(context, config): - return (_weak_crypto_key_size_cryptography_io(context, config) or - _weak_crypto_key_size_pycrypto(context, config)) + return _weak_crypto_key_size_cryptography_io( + context, config + ) or _weak_crypto_key_size_pycrypto(context, config) diff --git a/bandit/plugins/yaml_load.py b/bandit/plugins/yaml_load.py index 30f975a34..2077790f5 100644 --- a/bandit/plugins/yaml_load.py +++ b/bandit/plugins/yaml_load.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright (c) 2016 Rackspace, Inc. # # SPDX-License-Identifier: Apache-2.0 - r""" =============================== B506: Test for use of yaml load @@ -37,31 +35,32 @@ .. versionadded:: 1.0.0 """ - import bandit from bandit.core import test_properties as test -@test.test_id('B506') -@test.checks('Call') +@test.test_id("B506") +@test.checks("Call") def yaml_load(context): - imported = context.is_module_imported_exact('yaml') + imported = context.is_module_imported_exact("yaml") qualname = context.call_function_name_qual if not imported and isinstance(qualname, str): return - qualname_list = qualname.split('.') + qualname_list = qualname.split(".") func = qualname_list[-1] - if all([ - 'yaml' in qualname_list, - func == 'load', - not context.check_call_arg_value('Loader', 'SafeLoader'), - not context.check_call_arg_value('Loader', 'CSafeLoader'), - ]): + if all( + [ + "yaml" in qualname_list, + func == "load", + not context.check_call_arg_value("Loader", "SafeLoader"), + not context.check_call_arg_value("Loader", "CSafeLoader"), + ] + ): return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.HIGH, text="Use of unsafe yaml load. Allows instantiation of" - " arbitrary objects. 
Consider yaml.safe_load().", + " arbitrary objects. Consider yaml.safe_load().", lineno=context.node.lineno, ) diff --git a/doc/source/conf.py b/doc/source/conf.py index a64ad04b7..52d490886 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,30 +1,32 @@ -# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 - import os import sys -sys.path.insert(0, os.path.abspath('../..')) +sys.path.insert(0, os.path.abspath("../..")) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', - 'sphinx.ext.coverage', 'sphinx.ext.viewcode'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.coverage", + "sphinx.ext.viewcode", +] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'Bandit' -copyright = u'2016, Bandit Developers' +project = "Bandit" +copyright = "2016, Bandit Developers" # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True @@ -34,18 +36,23 @@ add_module_names = True # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" -modindex_common_prefix = ['bandit.'] +modindex_common_prefix = ["bandit."] - #-- Options for man page output -------------------------------------------- +# -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. 
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ - ('man/bandit', 'bandit', u'Python source code security analyzer', - [u'PyCQA'], 1) + ( + "man/bandit", + "bandit", + "Python source code security analyzer", + ["PyCQA"], + 1, + ) ] # -- Options for HTML output -------------------------------------------------- @@ -53,21 +60,24 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # html_static_path = ['static'] html_theme_options = {} # Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project +htmlhelp_basename = "%sdoc" % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'PyCQA', 'manual'), + ( + "index", + "%s.tex" % project, + "%s Documentation" % project, + "PyCQA", + "manual", + ), ] # Example configuration for intersphinx: refer to the Python standard library. diff --git a/scripts/main.py b/scripts/main.py index 3fd9a077e..be7223f6d 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -1,12 +1,8 @@ #!/usr/bin/env python -# -*- coding:utf-8 -*- - # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - - from bandit import bandit -if __name__ == '__main__': +if __name__ == "__main__": bandit.main() diff --git a/setup.py b/setup.py index 0ca8587bd..f1df3c6d9 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,9 @@ # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - import setuptools setuptools.setup( - python_requires='>=3.5', - setup_requires=['pbr>=2.0.0'], - pbr=True) + python_requires=">=3.5", setup_requires=["pbr>=2.0.0"], pbr=True +) diff --git a/test-requirements.txt b/test-requirements.txt index 51868f6a1..b2e3c379f 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,7 @@ # process, which may cause wedges in the gate later. coverage>=4.5.4 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD -hacking>=2.0.0 # Apache-2.0 +flake8>=4.0.0 # Apache-2.0 stestr>=2.5.0 # Apache-2.0 testscenarios>=0.5.0 # Apache-2.0/BSD testtools>=2.3.0 # MIT diff --git a/tests/functional/test_baseline.py b/tests/functional/test_baseline.py index fd8d831c3..8962c2366 100644 --- a/tests/functional/test_baseline.py +++ b/tests/functional/test_baseline.py @@ -1,7 +1,6 @@ # Copyright 2016 IBM Corp. # # SPDX-License-Identifier: Apache-2.0 - import os import shutil import subprocess @@ -28,23 +27,23 @@ class BaselineFunctionalTests(testtools.TestCase): - '''Functional tests for Bandit baseline. + """Functional tests for Bandit baseline. This set of tests is used to verify that the baseline comparison handles finding and comparing results appropriately. The only comparison is the number of candidates per file, meaning that any candidates found may already exist in the baseline. In this case, all candidates are flagged and a user will need to investigate the candidates related to that file. 
- ''' + """ def setUp(self): - super(BaselineFunctionalTests, self).setUp() - self.examples_path = 'examples' - self.baseline_commands = ['bandit', '-r'] + super().setUp() + self.examples_path = "examples" + self.baseline_commands = ["bandit", "-r"] self.baseline_report_file = "baseline_report.json" def _run_bandit_baseline(self, target_directory, baseline_file): - '''A helper method to run bandit baseline + """A helper method to run bandit baseline This method will run the bandit baseline test provided an existing baseline report and the target directory containing the content to be @@ -52,16 +51,20 @@ def _run_bandit_baseline(self, target_directory, baseline_file): :param target_directory: Directory containing content to be compared :param baseline_file: File containing an existing baseline report :return The baseline test results and return code - ''' - cmds = self.baseline_commands + ['-b', baseline_file, target_directory] - process = subprocess.Popen(cmds, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, close_fds=True) + """ + cmds = self.baseline_commands + ["-b", baseline_file, target_directory] + process = subprocess.Popen( + cmds, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) stdout, stderr = process.communicate() - return (stdout.decode('utf-8'), process.poll()) + return (stdout.decode("utf-8"), process.poll()) def _create_baseline(self, baseline_paired_files): - '''A helper method to create a baseline to use during baseline test + """A helper method to create a baseline to use during baseline test This method will run bandit to create an initial baseline that can then be used during the bandit baseline test. 
Since the file contents @@ -77,41 +80,59 @@ def _create_baseline(self, baseline_paired_files): :return The target directory for the baseline test and the return code of the bandit run to help determine whether the baseline report was populated - ''' + """ target_directory = self.useFixture(fixtures.TempDir()).path - baseline_results = os.path.join(target_directory, - self.baseline_report_file) + baseline_results = os.path.join( + target_directory, self.baseline_report_file + ) for key_file, value_file in baseline_paired_files.items(): - shutil.copy(os.path.join(self.examples_path, value_file), - os.path.join(target_directory, key_file)) - cmds = self.baseline_commands + ['-f', 'json', '-o', baseline_results, - target_directory] - process = subprocess.Popen(cmds, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, close_fds=True) + shutil.copy( + os.path.join(self.examples_path, value_file), + os.path.join(target_directory, key_file), + ) + cmds = self.baseline_commands + [ + "-f", + "json", + "-o", + baseline_results, + target_directory, + ] + process = subprocess.Popen( + cmds, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) stdout, stderr = process.communicate() return_code = process.poll() for key_file, value_file in baseline_paired_files.items(): - shutil.copy(os.path.join(self.examples_path, key_file), - os.path.join(target_directory, key_file)) + shutil.copy( + os.path.join(self.examples_path, key_file), + os.path.join(target_directory, key_file), + ) return (target_directory, return_code) def test_no_new_candidates(self): - '''Tests when there are no new candidates + """Tests when there are no new candidates Test that bandit returns no issues found, as there are no new candidates found compared with those found from the baseline. 
- ''' - baseline_report_files = {"new_candidates-all.py": - "new_candidates-all.py"} - target_directory, baseline_code = (self._create_baseline( - baseline_report_files)) + """ + baseline_report_files = { + "new_candidates-all.py": "new_candidates-all.py" + } + target_directory, baseline_code = self._create_baseline( + baseline_report_files + ) # assert the initial baseline found results self.assertEqual(1, baseline_code) - baseline_report = os.path.join(target_directory, - self.baseline_report_file) - return_value, return_code = (self._run_bandit_baseline( - target_directory, baseline_report)) + baseline_report = os.path.join( + target_directory, self.baseline_report_file + ) + return_value, return_code = self._run_bandit_baseline( + target_directory, baseline_report + ) # assert there were no results (no candidates found) self.assertEqual(0, return_code) self.assertIn(new_candidates_all_total_lines, return_value) @@ -120,20 +141,23 @@ def test_no_new_candidates(self): self.assertIn(baseline_no_issues_found, return_value) def test_no_existing_no_new_candidates(self): - '''Tests when there are no new or existing candidates + """Tests when there are no new or existing candidates Test file with no existing candidates from baseline and no new candidates. 
- ''' + """ baseline_report_files = {"okay.py": "okay.py"} - target_directory, baseline_code = (self._create_baseline( - baseline_report_files)) + target_directory, baseline_code = self._create_baseline( + baseline_report_files + ) # assert the initial baseline found nothing self.assertEqual(0, baseline_code) - baseline_report = os.path.join(target_directory, - self.baseline_report_file) - return_value, return_code = (self._run_bandit_baseline( - target_directory, baseline_report)) + baseline_report = os.path.join( + target_directory, self.baseline_report_file + ) + return_value, return_code = self._run_bandit_baseline( + target_directory, baseline_report + ) # assert there were no results (no candidates found) self.assertEqual(0, return_code) self.assertIn("Total lines of code: 1", return_value) @@ -142,21 +166,25 @@ def test_no_existing_no_new_candidates(self): self.assertIn(baseline_no_issues_found, return_value) def test_no_existing_with_new_candidates(self): - '''Tests when there are new candidates and no existing candidates + """Tests when there are new candidates and no existing candidates Test that bandit returns issues found in file that had no existing candidates from baseline but now contain candidates. 
- ''' - baseline_report_files = {"new_candidates-all.py": - "new_candidates-none.py"} - target_directory, baseline_code = (self._create_baseline( - baseline_report_files)) + """ + baseline_report_files = { + "new_candidates-all.py": "new_candidates-none.py" + } + target_directory, baseline_code = self._create_baseline( + baseline_report_files + ) # assert the initial baseline found nothing self.assertEqual(0, baseline_code) - baseline_report = os.path.join(target_directory, - self.baseline_report_file) - return_value, return_code = (self._run_bandit_baseline( - target_directory, baseline_report)) + baseline_report = os.path.join( + target_directory, self.baseline_report_file + ) + return_value, return_code = self._run_bandit_baseline( + target_directory, baseline_report + ) # assert there were results (candidates found) self.assertEqual(1, return_code) self.assertIn(new_candidates_all_total_lines, return_value) @@ -173,21 +201,25 @@ def test_no_existing_with_new_candidates(self): self.assertIn(candidate_example_five, return_value) def test_existing_and_new_candidates(self): - '''Tests when tere are new candidates and existing candidates + """Tests when there are new candidates and existing candidates Test that bandit returns issues found in file with existing candidates. The new candidates should be returned in this case. 
- ''' - baseline_report_files = {"new_candidates-all.py": - "new_candidates-some.py"} - target_directory, baseline_code = (self._create_baseline( - baseline_report_files)) + """ + baseline_report_files = { + "new_candidates-all.py": "new_candidates-some.py" + } + target_directory, baseline_code = self._create_baseline( + baseline_report_files + ) # assert the initial baseline found results self.assertEqual(1, baseline_code) - baseline_report = os.path.join(target_directory, - self.baseline_report_file) - return_value, return_code = (self._run_bandit_baseline( - target_directory, baseline_report)) + baseline_report = os.path.join( + target_directory, self.baseline_report_file + ) + return_value, return_code = self._run_bandit_baseline( + target_directory, baseline_report + ) # assert there were results (candidates found) self.assertEqual(1, return_code) self.assertIn(new_candidates_all_total_lines, return_value) @@ -201,23 +233,27 @@ def test_existing_and_new_candidates(self): self.assertIn(candidate_example_five, return_value) def test_no_new_candidates_include_nosec(self): - '''Test to check nosec references with no new candidates + """Test to check nosec references with no new candidates Test that nosec references are included during a baseline test, which would normally be ignored. In this test case, there are no new candidates even while including the nosec references. 
- ''' - self.baseline_commands.append('--ignore-nosec') - baseline_report_files = {"new_candidates-all.py": - "new_candidates-all.py"} - target_directory, baseline_code = (self._create_baseline( - baseline_report_files)) + """ + self.baseline_commands.append("--ignore-nosec") + baseline_report_files = { + "new_candidates-all.py": "new_candidates-all.py" + } + target_directory, baseline_code = self._create_baseline( + baseline_report_files + ) # assert the initial baseline found results self.assertEqual(1, baseline_code) - baseline_report = os.path.join(target_directory, - self.baseline_report_file) - return_value, return_code = (self._run_bandit_baseline( - target_directory, baseline_report)) + baseline_report = os.path.join( + target_directory, self.baseline_report_file + ) + return_value, return_code = self._run_bandit_baseline( + target_directory, baseline_report + ) # assert there were no results (candidates found) self.assertEqual(0, return_code) self.assertIn(new_candidates_all_total_lines, return_value) @@ -226,23 +262,27 @@ def test_no_new_candidates_include_nosec(self): self.assertIn(baseline_no_issues_found, return_value) def test_new_candidates_include_nosec_only_nosecs(self): - '''Test to check nosec references with new only nosec candidates + """Test to check nosec references with new only nosec candidates Test that nosec references are included during a baseline test, which would normally be ignored. In this test case, there are new candidates which are specifically nosec references. 
- ''' - self.baseline_commands.append('--ignore-nosec') - baseline_report_files = {"new_candidates-nosec.py": - "new_candidates-none.py"} - target_directory, baseline_code = (self._create_baseline( - baseline_report_files)) + """ + self.baseline_commands.append("--ignore-nosec") + baseline_report_files = { + "new_candidates-nosec.py": "new_candidates-none.py" + } + target_directory, baseline_code = self._create_baseline( + baseline_report_files + ) # assert the initial baseline found nothing self.assertEqual(0, baseline_code) - baseline_report = os.path.join(target_directory, - self.baseline_report_file) - return_value, return_code = (self._run_bandit_baseline( - target_directory, baseline_report)) + baseline_report = os.path.join( + target_directory, self.baseline_report_file + ) + return_value, return_code = self._run_bandit_baseline( + target_directory, baseline_report + ) # assert there were results (candidates found) self.assertEqual(1, return_code) self.assertIn(new_candidates_some_total_lines, return_value) @@ -259,23 +299,27 @@ def test_new_candidates_include_nosec_only_nosecs(self): self.assertIn(candidate_example_six, return_value) def test_new_candidates_include_nosec_new_nosecs(self): - '''Test to check nosec references with new candidates, including nosecs + """Test to check nosec references with new candidates, including nosecs Test that nosec references are included during a baseline test, which would normally be ignored. In this test case, there are new candidates that also includes new nosec references as well. 
- ''' - self.baseline_commands.append('--ignore-nosec') - baseline_report_files = {"new_candidates-all.py": - "new_candidates-none.py"} - target_directory, baseline_code = (self._create_baseline( - baseline_report_files)) + """ + self.baseline_commands.append("--ignore-nosec") + baseline_report_files = { + "new_candidates-all.py": "new_candidates-none.py" + } + target_directory, baseline_code = self._create_baseline( + baseline_report_files + ) # assert the initial baseline found nothing self.assertEqual(0, baseline_code) - baseline_report = os.path.join(target_directory, - self.baseline_report_file) - return_value, return_code = (self._run_bandit_baseline( - target_directory, baseline_report)) + baseline_report = os.path.join( + target_directory, self.baseline_report_file + ) + return_value, return_code = self._run_bandit_baseline( + target_directory, baseline_report + ) # assert there were results (candidates found) self.assertEqual(1, return_code) self.assertIn(new_candidates_all_total_lines, return_value) diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 173d4d67b..39336eb48 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - import os import sys @@ -18,68 +16,69 @@ class FunctionalTests(testtools.TestCase): - '''Functional tests for bandit test plugins. + """Functional tests for bandit test plugins. This set of tests runs bandit against each example file in turn and records the score returned. This is compared to a known good value. When new tests are added to an example the expected result should be adjusted to match. - ''' + """ def setUp(self): - super(FunctionalTests, self).setUp() + super().setUp() # NOTE(tkelsey): bandit is very sensitive to paths, so stitch # them up here for the testing environment. 
# - path = os.path.join(os.getcwd(), 'bandit', 'plugins') + path = os.path.join(os.getcwd(), "bandit", "plugins") b_conf = b_config.BanditConfig() - self.b_mgr = b_manager.BanditManager(b_conf, 'file') - self.b_mgr.b_conf._settings['plugins_dir'] = path + self.b_mgr = b_manager.BanditManager(b_conf, "file") + self.b_mgr.b_conf._settings["plugins_dir"] = path self.b_mgr.b_ts = b_test_set.BanditTestSet(config=b_conf) def run_example(self, example_script, ignore_nosec=False): - '''A helper method to run the specified test + """A helper method to run the specified test This method runs the test, which populates the self.b_mgr.scores value. Call this directly if you need to run a test, but do not need to test the resulting scores against specified values. :param example_script: Filename of an example script to test - ''' - path = os.path.join(os.getcwd(), 'examples', example_script) + """ + path = os.path.join(os.getcwd(), "examples", example_script) self.b_mgr.ignore_nosec = ignore_nosec self.b_mgr.discover_files([path], True) self.b_mgr.run_tests() def check_example(self, example_script, expect, ignore_nosec=False): - '''A helper method to test the scores for example scripts. + """A helper method to test the scores for example scripts. 
:param example_script: Filename of an example script to test :param expect: dict with expected counts of issue types - ''' + """ # reset scores for subsequent calls to check_example self.b_mgr.scores = [] self.run_example(example_script, ignore_nosec=ignore_nosec) result = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, } for test_scores in self.b_mgr.scores: for score_type in test_scores: self.assertIn(score_type, expect) for idx, rank in enumerate(C.RANKING): - result[score_type][rank] = (test_scores[score_type][idx] // - C.RANKING_VALUES[rank]) + result[score_type][rank] = ( + test_scores[score_type][idx] // C.RANKING_VALUES[rank] + ) self.assertDictEqual(expect, result) def check_metrics(self, example_script, expect): - '''A helper method to test the metrics being returned. + """A helper method to test the metrics being returned. 
:param example_script: Filename of an example script to test :param expect: dict with expected values of metrics - ''' + """ self.b_mgr.metrics = metrics.Metrics() self.b_mgr.scores = [] self.run_example(example_script) @@ -87,624 +86,650 @@ def check_metrics(self, example_script, expect): # test general metrics (excludes issue counts) m = self.b_mgr.metrics.data for k in expect: - if k != 'issues': - self.assertEqual(expect[k], m['_totals'][k]) + if k != "issues": + self.assertEqual(expect[k], m["_totals"][k]) # test issue counts - if 'issues' in expect: + if "issues" in expect: for (criteria, default) in C.CRITERIA: for rank in C.RANKING: - label = '{0}.{1}'.format(criteria, rank) + label = f"{criteria}.{rank}" expected = 0 - if expect['issues'].get(criteria).get(rank): - expected = expect['issues'][criteria][rank] - self.assertEqual(expected, m['_totals'][label]) + if expect["issues"].get(criteria).get(rank): + expected = expect["issues"][criteria][rank] + self.assertEqual(expected, m["_totals"][label]) def test_binding(self): - '''Test the bind-to-0.0.0.0 example.''' + """Test the bind-to-0.0.0.0 example.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, } - self.check_example('binding.py', expect) + self.check_example("binding.py", expect) def test_crypto_md5(self): - '''Test the `hashlib.md5` example.''' + """Test the `hashlib.md5` example.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 15, 'HIGH': 4}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 19} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 15, "HIGH": 4}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 19}, } - self.check_example('crypto-md5.py', expect) + self.check_example("crypto-md5.py", expect) def 
test_ciphers(self): - '''Test the `Crypto.Cipher` example.''' + """Test the `Crypto.Cipher` example.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 21}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 22} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 21}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 22}, } - self.check_example('ciphers.py', expect) + self.check_example("ciphers.py", expect) def test_cipher_modes(self): - '''Test for insecure cipher modes.''' + """Test for insecure cipher modes.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('cipher-modes.py', expect) + self.check_example("cipher-modes.py", expect) def test_eval(self): - '''Test the `eval` example.''' + """Test the `eval` example.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 3, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 3, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('eval.py', expect) + self.check_example("eval.py", expect) def test_mark_safe(self): - '''Test the `mark_safe` example.''' + """Test the `mark_safe` example.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('mark_safe.py', expect) + self.check_example("mark_safe.py", expect) def test_exec(self): - '''Test the `exec` example.''' + """Test the `exec` example.""" expect = { - 'SEVERITY': 
{'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('exec.py', expect) + self.check_example("exec.py", expect) def test_hardcoded_passwords(self): - '''Test for hard-coded passwords.''' + """Test for hard-coded passwords.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 12, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 12, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 12, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 12, "HIGH": 0}, } - self.check_example('hardcoded-passwords.py', expect) + self.check_example("hardcoded-passwords.py", expect) def test_hardcoded_tmp(self): - '''Test for hard-coded /tmp, /var/tmp, /dev/shm.''' + """Test for hard-coded /tmp, /var/tmp, /dev/shm.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 3, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 3, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 3, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 3, "HIGH": 0}, } - self.check_example('hardcoded-tmp.py', expect) + self.check_example("hardcoded-tmp.py", expect) def test_httplib_https(self): - '''Test for `httplib.HTTPSConnection`.''' + """Test for `httplib.HTTPSConnection`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 3, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 3, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('httplib_https.py', expect) + self.check_example("httplib_https.py", expect) def test_imports_aliases(self): - '''Test the `import X as Y` syntax.''' + """Test the `import X as Y` syntax.""" expect = { - 'SEVERITY': 
{'UNDEFINED': 0, 'LOW': 4, 'MEDIUM': 5, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 9} + "SEVERITY": {"UNDEFINED": 0, "LOW": 4, "MEDIUM": 5, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 9}, } - self.check_example('imports-aliases.py', expect) + self.check_example("imports-aliases.py", expect) def test_imports_from(self): - '''Test the `from X import Y` syntax.''' + """Test the `from X import Y` syntax.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 3, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 3, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('imports-from.py', expect) + self.check_example("imports-from.py", expect) def test_imports_function(self): - '''Test the `__import__` function.''' + """Test the `__import__` function.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - self.check_example('imports-function.py', expect) + self.check_example("imports-function.py", expect) def test_telnet_usage(self): - '''Test for `import telnetlib` and Telnet.* calls.''' + """Test for `import telnetlib` and Telnet.* calls.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - self.check_example('telnetlib.py', expect) + self.check_example("telnetlib.py", expect) def test_ftp_usage(self): - '''Test for `import ftplib` and FTP.* calls.''' + """Test for `import ftplib` and FTP.* calls.""" expect = { - 
'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - self.check_example('ftplib.py', expect) + self.check_example("ftplib.py", expect) def test_imports(self): - '''Test for dangerous imports.''' + """Test for dangerous imports.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - self.check_example('imports.py', expect) + self.check_example("imports.py", expect) def test_imports_using_importlib(self): - '''Test for dangerous imports using importlib.''' + """Test for dangerous imports using importlib.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 4, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 4} + "SEVERITY": {"UNDEFINED": 0, "LOW": 4, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 4}, } - self.check_example('imports-with-importlib.py', expect) + self.check_example("imports-with-importlib.py", expect) def test_mktemp(self): - '''Test for `tempfile.mktemp`.''' + """Test for `tempfile.mktemp`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 4, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 4} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 4, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 4}, } - self.check_example('mktemp.py', expect) + self.check_example("mktemp.py", expect) def test_tempnam(self): - '''Test for `os.tempnam` / `os.tmpnam`.''' + """Test for `os.tempnam` / `os.tmpnam`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 6, 'HIGH': 
0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 6} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 6, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 6}, } - self.check_example('tempnam.py', expect) + self.check_example("tempnam.py", expect) def test_nonsense(self): - '''Test that a syntactically invalid module is skipped.''' - self.run_example('nonsense.py') + """Test that a syntactically invalid module is skipped.""" + self.run_example("nonsense.py") self.assertEqual(1, len(self.b_mgr.skipped)) def test_okay(self): - '''Test a vulnerability-free file.''' + """Test a vulnerability-free file.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, } - self.check_example('okay.py', expect) + self.check_example("okay.py", expect) def test_subdirectory_okay(self): - '''Test a vulnerability-free file under a subdirectory.''' + """Test a vulnerability-free file under a subdirectory.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, } - self.check_example('init-py-test/subdirectory-okay.py', expect) + self.check_example("init-py-test/subdirectory-okay.py", expect) def test_os_chmod(self): - '''Test setting file permissions.''' + """Test setting file permissions.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 2, 'HIGH': 8}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 9} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 2, "HIGH": 8}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 9}, } - 
self.check_example('os-chmod.py', expect) + self.check_example("os-chmod.py", expect) def test_os_exec(self): - '''Test for `os.exec*`.''' + """Test for `os.exec*`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 8, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 8, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 8, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 8, "HIGH": 0}, } - self.check_example('os-exec.py', expect) + self.check_example("os-exec.py", expect) def test_os_popen(self): - '''Test for `os.popen`.''' + """Test for `os.popen`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 8, 'MEDIUM': 0, 'HIGH': 1}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 9} + "SEVERITY": {"UNDEFINED": 0, "LOW": 8, "MEDIUM": 0, "HIGH": 1}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 9}, } - self.check_example('os-popen.py', expect) + self.check_example("os-popen.py", expect) def test_os_spawn(self): - '''Test for `os.spawn*`.''' + """Test for `os.spawn*`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 8, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 8, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 8, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 8, "HIGH": 0}, } - self.check_example('os-spawn.py', expect) + self.check_example("os-spawn.py", expect) def test_os_startfile(self): - '''Test for `os.startfile`.''' + """Test for `os.startfile`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 3, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 3, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 3, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 3, "HIGH": 0}, } - self.check_example('os-startfile.py', expect) + self.check_example("os-startfile.py", expect) def test_os_system(self): - '''Test for `os.system`.''' + """Test for `os.system`.""" 
expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('os_system.py', expect) + self.check_example("os_system.py", expect) def test_pickle(self): - '''Test for the `pickle` module.''' + """Test for the `pickle` module.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 6, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 8} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 6, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 8}, } - self.check_example('pickle_deserialize.py', expect) + self.check_example("pickle_deserialize.py", expect) def test_dill(self): - '''Test for the `dill` module.''' + """Test for the `dill` module.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 2, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('dill.py', expect) + self.check_example("dill.py", expect) def test_shelve(self): - '''Test for the `shelve` module.''' + """Test for the `shelve` module.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 2, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('shelve_open.py', expect) + self.check_example("shelve_open.py", expect) def test_popen_wrappers(self): - '''Test the `popen2` and `commands` modules.''' + """Test the `popen2` and `commands` modules.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 7, 'MEDIUM': 0, 'HIGH': 0}, - 
'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 7} + "SEVERITY": {"UNDEFINED": 0, "LOW": 7, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 7}, } - self.check_example('popen_wrappers.py', expect) + self.check_example("popen_wrappers.py", expect) def test_random_module(self): - '''Test for the `random` module.''' + """Test for the `random` module.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 7, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 7} + "SEVERITY": {"UNDEFINED": 0, "LOW": 7, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 7}, } - self.check_example('random_module.py', expect) + self.check_example("random_module.py", expect) def test_requests_ssl_verify_disabled(self): - '''Test for the `requests` library skipping verification.''' + """Test for the `requests` library skipping verification.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 7}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 7} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 7}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 7}, } - self.check_example('requests-ssl-verify-disabled.py', expect) + self.check_example("requests-ssl-verify-disabled.py", expect) def test_skip(self): - '''Test `#nosec` and `#noqa` comments.''' + """Test `#nosec` and `#noqa` comments.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 5, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 5} + "SEVERITY": {"UNDEFINED": 0, "LOW": 5, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 5}, } - self.check_example('skip.py', expect) + self.check_example("skip.py", expect) def test_ignore_skip(self): - '''Test --ignore-nosec flag.''' + """Test --ignore-nosec flag.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 7, 'MEDIUM': 0, 
'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 7} + "SEVERITY": {"UNDEFINED": 0, "LOW": 7, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 7}, } - self.check_example('skip.py', expect, ignore_nosec=True) + self.check_example("skip.py", expect, ignore_nosec=True) def test_sql_statements(self): - '''Test for SQL injection through string building.''' - filename = 'sql_statements{}.py' + """Test for SQL injection through string building.""" + filename = "sql_statements{}.py" if sys.version_info <= (3, 6): - filename = filename.format('') + filename = filename.format("") expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 14, - 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 8, 'MEDIUM': 6, - 'HIGH': 0} + "SEVERITY": { + "UNDEFINED": 0, + "LOW": 0, + "MEDIUM": 14, + "HIGH": 0, + }, + "CONFIDENCE": { + "UNDEFINED": 0, + "LOW": 8, + "MEDIUM": 6, + "HIGH": 0, + }, } else: - filename = filename.format('-py36') + filename = filename.format("-py36") expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 16, - 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 9, 'MEDIUM': 7, - 'HIGH': 0} + "SEVERITY": { + "UNDEFINED": 0, + "LOW": 0, + "MEDIUM": 16, + "HIGH": 0, + }, + "CONFIDENCE": { + "UNDEFINED": 0, + "LOW": 9, + "MEDIUM": 7, + "HIGH": 0, + }, } self.check_example(filename, expect) def test_ssl_insecure_version(self): - '''Test for insecure SSL protocol versions.''' + """Test for insecure SSL protocol versions.""" expect = { - 'SEVERITY': {'LOW': 1, 'MEDIUM': 10, 'HIGH': 7}, - 'CONFIDENCE': {'LOW': 0, 'MEDIUM': 11, 'HIGH': 7} + "SEVERITY": {"LOW": 1, "MEDIUM": 10, "HIGH": 7}, + "CONFIDENCE": {"LOW": 0, "MEDIUM": 11, "HIGH": 7}, } expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 10, 'HIGH': 7}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 11, 'HIGH': 7} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 10, "HIGH": 7}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, 
"MEDIUM": 11, "HIGH": 7}, } - self.check_example('ssl-insecure-version.py', expect) + self.check_example("ssl-insecure-version.py", expect) def test_subprocess_shell(self): - '''Test for `subprocess.Popen` with `shell=True`.''' + """Test for `subprocess.Popen` with `shell=True`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 21, 'MEDIUM': 1, 'HIGH': 11}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 0, 'HIGH': 32} + "SEVERITY": {"UNDEFINED": 0, "LOW": 21, "MEDIUM": 1, "HIGH": 11}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 0, "HIGH": 32}, } - self.check_example('subprocess_shell.py', expect) + self.check_example("subprocess_shell.py", expect) def test_urlopen(self): - '''Test for dangerous URL opening.''' + """Test for dangerous URL opening.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 14, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 14} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 14, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 14}, } - self.check_example('urlopen.py', expect) + self.check_example("urlopen.py", expect) def test_wildcard_injection(self): - '''Test for wildcard injection in shell commands.''' + """Test for wildcard injection in shell commands.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 10, 'MEDIUM': 0, 'HIGH': 4}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 5, 'HIGH': 9} + "SEVERITY": {"UNDEFINED": 0, "LOW": 10, "MEDIUM": 0, "HIGH": 4}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 5, "HIGH": 9}, } - self.check_example('wildcard-injection.py', expect) + self.check_example("wildcard-injection.py", expect) def test_django_sql_injection(self): """Test insecure extra functions on Django.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 11, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 11, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 11, "HIGH": 0}, + "CONFIDENCE": 
{"UNDEFINED": 0, "LOW": 0, "MEDIUM": 11, "HIGH": 0}, } - self.check_example('django_sql_injection_extra.py', expect) + self.check_example("django_sql_injection_extra.py", expect) def test_django_sql_injection_raw(self): """Test insecure raw functions on Django.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 4, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 4, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 4, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 4, "HIGH": 0}, } - self.check_example('django_sql_injection_raw.py', expect) + self.check_example("django_sql_injection_raw.py", expect) def test_yaml(self): - '''Test for `yaml.load`.''' + """Test for `yaml.load`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('yaml_load.py', expect) + self.check_example("yaml_load.py", expect) def test_host_key_verification(self): - '''Test for ignoring host key verification.''' + """Test for ignoring host key verification.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 2, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 2, "HIGH": 0}, } - self.check_example('no_host_key_verification.py', expect) + self.check_example("no_host_key_verification.py", expect) def test_jinja2_templating(self): - '''Test jinja templating for potential XSS bugs.''' + """Test jinja templating for potential XSS bugs.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 5}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 2, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 
5}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 2, "HIGH": 3}, } - self.check_example('jinja2_templating.py', expect) + self.check_example("jinja2_templating.py", expect) def test_mako_templating(self): - '''Test Mako templates for XSS.''' + """Test Mako templates for XSS.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 3, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 3, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('mako_templating.py', expect) + self.check_example("mako_templating.py", expect) def test_django_xss_secure(self): """Test false positives for Django XSS""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, } self.b_mgr.b_ts = b_test_set.BanditTestSet( - config=self.b_mgr.b_conf, - profile={'exclude': ['B308']} + config=self.b_mgr.b_conf, profile={"exclude": ["B308"]} ) - self.check_example('mark_safe_secure.py', expect) + self.check_example("mark_safe_secure.py", expect) def test_django_xss_insecure(self): """Test for Django XSS via django.utils.safestring""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 28, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 28} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 28, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 28}, } self.b_mgr.b_ts = b_test_set.BanditTestSet( - config=self.b_mgr.b_conf, - profile={'exclude': ['B308']} + config=self.b_mgr.b_conf, profile={"exclude": ["B308"]} ) - self.check_example('mark_safe_insecure.py', expect) + self.check_example("mark_safe_insecure.py", expect) def test_xml(self): - '''Test xml vulnerabilities.''' + 
"""Test xml vulnerabilities.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 4, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 5} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 4, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 5}, } - self.check_example('xml_etree_celementtree.py', expect) + self.check_example("xml_etree_celementtree.py", expect) expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 2, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('xml_expatbuilder.py', expect) + self.check_example("xml_expatbuilder.py", expect) expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 3, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 4} + "SEVERITY": {"UNDEFINED": 0, "LOW": 3, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 4}, } - self.check_example('xml_lxml.py', expect) + self.check_example("xml_lxml.py", expect) expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 2, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 4} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 2, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 4}, } - self.check_example('xml_pulldom.py', expect) + self.check_example("xml_pulldom.py", expect) expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('xml_xmlrpc.py', expect) + self.check_example("xml_xmlrpc.py", expect) expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 4, 'HIGH': 
0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 5} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 4, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 5}, } - self.check_example('xml_etree_elementtree.py', expect) + self.check_example("xml_etree_elementtree.py", expect) expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - self.check_example('xml_expatreader.py', expect) + self.check_example("xml_expatreader.py", expect) expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 2, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 4} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 2, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 4}, } - self.check_example('xml_minidom.py', expect) + self.check_example("xml_minidom.py", expect) expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 6, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 8} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 6, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 8}, } - self.check_example('xml_sax.py', expect) + self.check_example("xml_sax.py", expect) def test_httpoxy(self): - '''Test httpoxy vulnerability.''' + """Test httpoxy vulnerability.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('httpoxy_cgihandler.py', expect) - self.check_example('httpoxy_twisted_script.py', expect) - self.check_example('httpoxy_twisted_directory.py', expect) + 
self.check_example("httpoxy_cgihandler.py", expect) + self.check_example("httpoxy_twisted_script.py", expect) + self.check_example("httpoxy_twisted_directory.py", expect) def test_asserts(self): - '''Test catching the use of assert.''' - test = next((x for x in self.b_mgr.b_ts.tests['Assert'] - if x.__name__ == 'assert_used')) + """Test catching the use of assert.""" + test = next( + x + for x in self.b_mgr.b_ts.tests["Assert"] + if x.__name__ == "assert_used" + ) - test._config = {'skips': []} + test._config = {"skips": []} expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('assert.py', expect) + self.check_example("assert.py", expect) - test._config = {'skips': ['*assert.py']} + test._config = {"skips": ["*assert.py"]} expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, } - self.check_example('assert.py', expect) + self.check_example("assert.py", expect) test._config = {} expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 1, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 1, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('assert.py', expect) + self.check_example("assert.py", expect) def test_paramiko_injection(self): - '''Test paramiko command execution.''' + """Test paramiko command execution.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0} + 
"SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, } - self.check_example('paramiko_injection.py', expect) + self.check_example("paramiko_injection.py", expect) def test_partial_path(self): - '''Test process spawning with partial file paths.''' + """Test process spawning with partial file paths.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 11, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 11} + "SEVERITY": {"UNDEFINED": 0, "LOW": 11, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 11}, } - self.check_example('partial_path_process.py', expect) + self.check_example("partial_path_process.py", expect) def test_try_except_continue(self): - '''Test try, except, continue detection.''' - test = next((x for x in self.b_mgr.b_ts.tests['ExceptHandler'] - if x.__name__ == 'try_except_continue')) + """Test try, except, continue detection.""" + test = next( + x + for x in self.b_mgr.b_ts.tests["ExceptHandler"] + if x.__name__ == "try_except_continue" + ) - test._config = {'check_typed_exception': True} + test._config = {"check_typed_exception": True} expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 3, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 3, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('try_except_continue.py', expect) + self.check_example("try_except_continue.py", expect) - test._config = {'check_typed_exception': False} + test._config = {"check_typed_exception": False} expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - 
self.check_example('try_except_continue.py', expect) + self.check_example("try_except_continue.py", expect) def test_try_except_pass(self): - '''Test try, except pass detection.''' - test = next((x for x in self.b_mgr.b_ts.tests['ExceptHandler'] - if x.__name__ == 'try_except_pass')) + """Test try, except pass detection.""" + test = next( + x + for x in self.b_mgr.b_ts.tests["ExceptHandler"] + if x.__name__ == "try_except_pass" + ) - test._config = {'check_typed_exception': True} + test._config = {"check_typed_exception": True} expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 3, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 3} + "SEVERITY": {"UNDEFINED": 0, "LOW": 3, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 3}, } - self.check_example('try_except_pass.py', expect) + self.check_example("try_except_pass.py", expect) - test._config = {'check_typed_exception': False} + test._config = {"check_typed_exception": False} expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - self.check_example('try_except_pass.py', expect) + self.check_example("try_except_pass.py", expect) def test_metric_gathering(self): expect = { - 'nosec': 2, 'loc': 7, - 'issues': {'CONFIDENCE': {'HIGH': 5}, 'SEVERITY': {'LOW': 5}} + "nosec": 2, + "loc": 7, + "issues": {"CONFIDENCE": {"HIGH": 5}, "SEVERITY": {"LOW": 5}}, } - self.check_metrics('skip.py', expect) + self.check_metrics("skip.py", expect) expect = { - 'nosec': 0, 'loc': 4, - 'issues': {'CONFIDENCE': {'HIGH': 2}, 'SEVERITY': {'LOW': 2}} + "nosec": 0, + "loc": 4, + "issues": {"CONFIDENCE": {"HIGH": 2}, "SEVERITY": {"LOW": 2}}, } - self.check_metrics('imports.py', expect) + self.check_metrics("imports.py", expect) def 
test_weak_cryptographic_key(self): - '''Test for weak key sizes.''' + """Test for weak key sizes.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 8, 'HIGH': 8}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 16} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 8, "HIGH": 8}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 16}, } - self.check_example('weak_cryptographic_key_sizes.py', expect) + self.check_example("weak_cryptographic_key_sizes.py", expect) def test_multiline_code(self): - '''Test issues in multiline statements return code as expected.''' - self.run_example('multiline_statement.py') + """Test issues in multiline statements return code as expected.""" + self.run_example("multiline_statement.py") self.assertEqual(0, len(self.b_mgr.skipped)) self.assertEqual(1, len(self.b_mgr.files_list)) - self.assertTrue(self.b_mgr.files_list[0].endswith( - 'multiline_statement.py')) + self.assertTrue( + self.b_mgr.files_list[0].endswith("multiline_statement.py") + ) issues = self.b_mgr.get_issue_list() self.assertEqual(2, len(issues)) self.assertTrue( - issues[0].fname.endswith('examples/multiline_statement.py') + issues[0].fname.endswith("examples/multiline_statement.py") ) self.assertEqual(1, issues[0].lineno) self.assertEqual(list(range(1, 3)), issues[0].linerange) - self.assertIn('subprocess', issues[0].get_code()) + self.assertIn("subprocess", issues[0].get_code()) self.assertEqual(5, issues[1].lineno) self.assertEqual(list(range(3, 6 + 1)), issues[1].linerange) - self.assertIn('shell=True', issues[1].get_code()) + self.assertIn("shell=True", issues[1].get_code()) def test_code_line_numbers(self): - self.run_example('binding.py') + self.run_example("binding.py") issues = self.b_mgr.get_issue_list() code_lines = issues[0].get_code().splitlines() @@ -715,30 +740,32 @@ def test_code_line_numbers(self): def test_flask_debug_true(self): expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1}, - 
'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, } - self.check_example('flask_debug.py', expect) + self.check_example("flask_debug.py", expect) def test_nosec(self): expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 2, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - self.check_example('nosec.py', expect) + self.check_example("nosec.py", expect) def test_baseline_filter(self): - issue_text = ('A Flask app appears to be run with debug=True, which ' - 'exposes the Werkzeug debugger and allows the execution ' - 'of arbitrary code.') - json = """{ + issue_text = ( + "A Flask app appears to be run with debug=True, which " + "exposes the Werkzeug debugger and allows the execution " + "of arbitrary code." 
+ ) + json = """{{ "results": [ - { + {{ "code": "...", - "filename": "%s/examples/flask_debug.py", + "filename": "{}/examples/flask_debug.py", "issue_confidence": "MEDIUM", "issue_severity": "HIGH", - "issue_text": "%s", + "issue_text": "{}", "line_number": 10, "col_offset": 0, "line_range": [ @@ -746,47 +773,50 @@ def test_baseline_filter(self): ], "test_name": "flask_debug_true", "test_id": "B201" - } + }} ] - } - """ % (os.getcwd(), issue_text) + }} + """.format( + os.getcwd(), + issue_text, + ) self.b_mgr.populate_baseline(json) - self.run_example('flask_debug.py') + self.run_example("flask_debug.py") self.assertEqual(1, len(self.b_mgr.baseline)) self.assertEqual({}, self.b_mgr.get_issue_list()) def test_unverified_context(self): - '''Test for `ssl._create_unverified_context`.''' + """Test for `ssl._create_unverified_context`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 1, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 1} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 1, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 1}, } - self.check_example('unverified_context.py', expect) + self.check_example("unverified_context.py", expect) def test_hashlib_new_insecure_functions(self): - '''Test insecure hash functions created by `hashlib.new`.''' + """Test insecure hash functions created by `hashlib.new`.""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 9, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 9} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 9, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 9}, } - self.check_example('hashlib_new_insecure_functions.py', expect) + self.check_example("hashlib_new_insecure_functions.py", expect) def test_blacklist_pycrypto(self): - '''Test importing pycrypto module''' + """Test importing pycrypto module""" expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 
'HIGH': 2}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 2} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 2}, } - self.check_example('pycrypto.py', expect) + self.check_example("pycrypto.py", expect) def test_no_blacklist_pycryptodome(self): - '''Test importing pycryptodome module + """Test importing pycryptodome module make sure it's no longer blacklisted - ''' + """ expect = { - 'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0}, - 'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0} + "SEVERITY": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, + "CONFIDENCE": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}, } - self.check_example('pycryptodome.py', expect) + self.check_example("pycryptodome.py", expect) diff --git a/tests/functional/test_runtime.py b/tests/functional/test_runtime.py index 373c98b87..de8236465 100644 --- a/tests/functional/test_runtime.py +++ b/tests/functional/test_runtime.py @@ -1,7 +1,6 @@ # Copyright (c) 2015 VMware, Inc. 
# # SPDX-License-Identifier: Apache-2.0 - import os import subprocess @@ -9,32 +8,35 @@ class RuntimeTests(testtools.TestCase): - def _test_runtime(self, cmdlist, infile=None): process = subprocess.Popen( cmdlist, stdin=infile if infile else subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - close_fds=True + close_fds=True, ) stdout, stderr = process.communicate() retcode = process.poll() - return (retcode, stdout.decode('utf-8')) + return (retcode, stdout.decode("utf-8")) def _test_example(self, cmdlist, targets): for t in targets: - cmdlist.append(os.path.join(os.getcwd(), 'examples', t)) + cmdlist.append(os.path.join(os.getcwd(), "examples", t)) return self._test_runtime(cmdlist) def test_no_arguments(self): - (retcode, output) = self._test_runtime(['bandit', ]) + (retcode, output) = self._test_runtime( + [ + "bandit", + ] + ) self.assertEqual(2, retcode) self.assertIn("No targets found in CLI or ini files", output) def test_piped_input(self): - with open('examples/imports.py', 'r') as infile: - (retcode, output) = self._test_runtime(['bandit', '-'], infile) + with open("examples/imports.py") as infile: + (retcode, output) = self._test_runtime(["bandit", "-"], infile) self.assertEqual(1, retcode) self.assertIn("Total lines of code: 4", output) self.assertIn("Low: 2", output) @@ -45,14 +47,14 @@ def test_piped_input(self): self.assertIn(":4", output) def test_nonexistent_config(self): - (retcode, output) = self._test_runtime([ - 'bandit', '-c', 'nonexistent.yml', 'xx.py' - ]) + (retcode, output) = self._test_runtime( + ["bandit", "-c", "nonexistent.yml", "xx.py"] + ) self.assertEqual(2, retcode) self.assertIn("nonexistent.yml : Could not read config file.", output) def test_help_arg(self): - (retcode, output) = self._test_runtime(['bandit', '-h']) + (retcode, output) = self._test_runtime(["bandit", "-h"]) self.assertEqual(0, retcode) self.assertIn( "Bandit - a Python source code security analyzer", output @@ -64,41 +66,71 @@ def 
test_help_arg(self): # test examples (use _test_example() to wrap in config location argument def test_example_nonexistent(self): (retcode, output) = self._test_example( - ['bandit', ], ['nonexistent.py', ] + [ + "bandit", + ], + [ + "nonexistent.py", + ], ) self.assertEqual(0, retcode) self.assertIn("Files skipped (1):", output) self.assertIn("nonexistent.py (No such file or directory", output) def test_example_okay(self): - (retcode, output) = self._test_example(['bandit', ], ['okay.py', ]) + (retcode, output) = self._test_example( + [ + "bandit", + ], + [ + "okay.py", + ], + ) self.assertEqual(0, retcode) self.assertIn("Total lines of code: 1", output) self.assertIn("Files skipped (0):", output) self.assertIn("No issues identified.", output) def test_example_nonsense(self): - (retcode, output) = self._test_example(['bandit', ], ['nonsense.py', ]) + (retcode, output) = self._test_example( + [ + "bandit", + ], + [ + "nonsense.py", + ], + ) self.assertEqual(0, retcode) self.assertIn("Files skipped (1):", output) self.assertIn("nonsense.py (syntax error while parsing AST", output) def test_example_nonsense2(self): (retcode, output) = self._test_example( - ['bandit', ], ['nonsense2.py', ] + [ + "bandit", + ], + [ + "nonsense2.py", + ], ) self.assertEqual(0, retcode) self.assertIn("Files skipped (1):", output) self.assertIn("nonsense2.py (syntax error while parsing AST", output) def test_example_imports(self): - (retcode, output) = self._test_example(['bandit', ], ['imports.py', ]) + (retcode, output) = self._test_example( + [ + "bandit", + ], + [ + "imports.py", + ], + ) self.assertEqual(1, retcode) self.assertIn("Total lines of code: 4", output) self.assertIn("Low: 2", output) self.assertIn("High: 2", output) self.assertIn("Files skipped (0):", output) - self.assertIn("Issue: [B403:blacklist] Consider possible", - output) + self.assertIn("Issue: [B403:blacklist] Consider possible", output) self.assertIn("imports.py:2", output) self.assertIn("imports.py:4", output) 
diff --git a/tests/unit/cli/test_baseline.py b/tests/unit/cli/test_baseline.py index 245d711d2..5a6113b45 100644 --- a/tests/unit/cli/test_baseline.py +++ b/tests/unit/cli/test_baseline.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2015 Hewlett-Packard Enterprise # # SPDX-License-Identifier: Apache-2.0 - import os import subprocess from unittest import mock @@ -34,22 +32,21 @@ class BanditBaselineToolTests(testtools.TestCase): - @classmethod def setUpClass(cls): # Set up prior to running test class # read in content used for temporary file contents - with open('examples/mktemp.py') as fd: + with open("examples/mktemp.py") as fd: cls.temp_file_contents = fd.read() def setUp(self): # Set up prior to run each test case - super(BanditBaselineToolTests, self).setUp() + super().setUp() self.current_directory = os.getcwd() def tearDown(self): # Tear down after running each test case - super(BanditBaselineToolTests, self).tearDown() + super().tearDown() os.chdir(self.current_directory) def test_bandit_baseline(self): @@ -58,57 +55,72 @@ def test_bandit_baseline(self): repo_directory = self.useFixture(fixtures.TempDir()).path # get benign and findings examples - with open('examples/okay.py') as fd: + with open("examples/okay.py") as fd: benign_contents = fd.read() - with open('examples/os_system.py') as fd: + with open("examples/os_system.py") as fd: malicious_contents = fd.read() - contents = {'benign_one.py': benign_contents, - 'benign_two.py': benign_contents, - 'malicious.py': malicious_contents} + contents = { + "benign_one.py": benign_contents, + "benign_two.py": benign_contents, + "malicious.py": malicious_contents, + } # init git repo, change directory to it git_repo = git.Repo.init(repo_directory) - git_repo.index.commit('Initial commit') + git_repo.index.commit("Initial commit") os.chdir(repo_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(config) # create three branches, first has only benign, second 
adds malicious, # third adds benign - branches = [{'name': 'benign1', - 'files': ['benign_one.py'], - 'expected_return': 0}, - - {'name': 'malicious', - 'files': ['benign_one.py', 'malicious.py'], - 'expected_return': 1}, - - {'name': 'benign2', - 'files': ['benign_one.py', 'malicious.py', - 'benign_two.py'], - 'expected_return': 0}] - - baseline_command = ['bandit-baseline', '-c', 'bandit.yaml', '-r', '.', - '-p', 'test'] + branches = [ + { + "name": "benign1", + "files": ["benign_one.py"], + "expected_return": 0, + }, + { + "name": "malicious", + "files": ["benign_one.py", "malicious.py"], + "expected_return": 1, + }, + { + "name": "benign2", + "files": ["benign_one.py", "malicious.py", "benign_two.py"], + "expected_return": 0, + }, + ] + + baseline_command = [ + "bandit-baseline", + "-c", + "bandit.yaml", + "-r", + ".", + "-p", + "test", + ] for branch in branches: - branch['branch'] = git_repo.create_head(branch['name']) - git_repo.head.reference = branch['branch'] + branch["branch"] = git_repo.create_head(branch["name"]) + git_repo.head.reference = branch["branch"] git_repo.head.reset(working_tree=True) - for f in branch['files']: - with open(f, 'wt') as fd: + for f in branch["files"]: + with open(f, "wt") as fd: fd.write(contents[f]) - git_repo.index.add(branch['files']) - git_repo.index.commit(branch['name']) + git_repo.index.add(branch["files"]) + git_repo.index.commit(branch["name"]) - self.assertEqual(branch['expected_return'], - subprocess.call(baseline_command)) + self.assertEqual( + branch["expected_return"], subprocess.call(baseline_command) + ) def test_main_non_repo(self): # Test that bandit gracefully exits when there is no git repository @@ -117,27 +129,28 @@ def test_main_non_repo(self): os.chdir(repo_dir) # assert the system exits with code 2 - self.assertRaisesRegex(SystemExit, '2', baseline.main) + self.assertRaisesRegex(SystemExit, "2", baseline.main) def test_main_git_command_failure(self): # Test that bandit does not run when the Git 
command fails repo_directory = self.useFixture(fixtures.TempDir()).path git_repo = git.Repo.init(repo_directory) - git_repo.index.commit('Initial Commit') + git_repo.index.commit("Initial Commit") os.chdir(repo_directory) - additional_content = 'additional_file.py' - with open(additional_content, 'wt') as fd: + additional_content = "additional_file.py" + with open(additional_content, "wt") as fd: fd.write(self.temp_file_contents) git_repo.index.add([additional_content]) - git_repo.index.commit('Additional Content') + git_repo.index.commit("Additional Content") - with mock.patch('git.Repo.commit') as mock_git_repo_commit: + with mock.patch("git.Repo.commit") as mock_git_repo_commit: mock_git_repo_commit.side_effect = git.exc.GitCommandError( - 'commit', '') + "commit", "" + ) # assert the system exits with code 2 - self.assertRaisesRegex(SystemExit, '2', baseline.main) + self.assertRaisesRegex(SystemExit, "2", baseline.main) def test_main_no_parent_commit(self): # Test that bandit exits when there is no parent commit detected when @@ -145,11 +158,11 @@ def test_main_no_parent_commit(self): repo_directory = self.useFixture(fixtures.TempDir()).path git_repo = git.Repo.init(repo_directory) - git_repo.index.commit('Initial Commit') + git_repo.index.commit("Initial Commit") os.chdir(repo_directory) # assert the system exits with code 2 - self.assertRaisesRegex(SystemExit, '2', baseline.main) + self.assertRaisesRegex(SystemExit, "2", baseline.main) def test_main_subprocess_error(self): # Test that bandit handles a CalledProcessError when attempting to run @@ -157,24 +170,24 @@ def test_main_subprocess_error(self): repo_directory = self.useFixture(fixtures.TempDir()).path git_repo = git.Repo.init(repo_directory) - git_repo.index.commit('Initial Commit') + git_repo.index.commit("Initial Commit") os.chdir(repo_directory) - additional_content = 'additional_file.py' - with open(additional_content, 'wt') as fd: + additional_content = "additional_file.py" + with 
open(additional_content, "wt") as fd: fd.write(self.temp_file_contents) git_repo.index.add([additional_content]) - git_repo.index.commit('Additional Content') + git_repo.index.commit("Additional Content") - with mock.patch('subprocess.check_output') as mock_check_output: - mock_bandit_cmd = 'bandit_mock -b temp_file.txt' - mock_check_output.side_effect = ( - subprocess.CalledProcessError('3', mock_bandit_cmd) + with mock.patch("subprocess.check_output") as mock_check_output: + mock_bandit_cmd = "bandit_mock -b temp_file.txt" + mock_check_output.side_effect = subprocess.CalledProcessError( + "3", mock_bandit_cmd ) # assert the system exits with code 3 (returned from # CalledProcessError) - self.assertRaisesRegex(SystemExit, '3', baseline.main) + self.assertRaisesRegex(SystemExit, "3", baseline.main) def test_init_logger(self): # Test whether the logger was initialized when calling init_logger @@ -199,17 +212,17 @@ def test_initialize_git_command_failure(self): # Test that bandit does not run when the Git command fails repo_directory = self.useFixture(fixtures.TempDir()).path git_repo = git.Repo.init(repo_directory) - git_repo.index.commit('Initial Commit') + git_repo.index.commit("Initial Commit") os.chdir(repo_directory) - additional_content = 'additional_file.py' - with open(additional_content, 'wt') as fd: + additional_content = "additional_file.py" + with open(additional_content, "wt") as fd: fd.write(self.temp_file_contents) git_repo.index.add([additional_content]) - git_repo.index.commit('Additional Content') + git_repo.index.commit("Additional Content") - with mock.patch('git.Repo') as mock_git_repo: - mock_git_repo.side_effect = git.exc.GitCommandNotFound('clone', '') + with mock.patch("git.Repo") as mock_git_repo: + mock_git_repo.side_effect = git.exc.GitCommandNotFound("clone", "") return_value = baseline.initialize() @@ -221,32 +234,32 @@ def test_initialize_dirty_repo(self): # 'dirty' when calling the initialize method repo_directory = 
self.useFixture(fixtures.TempDir()).path git_repo = git.Repo.init(repo_directory) - git_repo.index.commit('Initial Commit') + git_repo.index.commit("Initial Commit") os.chdir(repo_directory) # make the git repo 'dirty' - with open('dirty_file.py', 'wt') as fd: + with open("dirty_file.py", "wt") as fd: fd.write(self.temp_file_contents) - git_repo.index.add(['dirty_file.py']) + git_repo.index.add(["dirty_file.py"]) return_value = baseline.initialize() # assert bandit did not run due to dirty repo self.assertEqual((None, None, None), return_value) - @mock.patch('sys.argv', ['bandit', '-f', 'txt', 'test']) + @mock.patch("sys.argv", ["bandit", "-f", "txt", "test"]) def test_initialize_existing_report_file(self): # Test that bandit does not run when the output file exists (and the # provided output format does not match the default format) when # calling the initialize method repo_directory = self.useFixture(fixtures.TempDir()).path git_repo = git.Repo.init(repo_directory) - git_repo.index.commit('Initial Commit') + git_repo.index.commit("Initial Commit") os.chdir(repo_directory) # create an existing version of output report file - existing_report = "{}.{}".format(baseline.report_basename, 'txt') - with open(existing_report, 'wt') as fd: + existing_report = "{}.{}".format(baseline.report_basename, "txt") + with open(existing_report, "wt") as fd: fd.write(self.temp_file_contents) return_value = baseline.initialize() @@ -254,14 +267,15 @@ def test_initialize_existing_report_file(self): # assert bandit did not run due to existing report file self.assertEqual((None, None, None), return_value) - @mock.patch('bandit.cli.baseline.bandit_args', ['-o', - 'bandit_baseline_result']) + @mock.patch( + "bandit.cli.baseline.bandit_args", ["-o", "bandit_baseline_result"] + ) def test_initialize_with_output_argument(self): # Test that bandit does not run when the '-o' (output) argument is # specified repo_directory = self.useFixture(fixtures.TempDir()).path git_repo = 
git.Repo.init(repo_directory) - git_repo.index.commit('Initial Commit') + git_repo.index.commit("Initial Commit") os.chdir(repo_directory) return_value = baseline.initialize() @@ -274,12 +288,12 @@ def test_initialize_existing_temp_file(self): # when calling the initialize method repo_directory = self.useFixture(fixtures.TempDir()).path git_repo = git.Repo.init(repo_directory) - git_repo.index.commit('Initial Commit') + git_repo.index.commit("Initial Commit") os.chdir(repo_directory) # create an existing version of temporary output file existing_temp_file = baseline.baseline_tmp_file - with open(existing_temp_file, 'wt') as fd: + with open(existing_temp_file, "wt") as fd: fd.write(self.temp_file_contents) return_value = baseline.initialize() diff --git a/tests/unit/cli/test_config_generator.py b/tests/unit/cli/test_config_generator.py index a7946c964..9a2a76f60 100644 --- a/tests/unit/cli/test_config_generator.py +++ b/tests/unit/cli/test_config_generator.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2016 Hewlett-Packard Enterprise # # SPDX-License-Identifier: Apache-2.0 - import importlib import logging from unittest import mock @@ -20,23 +18,22 @@ def gen_config(name): return {"test": "test data"} -@test.takes_config('test') -@test.checks('Str') +@test.takes_config("test") +@test.checks("Str") def _test_plugin(context, conf): pass class BanditConfigGeneratorLoggerTests(testtools.TestCase): - def setUp(self): - super(BanditConfigGeneratorLoggerTests, self).setUp() + super().setUp() self.logger = logging.getLogger(config_generator.__name__) self.original_logger_handlers = self.logger.handlers self.original_logger_level = self.logger.level self.logger.handlers = [] def tearDown(self): - super(BanditConfigGeneratorLoggerTests, self).tearDown() + super().tearDown() self.logger.handlers = self.original_logger_handlers self.logger.level = self.original_logger_level @@ -49,40 +46,41 @@ def test_init_logger(self): class 
BanditConfigGeneratorTests(testtools.TestCase): - @mock.patch('sys.argv', ['bandit-config-generator']) + @mock.patch("sys.argv", ["bandit-config-generator"]) def test_parse_args_no_defaults(self): # Without arguments, the generator should just show help and exit self.assertRaises(SystemExit, config_generator.parse_args) - @mock.patch('sys.argv', ['bandit-config-generator', '--show-defaults']) + @mock.patch("sys.argv", ["bandit-config-generator", "--show-defaults"]) def test_parse_args_show_defaults(self): # Test that the config generator does show default plugin settings return_value = config_generator.parse_args() self.assertTrue(return_value.show_defaults) - @mock.patch('sys.argv', ['bandit-config-generator', '--out', 'dummyfile']) + @mock.patch("sys.argv", ["bandit-config-generator", "--out", "dummyfile"]) def test_parse_args_out_file(self): # Test config generator get proper output file when specified return_value = config_generator.parse_args() - self.assertEqual('dummyfile', return_value.output_file) + self.assertEqual("dummyfile", return_value.output_file) def test_get_config_settings(self): config = {} for plugin in extension_loader.MANAGER.plugins: function = plugin.plugin - if hasattr(plugin.plugin, '_takes_config'): + if hasattr(plugin.plugin, "_takes_config"): module = importlib.import_module(function.__module__) - config[plugin.name] = module.gen_config( - function._takes_config) + config[plugin.name] = module.gen_config(function._takes_config) settings = config_generator.get_config_settings() - self.assertEqual(yaml.safe_dump(config, default_flow_style=False), - settings) + self.assertEqual( + yaml.safe_dump(config, default_flow_style=False), settings + ) - @mock.patch('sys.argv', ['bandit-config-generator', '--show-defaults']) + @mock.patch("sys.argv", ["bandit-config-generator", "--show-defaults"]) def test_main_show_defaults(self): # Test that the config generator does show defaults and returns 0 - with 
mock.patch('bandit.cli.config_generator.get_config_settings' - ) as mock_config_settings: + with mock.patch( + "bandit.cli.config_generator.get_config_settings" + ) as mock_config_settings: return_value = config_generator.main() # The get_config_settings function should have been called self.assertTrue(mock_config_settings.called) diff --git a/tests/unit/cli/test_main.py b/tests/unit/cli/test_main.py index ecc0f1fa1..5d0fd7b76 100644 --- a/tests/unit/cli/test_main.py +++ b/tests/unit/cli/test_main.py @@ -1,7 +1,6 @@ # Copyright 2016 IBM Corp. # # SPDX-License-Identifier: Apache-2.0 - import logging import os from unittest import mock @@ -49,16 +48,15 @@ class BanditCLIMainLoggerTests(testtools.TestCase): - def setUp(self): - super(BanditCLIMainLoggerTests, self).setUp() + super().setUp() self.logger = logging.getLogger() self.original_logger_handlers = self.logger.handlers self.original_logger_level = self.logger.level self.logger.handlers = [] def tearDown(self): - super(BanditCLIMainLoggerTests, self).tearDown() + super().tearDown() self.logger.handlers = self.original_logger_handlers self.logger.level = self.original_logger_level @@ -77,13 +75,12 @@ def test_init_logger_debug_mode(self): class BanditCLIMainTests(testtools.TestCase): - def setUp(self): - super(BanditCLIMainTests, self).setUp() + super().setUp() self.current_directory = os.getcwd() def tearDown(self): - super(BanditCLIMainTests, self).tearDown() + super().tearDown() os.chdir(self.current_directory) def test_get_options_from_ini_no_ini_path_no_target(self): @@ -101,24 +98,31 @@ def test_get_options_from_ini_no_ini_path_no_bandit_files(self): # Test that no config options are loaded when no ini path is provided # and the target directory contains no bandit config files (.bandit) target_directory = self.useFixture(fixtures.TempDir()).path - self.assertIsNone(bandit._get_options_from_ini(None, - [target_directory])) + self.assertIsNone( + bandit._get_options_from_ini(None, [target_directory]) + ) def 
test_get_options_from_ini_no_ini_path_multi_bandit_files(self): # Test that bandit exits when no ini path is provided and the target # directory(s) contain multiple bandit config files (.bandit) target_directory = self.useFixture(fixtures.TempDir()).path - second_config = 'second_config_directory' + second_config = "second_config_directory" os.mkdir(os.path.join(target_directory, second_config)) - bandit_config_one = os.path.join(target_directory, '.bandit') - bandit_config_two = os.path.join(target_directory, second_config, - '.bandit') + bandit_config_one = os.path.join(target_directory, ".bandit") + bandit_config_two = os.path.join( + target_directory, second_config, ".bandit" + ) bandit_files = [bandit_config_one, bandit_config_two] for bandit_file in bandit_files: - with open(bandit_file, 'wt') as fd: + with open(bandit_file, "wt") as fd: fd.write(bandit_config_content) - self.assertRaisesRegex(SystemExit, '2', bandit._get_options_from_ini, - None, [target_directory]) + self.assertRaisesRegex( + SystemExit, + "2", + bandit._get_options_from_ini, + None, + [target_directory], + ) def test_init_extensions(self): # Test that an extension loader manager is returned @@ -127,197 +131,222 @@ def test_init_extensions(self): def test_log_option_source_arg_val(self): # Test that the command argument value is returned when provided # with None or a string default value - arg_val = 'file' - ini_val = 'vuln' - option_name = 'aggregate' - for default_val in (None, 'default'): - self.assertEqual(arg_val, bandit._log_option_source( - default_val, - arg_val, - ini_val, - option_name - )) + arg_val = "file" + ini_val = "vuln" + option_name = "aggregate" + for default_val in (None, "default"): + self.assertEqual( + arg_val, + bandit._log_option_source( + default_val, arg_val, ini_val, option_name + ), + ) def test_log_option_source_ini_value(self): # Test that the ini value is returned when no command argument is # provided default_val = None - ini_val = 'vuln' - option_name = 
'aggregate' - self.assertEqual(ini_val, bandit._log_option_source( - default_val, - None, - ini_val, - option_name - )) + ini_val = "vuln" + option_name = "aggregate" + self.assertEqual( + ini_val, + bandit._log_option_source(default_val, None, ini_val, option_name), + ) def test_log_option_source_ini_val_with_str_default_and_no_arg_val(self): # Test that the ini value is returned when no command argument is # provided default_val = "file" - arg_val = 'file' - ini_val = 'vuln' - option_name = 'aggregate' - self.assertEqual(ini_val, bandit._log_option_source( - default_val, - arg_val, - ini_val, - option_name - )) + arg_val = "file" + ini_val = "vuln" + option_name = "aggregate" + self.assertEqual( + ini_val, + bandit._log_option_source( + default_val, arg_val, ini_val, option_name + ), + ) def test_log_option_source_no_values(self): # Test that None is returned when no command argument or ini value are # provided - option_name = 'aggregate' - self.assertIsNone(bandit._log_option_source( - None, - None, - None, - option_name - )) - - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', 'test']) + option_name = "aggregate" + self.assertIsNone( + bandit._log_option_source(None, None, None, option_name) + ) + + @mock.patch("sys.argv", ["bandit", "-c", "bandit.yaml", "test"]) def test_main_config_unopenable(self): # Test that bandit exits when a config file cannot be opened - with mock.patch('bandit.core.config.__init__') as mock_bandit_config: - mock_bandit_config.side_effect = utils.ConfigError('', '') + with mock.patch("bandit.core.config.__init__") as mock_bandit_config: + mock_bandit_config.side_effect = utils.ConfigError("", "") # assert a SystemExit with code 2 - self.assertRaisesRegex(SystemExit, '2', bandit.main) + self.assertRaisesRegex(SystemExit, "2", bandit.main) - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', 'test']) + @mock.patch("sys.argv", ["bandit", "-c", "bandit.yaml", "test"]) def test_main_invalid_config(self): # Test that bandit exits 
when a config file contains invalid YAML # content - with mock.patch('bandit.core.config.BanditConfig.__init__' - ) as mock_bandit_config: - mock_bandit_config.side_effect = utils.ConfigError('', '') + with mock.patch( + "bandit.core.config.BanditConfig.__init__" + ) as mock_bandit_config: + mock_bandit_config.side_effect = utils.ConfigError("", "") # assert a SystemExit with code 2 - self.assertRaisesRegex(SystemExit, '2', bandit.main) + self.assertRaisesRegex(SystemExit, "2", bandit.main) - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', 'test']) + @mock.patch("sys.argv", ["bandit", "-c", "bandit.yaml", "test"]) def test_main_handle_ini_options(self): # Test that bandit handles cmdline args from a bandit.yaml file temp_directory = self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) - with mock.patch('bandit.cli.main._get_options_from_ini' - ) as mock_get_opts: - mock_get_opts.return_value = {"exclude": "/tmp", - "skips": "skip_test", - "tests": "some_test"} - - with mock.patch('bandit.cli.main.LOG.error') as err_mock: + with mock.patch( + "bandit.cli.main._get_options_from_ini" + ) as mock_get_opts: + mock_get_opts.return_value = { + "exclude": "/tmp", + "skips": "skip_test", + "tests": "some_test", + } + + with mock.patch("bandit.cli.main.LOG.error") as err_mock: # SystemExit with code 2 when test not found in profile - self.assertRaisesRegex(SystemExit, '2', bandit.main) - self.assertEqual(str(err_mock.call_args[0][0]), - 'Unknown test found in profile: some_test') - - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', '-t', 'badID', - 'test']) + self.assertRaisesRegex(SystemExit, "2", bandit.main) + self.assertEqual( + str(err_mock.call_args[0][0]), + "Unknown test found in profile: some_test", + ) + + @mock.patch( + "sys.argv", ["bandit", "-c", "bandit.yaml", "-t", "badID", "test"] + ) def test_main_unknown_tests(self): # 
Test that bandit exits when an invalid test ID is provided temp_directory = self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) # assert a SystemExit with code 2 - self.assertRaisesRegex(SystemExit, '2', bandit.main) + self.assertRaisesRegex(SystemExit, "2", bandit.main) - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', '-s', 'badID', - 'test']) + @mock.patch( + "sys.argv", ["bandit", "-c", "bandit.yaml", "-s", "badID", "test"] + ) def test_main_unknown_skip_tests(self): # Test that bandit exits when an invalid test ID is provided to skip temp_directory = self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) # assert a SystemExit with code 2 - self.assertRaisesRegex(SystemExit, '2', bandit.main) + self.assertRaisesRegex(SystemExit, "2", bandit.main) - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', '-p', 'bad', - 'test']) + @mock.patch( + "sys.argv", ["bandit", "-c", "bandit.yaml", "-p", "bad", "test"] + ) def test_main_profile_not_found(self): # Test that bandit exits when an invalid profile name is provided temp_directory = self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) # assert a SystemExit with code 2 - with mock.patch('bandit.cli.main.LOG.error') as err_mock: - self.assertRaisesRegex(SystemExit, '2', bandit.main) + with mock.patch("bandit.cli.main.LOG.error") as err_mock: + self.assertRaisesRegex(SystemExit, "2", bandit.main) self.assertEqual( str(err_mock.call_args[0][0]), - 'Unable to find profile (bad) in config file: bandit.yaml') + "Unable to find profile (bad) in config file: bandit.yaml", + ) - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', '-b', 
'base.json', - 'test']) + @mock.patch( + "sys.argv", ["bandit", "-c", "bandit.yaml", "-b", "base.json", "test"] + ) def test_main_baseline_ioerror(self): # Test that bandit exits when encountering an IOError while reading # baseline data temp_directory = self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) - with open('base.json', 'wt') as fd: + with open("base.json", "wt") as fd: fd.write(bandit_baseline_content) - with mock.patch('bandit.core.manager.BanditManager.populate_baseline' - ) as mock_mgr_pop_bl: + with mock.patch( + "bandit.core.manager.BanditManager.populate_baseline" + ) as mock_mgr_pop_bl: mock_mgr_pop_bl.side_effect = IOError # assert a SystemExit with code 2 - self.assertRaisesRegex(SystemExit, '2', bandit.main) - - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', '-b', 'base.json', - '-f', 'csv', 'test']) + self.assertRaisesRegex(SystemExit, "2", bandit.main) + + @mock.patch( + "sys.argv", + [ + "bandit", + "-c", + "bandit.yaml", + "-b", + "base.json", + "-f", + "csv", + "test", + ], + ) def test_main_invalid_output_format(self): # Test that bandit exits when an invalid output format is selected temp_directory = self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) - with open('base.json', 'wt') as fd: + with open("base.json", "wt") as fd: fd.write(bandit_baseline_content) # assert a SystemExit with code 2 - self.assertRaisesRegex(SystemExit, '2', bandit.main) + self.assertRaisesRegex(SystemExit, "2", bandit.main) - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', 'test', '-o', - 'output']) + @mock.patch( + "sys.argv", ["bandit", "-c", "bandit.yaml", "test", "-o", "output"] + ) def test_main_exit_with_results(self): # Test that bandit exits when there are results temp_directory = 
self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) - with mock.patch('bandit.core.manager.BanditManager.results_count' - ) as mock_mgr_results_ct: + with mock.patch( + "bandit.core.manager.BanditManager.results_count" + ) as mock_mgr_results_ct: mock_mgr_results_ct.return_value = 1 # assert a SystemExit with code 1 - self.assertRaisesRegex(SystemExit, '1', bandit.main) + self.assertRaisesRegex(SystemExit, "1", bandit.main) - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', 'test', '-o', - 'output']) + @mock.patch( + "sys.argv", ["bandit", "-c", "bandit.yaml", "test", "-o", "output"] + ) def test_main_exit_with_no_results(self): # Test that bandit exits when there are no results temp_directory = self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) - with mock.patch('bandit.core.manager.BanditManager.results_count' - ) as mock_mgr_results_ct: + with mock.patch( + "bandit.core.manager.BanditManager.results_count" + ) as mock_mgr_results_ct: mock_mgr_results_ct.return_value = 0 # assert a SystemExit with code 0 - self.assertRaisesRegex(SystemExit, '0', bandit.main) + self.assertRaisesRegex(SystemExit, "0", bandit.main) - @mock.patch('sys.argv', ['bandit', '-c', 'bandit.yaml', 'test', '-o', - 'output', '--exit-zero']) + @mock.patch( + "sys.argv", + ["bandit", "-c", "bandit.yaml", "test", "-o", "output", "--exit-zero"], + ) def test_main_exit_with_results_and_with_exit_zero_flag(self): # Test that bandit exits with 0 on results and zero flag temp_directory = self.useFixture(fixtures.TempDir()).path os.chdir(temp_directory) - with open('bandit.yaml', 'wt') as fd: + with open("bandit.yaml", "wt") as fd: fd.write(bandit_config_content) - with mock.patch('bandit.core.manager.BanditManager.results_count' - ) as 
mock_mgr_results_ct: + with mock.patch( + "bandit.core.manager.BanditManager.results_count" + ) as mock_mgr_results_ct: mock_mgr_results_ct.return_value = 1 - self.assertRaisesRegex(SystemExit, '0', bandit.main) + self.assertRaisesRegex(SystemExit, "0", bandit.main) diff --git a/tests/unit/core/test_blacklisting.py b/tests/unit/core/test_blacklisting.py index 2889fd3ed..4aed78481 100644 --- a/tests/unit/core/test_blacklisting.py +++ b/tests/unit/core/test_blacklisting.py @@ -1,33 +1,31 @@ -# -*- coding:utf-8 -*- # # Copyright 2016 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 +import testtools from bandit.core import blacklisting -import testtools - class BlacklistingTests(testtools.TestCase): def test_report_issue(self): - data = {'level': 'HIGH', 'message': 'test {name}', 'id': 'B000'} + data = {"level": "HIGH", "message": "test {name}", "id": "B000"} - issue = blacklisting.report_issue(data, 'name') + issue = blacklisting.report_issue(data, "name") issue_dict = issue.as_dict(with_code=False) self.assertIsInstance(issue_dict, dict) - self.assertEqual('B000', issue_dict['test_id']) - self.assertEqual('HIGH', issue_dict['issue_severity']) - self.assertEqual('HIGH', issue_dict['issue_confidence']) - self.assertEqual('test name', issue_dict['issue_text']) + self.assertEqual("B000", issue_dict["test_id"]) + self.assertEqual("HIGH", issue_dict["issue_severity"]) + self.assertEqual("HIGH", issue_dict["issue_confidence"]) + self.assertEqual("test name", issue_dict["issue_text"]) def test_report_issue_defaults(self): - data = {'message': 'test {name}'} + data = {"message": "test {name}"} - issue = blacklisting.report_issue(data, 'name') + issue = blacklisting.report_issue(data, "name") issue_dict = issue.as_dict(with_code=False) self.assertIsInstance(issue_dict, dict) - self.assertEqual('LEGACY', issue_dict['test_id']) - self.assertEqual('MEDIUM', issue_dict['issue_severity']) - self.assertEqual('HIGH', issue_dict['issue_confidence']) - 
self.assertEqual('test name', issue_dict['issue_text']) + self.assertEqual("LEGACY", issue_dict["test_id"]) + self.assertEqual("MEDIUM", issue_dict["issue_severity"]) + self.assertEqual("HIGH", issue_dict["issue_confidence"]) + self.assertEqual("test name", issue_dict["issue_text"]) diff --git a/tests/unit/core/test_config.py b/tests/unit/core/test_config.py index 08a28e321..3684023ab 100644 --- a/tests/unit/core/test_config.py +++ b/tests/unit/core/test_config.py @@ -1,12 +1,11 @@ # Copyright 2015 IBM Corp. # # SPDX-License-Identifier: Apache-2.0 - import os import tempfile import textwrap -from unittest import mock import uuid +from unittest import mock import fixtures import testtools @@ -16,16 +15,17 @@ class TempFile(fixtures.Fixture): - def __init__(self, contents=None, suffix='.yaml'): - super(TempFile, self).__init__() + def __init__(self, contents=None, suffix=".yaml"): + super().__init__() self.contents = contents self.suffix = suffix def setUp(self): - super(TempFile, self).setUp() + super().setUp() - with tempfile.NamedTemporaryFile(suffix=self.suffix, mode='wt', - delete=False) as f: + with tempfile.NamedTemporaryFile( + suffix=self.suffix, mode="wt", delete=False + ) as f: if self.contents: f.write(self.contents) @@ -40,12 +40,12 @@ def test_settings(self): example_key = uuid.uuid4().hex example_value = self.getUniqueString() - contents = '%s: %s' % (example_key, example_value) + contents = f"{example_key}: {example_value}" f = self.useFixture(TempFile(contents)) b_config = config.BanditConfig(f.name) # After initialization, can get settings. 
- self.assertEqual('*.py', b_config.get_setting('plugin_name_pattern')) + self.assertEqual("*.py", b_config.get_setting("plugin_name_pattern")) self.assertEqual({example_key: example_value}, b_config.config) self.assertEqual(example_value, b_config.get_option(example_key)) @@ -53,33 +53,37 @@ def test_settings(self): def test_file_does_not_exist(self): # When the config file doesn't exist, ConfigFileUnopenable is raised. - cfg_file = os.path.join(os.getcwd(), 'notafile') - self.assertRaisesRegex(utils.ConfigError, cfg_file, - config.BanditConfig, cfg_file) + cfg_file = os.path.join(os.getcwd(), "notafile") + self.assertRaisesRegex( + utils.ConfigError, cfg_file, config.BanditConfig, cfg_file + ) def test_yaml_invalid(self): # When the config yaml file isn't valid, sys.exit(2) is called. # The following is invalid because it starts a sequence and doesn't # end it. - invalid_yaml = '- [ something' + invalid_yaml = "- [ something" f = self.useFixture(TempFile(invalid_yaml)) self.assertRaisesRegex( - utils.ConfigError, f.name, config.BanditConfig, f.name) + utils.ConfigError, f.name, config.BanditConfig, f.name + ) class TestGetOption(testtools.TestCase): def setUp(self): - super(TestGetOption, self).setUp() + super().setUp() self.example_key = uuid.uuid4().hex self.example_subkey = uuid.uuid4().hex self.example_subvalue = uuid.uuid4().hex - sample_yaml = textwrap.dedent(""" + sample_yaml = textwrap.dedent( + """ %s: %s: %s - """ % (self.example_key, self.example_subkey, - self.example_subvalue)) + """ + % (self.example_key, self.example_subkey, self.example_subvalue) + ) f = self.useFixture(TempFile(sample_yaml)) @@ -88,21 +92,22 @@ def setUp(self): def test_levels(self): # get_option with .-separated string. 
- sample_option_name = '%s.%s' % (self.example_key, self.example_subkey) - self.assertEqual(self.example_subvalue, - self.b_config.get_option(sample_option_name)) + sample_option_name = f"{self.example_key}.{self.example_subkey}" + self.assertEqual( + self.example_subvalue, self.b_config.get_option(sample_option_name) + ) def test_levels_not_exist(self): # get_option when option name doesn't exist returns None. - sample_option_name = '%s.%s' % (uuid.uuid4().hex, uuid.uuid4().hex) + sample_option_name = f"{uuid.uuid4().hex}.{uuid.uuid4().hex}" self.assertIsNone(self.b_config.get_option(sample_option_name)) class TestGetSetting(testtools.TestCase): def setUp(self): - super(TestGetSetting, self).setUp() - test_yaml = 'key: value' + super().setUp() + test_yaml = "key: value" f = self.useFixture(TempFile(test_yaml)) self.b_config = config.BanditConfig(f.name) @@ -114,7 +119,8 @@ def test_not_exist(self): class TestConfigCompat(testtools.TestCase): - sample = textwrap.dedent(""" + sample = textwrap.dedent( + """ profiles: test_1: include: @@ -158,94 +164,112 @@ class TestConfigCompat(testtools.TestCase): imports: [telnetlib] level: HIGH message: "{module} is considered insecure." 
- """) - suffix = '.yaml' + """ + ) + suffix = ".yaml" def setUp(self): - super(TestConfigCompat, self).setUp() + super().setUp() f = self.useFixture(TempFile(self.sample, suffix=self.suffix)) self.config = config.BanditConfig(f.name) def test_converted_include(self): - profiles = self.config.get_option('profiles') - test = profiles['test_1'] - data = {'blacklist': {}, - 'exclude': set(), - 'include': set(['B101', 'B604'])} + profiles = self.config.get_option("profiles") + test = profiles["test_1"] + data = { + "blacklist": {}, + "exclude": set(), + "include": {"B101", "B604"}, + } self.assertEqual(data, test) def test_converted_exclude(self): - profiles = self.config.get_option('profiles') - test = profiles['test_4'] + profiles = self.config.get_option("profiles") + test = profiles["test_4"] - self.assertEqual(set(['B101']), test['exclude']) + self.assertEqual({"B101"}, test["exclude"]) def test_converted_blacklist_call_data(self): - profiles = self.config.get_option('profiles') - test = profiles['test_2'] - data = {'Call': [{'qualnames': ['telnetlib'], - 'level': 'HIGH', - 'message': '{name} is considered insecure.', - 'name': 'telnet'}]} - - self.assertEqual(data, test['blacklist']) + profiles = self.config.get_option("profiles") + test = profiles["test_2"] + data = { + "Call": [ + { + "qualnames": ["telnetlib"], + "level": "HIGH", + "message": "{name} is considered insecure.", + "name": "telnet", + } + ] + } + + self.assertEqual(data, test["blacklist"]) def test_converted_blacklist_import_data(self): - profiles = self.config.get_option('profiles') - test = profiles['test_3'] - data = [{'message': '{name} library appears to be in use.', - 'name': 'pickle', - 'qualnames': ['pickle.loads']}] + profiles = self.config.get_option("profiles") + test = profiles["test_3"] + data = [ + { + "message": "{name} library appears to be in use.", + "name": "pickle", + "qualnames": ["pickle.loads"], + } + ] - self.assertEqual(data, test['blacklist']['Call']) - 
self.assertEqual(data, test['blacklist']['Import']) - self.assertEqual(data, test['blacklist']['ImportFrom']) + self.assertEqual(data, test["blacklist"]["Call"]) + self.assertEqual(data, test["blacklist"]["Import"]) + self.assertEqual(data, test["blacklist"]["ImportFrom"]) def test_converted_blacklist_call_test(self): - profiles = self.config.get_option('profiles') - test = profiles['test_2'] + profiles = self.config.get_option("profiles") + test = profiles["test_2"] - self.assertEqual(set(['B001']), test['include']) + self.assertEqual({"B001"}, test["include"]) def test_converted_blacklist_import_test(self): - profiles = self.config.get_option('profiles') - test = profiles['test_3'] + profiles = self.config.get_option("profiles") + test = profiles["test_3"] - self.assertEqual(set(['B001']), test['include']) + self.assertEqual({"B001"}, test["include"]) def test_converted_exclude_blacklist(self): - profiles = self.config.get_option('profiles') - test = profiles['test_5'] + profiles = self.config.get_option("profiles") + test = profiles["test_5"] - self.assertEqual(set(['B001']), test['exclude']) + self.assertEqual({"B001"}, test["exclude"]) def test_deprecation_message(self): - msg = ("Config file '%s' contains deprecated legacy config data. " - "Please consider upgrading to the new config format. The tool " - "'bandit-config-generator' can help you with this. Support for " - "legacy configs will be removed in a future bandit version.") - - with mock.patch('bandit.core.config.LOG.warning') as m: + msg = ( + "Config file '%s' contains deprecated legacy config data. " + "Please consider upgrading to the new config format. The tool " + "'bandit-config-generator' can help you with this. Support for " + "legacy configs will be removed in a future bandit version." 
+ ) + + with mock.patch("bandit.core.config.LOG.warning") as m: self.config._config = {"profiles": {}} - self.config.validate('') - self.assertEqual((msg, ''), m.call_args_list[0][0]) + self.config.validate("") + self.assertEqual((msg, ""), m.call_args_list[0][0]) def test_blacklist_error(self): - msg = (" : Config file has an include or exclude reference to legacy " - "test '%s' but no configuration data for it. Configuration " - "data is required for this test. Please consider switching to " - "the new config file format, the tool " - "'bandit-config-generator' can help you with this.") - - for name in ["blacklist_call", - "blacklist_imports", - "blacklist_imports_func"]: - - self.config._config = ( - {"profiles": {"test": {"include": [name]}}}) + msg = ( + " : Config file has an include or exclude reference to legacy " + "test '%s' but no configuration data for it. Configuration " + "data is required for this test. Please consider switching to " + "the new config file format, the tool " + "'bandit-config-generator' can help you with this." + ) + + for name in [ + "blacklist_call", + "blacklist_imports", + "blacklist_imports_func", + ]: + + self.config._config = {"profiles": {"test": {"include": [name]}}} try: - self.config.validate('') + self.config.validate("") except utils.ConfigError as e: self.assertEqual(msg % name, e.message) @@ -258,7 +282,8 @@ def test_bad_yaml(self): class TestTomlConfig(TestConfigCompat): - sample = textwrap.dedent(""" + sample = textwrap.dedent( + """ [tool.bandit.profiles.test_1] include = [ "any_other_function_with_shell_equals_true", @@ -291,5 +316,6 @@ class TestTomlConfig(TestConfigCompat): imports = ["telnetlib"] level = "HIGH" message = "{module} is considered insecure." 
- """) - suffix = '.toml' + """ + ) + suffix = ".toml" diff --git a/tests/unit/core/test_context.py b/tests/unit/core/test_context.py index c910c9f34..23b3436da 100644 --- a/tests/unit/core/test_context.py +++ b/tests/unit/core/test_context.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2015 Red Hat, Inc. # # SPDX-License-Identifier: Apache-2.0 - import ast from unittest import mock @@ -13,7 +11,6 @@ class ContextTests(testtools.TestCase): - def test_context_create(self): ref_context = mock.Mock() new_context = context.Context(context_object=ref_context) @@ -23,24 +20,24 @@ def test_context_create(self): self.assertIsInstance(new_context._context, dict) def test_repr(self): - ref_object = dict(spam='eggs') - expected_repr = ''.format(ref_object) + ref_object = dict(spam="eggs") + expected_repr = f"" new_context = context.Context(context_object=ref_object) self.assertEqual(expected_repr, repr(new_context)) - @mock.patch('bandit.core.context.Context._get_literal_value') + @mock.patch("bandit.core.context.Context._get_literal_value") def test_call_args(self, get_literal_value): - get_literal_value.return_value = 'eggs' + get_literal_value.return_value = "eggs" ref_call = mock.Mock() - ref_call.args = [mock.Mock(attr='spam'), 'eggs'] + ref_call.args = [mock.Mock(attr="spam"), "eggs"] ref_context = dict(call=ref_call) new_context = context.Context(context_object=ref_context) - expected_args = ['spam', 'eggs'] + expected_args = ["spam", "eggs"] self.assertListEqual(expected_args, new_context.call_args) def test_call_args_count(self): ref_call = mock.Mock() - ref_call.args = ['spam', 'eggs'] + ref_call.args = ["spam", "eggs"] ref_context = dict(call=ref_call) new_context = context.Context(context_object=ref_context) self.assertEqual(len(ref_call.args), new_context.call_args_count) @@ -53,7 +50,7 @@ def test_call_args_count(self): self.assertIsNone(new_context.call_args_count) def test_call_function_name(self): - expected_string = 'spam' + expected_string = 
"spam" ref_context = dict(name=expected_string) new_context = context.Context(context_object=ref_context) self.assertEqual(expected_string, new_context.call_function_name) @@ -62,7 +59,7 @@ def test_call_function_name(self): self.assertIsNone(new_context.call_function_name) def test_call_function_name_qual(self): - expected_string = 'spam' + expected_string = "spam" ref_context = dict(qualname=expected_string) new_context = context.Context(context_object=ref_context) self.assertEqual(expected_string, new_context.call_function_name_qual) @@ -70,16 +67,16 @@ def test_call_function_name_qual(self): new_context = context.Context() self.assertIsNone(new_context.call_function_name_qual) - @mock.patch('bandit.core.context.Context._get_literal_value') + @mock.patch("bandit.core.context.Context._get_literal_value") def test_call_keywords(self, get_literal_value): - get_literal_value.return_value = 'eggs' - ref_keyword1 = mock.Mock(arg='arg1', value=mock.Mock(attr='spam')) - ref_keyword2 = mock.Mock(arg='arg2', value='eggs') + get_literal_value.return_value = "eggs" + ref_keyword1 = mock.Mock(arg="arg1", value=mock.Mock(attr="spam")) + ref_keyword2 = mock.Mock(arg="arg2", value="eggs") ref_call = mock.Mock() ref_call.keywords = [ref_keyword1, ref_keyword2] ref_context = dict(call=ref_call) new_context = context.Context(context_object=ref_context) - expected_dict = dict(arg1='spam', arg2='eggs') + expected_dict = dict(arg1="spam", arg2="eggs") self.assertDictEqual(expected_dict, new_context.call_keywords) ref_context = dict(call=None) @@ -90,7 +87,7 @@ def test_call_keywords(self, get_literal_value): self.assertIsNone(new_context.call_keywords) def test_node(self): - expected_node = 'spam' + expected_node = "spam" ref_context = dict(node=expected_node) new_context = context.Context(context_object=ref_context) self.assertEqual(expected_node, new_context.node) @@ -99,7 +96,7 @@ def test_node(self): self.assertIsNone(new_context.node) def test_string_val(self): - expected_string 
= 'spam' + expected_string = "spam" ref_context = dict(str=expected_string) new_context = context.Context(context_object=ref_context) self.assertEqual(expected_string, new_context.string_val) @@ -108,7 +105,7 @@ def test_string_val(self): self.assertIsNone(new_context.string_val) def test_statement(self): - expected_string = 'spam' + expected_string = "spam" ref_context = dict(statement=expected_string) new_context = context.Context(context_object=ref_context) self.assertEqual(expected_string, new_context.statement) @@ -116,13 +113,13 @@ def test_statement(self): new_context = context.Context() self.assertIsNone(new_context.statement) - @mock.patch('bandit.core.utils.get_qual_attr') + @mock.patch("bandit.core.utils.get_qual_attr") def test_function_def_defaults_qual(self, get_qual_attr): - get_qual_attr.return_value = 'spam' - ref_node = mock.Mock(args=mock.Mock(defaults=['spam'])) + get_qual_attr.return_value = "spam" + ref_node = mock.Mock(args=mock.Mock(defaults=["spam"])) ref_context = dict(node=ref_node, import_aliases=None) new_context = context.Context(context_object=ref_context) - self.assertListEqual(['spam'], new_context.function_def_defaults_qual) + self.assertListEqual(["spam"], new_context.function_def_defaults_qual) ref_node = mock.Mock(args=mock.Mock(defaults=[])) ref_context = dict(node=ref_node, import_aliases=None) @@ -139,77 +136,81 @@ def test__get_literal_value(self): expected = value.n self.assertEqual(expected, new_context._get_literal_value(value)) - value = ast.Str('spam') + value = ast.Str("spam") expected = value.s self.assertEqual(expected, new_context._get_literal_value(value)) - value = ast.List([ast.Str('spam'), ast.Num(42)], ast.Load()) - expected = [ast.Str('spam').s, ast.Num(42).n] + value = ast.List([ast.Str("spam"), ast.Num(42)], ast.Load()) + expected = [ast.Str("spam").s, ast.Num(42).n] self.assertListEqual(expected, new_context._get_literal_value(value)) - value = ast.Tuple([ast.Str('spam'), ast.Num(42)], ast.Load()) - 
expected = (ast.Str('spam').s, ast.Num(42).n) + value = ast.Tuple([ast.Str("spam"), ast.Num(42)], ast.Load()) + expected = (ast.Str("spam").s, ast.Num(42).n) self.assertTupleEqual(expected, new_context._get_literal_value(value)) - value = ast.Set([ast.Str('spam'), ast.Num(42)]) - expected = set([ast.Str('spam').s, ast.Num(42).n]) + value = ast.Set([ast.Str("spam"), ast.Num(42)]) + expected = {ast.Str("spam").s, ast.Num(42).n} self.assertSetEqual(expected, new_context._get_literal_value(value)) - value = ast.Dict(['spam', 'eggs'], [42, 'foo']) - expected = dict(spam=42, eggs='foo') + value = ast.Dict(["spam", "eggs"], [42, "foo"]) + expected = dict(spam=42, eggs="foo") self.assertDictEqual(expected, new_context._get_literal_value(value)) value = ast.Ellipsis() self.assertIsNone(new_context._get_literal_value(value)) - value = ast.Name('spam', ast.Load()) + value = ast.Name("spam", ast.Load()) expected = value.id self.assertEqual(expected, new_context._get_literal_value(value)) - value = ast.Bytes(b'spam') + value = ast.Bytes(b"spam") expected = value.s self.assertEqual(expected, new_context._get_literal_value(value)) self.assertIsNone(new_context._get_literal_value(None)) - @mock.patch('bandit.core.context.Context.call_keywords', - new_callable=mock.PropertyMock) + @mock.patch( + "bandit.core.context.Context.call_keywords", + new_callable=mock.PropertyMock, + ) def test_check_call_arg_value(self, call_keywords): new_context = context.Context() - call_keywords.return_value = dict(spam='eggs') - self.assertTrue(new_context.check_call_arg_value('spam', 'eggs')) - self.assertTrue(new_context.check_call_arg_value('spam', - ['spam', 'eggs'])) - self.assertFalse(new_context.check_call_arg_value('spam', 'spam')) - self.assertFalse(new_context.check_call_arg_value('spam')) - self.assertFalse(new_context.check_call_arg_value('eggs')) + call_keywords.return_value = dict(spam="eggs") + self.assertTrue(new_context.check_call_arg_value("spam", "eggs")) + self.assertTrue( + 
new_context.check_call_arg_value("spam", ["spam", "eggs"]) + ) + self.assertFalse(new_context.check_call_arg_value("spam", "spam")) + self.assertFalse(new_context.check_call_arg_value("spam")) + self.assertFalse(new_context.check_call_arg_value("eggs")) new_context = context.Context() self.assertIsNone(new_context.check_call_arg_value(None)) - @mock.patch('bandit.core.context.Context.node', - new_callable=mock.PropertyMock) + @mock.patch( + "bandit.core.context.Context.node", new_callable=mock.PropertyMock + ) def test_get_lineno_for_call_arg(self, node): expected_lineno = 42 - keyword1 = mock.Mock(arg='spam', - value=mock.Mock(lineno=expected_lineno)) + keyword1 = mock.Mock( + arg="spam", value=mock.Mock(lineno=expected_lineno) + ) node.return_value = mock.Mock(keywords=[keyword1]) new_context = context.Context() - actual_lineno = new_context.get_lineno_for_call_arg('spam') + actual_lineno = new_context.get_lineno_for_call_arg("spam") self.assertEqual(expected_lineno, actual_lineno) new_context = context.Context() - missing_lineno = new_context.get_lineno_for_call_arg('eggs') + missing_lineno = new_context.get_lineno_for_call_arg("eggs") self.assertIsNone(missing_lineno) def test_get_call_arg_at_position(self): - expected_arg = 'spam' + expected_arg = "spam" ref_call = mock.Mock() ref_call.args = [ast.Str(expected_arg)] ref_context = dict(call=ref_call) new_context = context.Context(context_object=ref_context) - self.assertEqual(expected_arg, - new_context.get_call_arg_at_position(0)) + self.assertEqual(expected_arg, new_context.get_call_arg_at_position(0)) self.assertIsNone(new_context.get_call_arg_at_position(1)) ref_call = mock.Mock() @@ -222,37 +223,37 @@ def test_get_call_arg_at_position(self): self.assertIsNone(new_context.get_call_arg_at_position(0)) def test_is_module_being_imported(self): - ref_context = dict(module='spam') + ref_context = dict(module="spam") new_context = context.Context(context_object=ref_context) - 
self.assertTrue(new_context.is_module_being_imported('spam')) - self.assertFalse(new_context.is_module_being_imported('eggs')) + self.assertTrue(new_context.is_module_being_imported("spam")) + self.assertFalse(new_context.is_module_being_imported("eggs")) new_context = context.Context() - self.assertFalse(new_context.is_module_being_imported('spam')) + self.assertFalse(new_context.is_module_being_imported("spam")) def test_is_module_imported_exact(self): - ref_context = dict(imports=['spam']) + ref_context = dict(imports=["spam"]) new_context = context.Context(context_object=ref_context) - self.assertTrue(new_context.is_module_imported_exact('spam')) - self.assertFalse(new_context.is_module_imported_exact('eggs')) + self.assertTrue(new_context.is_module_imported_exact("spam")) + self.assertFalse(new_context.is_module_imported_exact("eggs")) new_context = context.Context() - self.assertFalse(new_context.is_module_being_imported('spam')) + self.assertFalse(new_context.is_module_being_imported("spam")) def test_is_module_imported_like(self): - ref_context = dict(imports=[['spam'], ['eggs']]) + ref_context = dict(imports=[["spam"], ["eggs"]]) new_context = context.Context(context_object=ref_context) - self.assertTrue(new_context.is_module_imported_like('spam')) - self.assertFalse(new_context.is_module_imported_like('bacon')) + self.assertTrue(new_context.is_module_imported_like("spam")) + self.assertFalse(new_context.is_module_imported_like("bacon")) new_context = context.Context() - self.assertFalse(new_context.is_module_imported_like('spam')) + self.assertFalse(new_context.is_module_imported_like("spam")) def test_filename(self): - ref_context = dict(filename='spam.py') + ref_context = dict(filename="spam.py") new_context = context.Context(context_object=ref_context) - self.assertEqual(new_context.filename, 'spam.py') + self.assertEqual(new_context.filename, "spam.py") new_context = context.Context() self.assertIsNone(new_context.filename) diff --git 
a/tests/unit/core/test_docs_util.py b/tests/unit/core/test_docs_util.py index a71fb8834..d1b8fc6a0 100644 --- a/tests/unit/core/test_docs_util.py +++ b/tests/unit/core/test_docs_util.py @@ -1,7 +1,6 @@ # Copyright 2019 Victor Torre # # SPDX-License-Identifier: Apache-2.0 - import testtools from bandit.core.docs_utils import BASE_URL @@ -9,19 +8,21 @@ class DocsUtilTests(testtools.TestCase): - '''This set of tests exercises bandit.core.docs_util functions.''' + """This set of tests exercises bandit.core.docs_util functions.""" def test_overwrite_bib_info(self): - expected_url = BASE_URL + ("blacklists/blacklist_calls.html" - "#b304-b305-ciphers-and-modes") - self.assertEqual(get_url('B304'), get_url('B305')) - self.assertEqual(expected_url, get_url('B304')) + expected_url = BASE_URL + ( + "blacklists/blacklist_calls.html" "#b304-b305-ciphers-and-modes" + ) + self.assertEqual(get_url("B304"), get_url("B305")) + self.assertEqual(expected_url, get_url("B304")) def test_plugin_call_bib(self): expected_url = BASE_URL + "plugins/b101_assert_used.html" - self.assertEqual(expected_url, get_url('B101')) + self.assertEqual(expected_url, get_url("B101")) def test_import_call_bib(self): - expected_url = BASE_URL + ("blacklists/blacklist_imports.html" - "#b413-import-pycrypto") - self.assertEqual(expected_url, get_url('B413')) + expected_url = BASE_URL + ( + "blacklists/blacklist_imports.html" "#b413-import-pycrypto" + ) + self.assertEqual(expected_url, get_url("B413")) diff --git a/tests/unit/core/test_issue.py b/tests/unit/core/test_issue.py index da520f55c..66853cf0c 100644 --- a/tests/unit/core/test_issue.py +++ b/tests/unit/core/test_issue.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2015 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - from unittest import mock import testtools @@ -14,7 +12,6 @@ class IssueTests(testtools.TestCase): - def test_issue_create(self): new_issue = _get_issue_instance() self.assertIsInstance(new_issue, issue.Issue) @@ -22,23 +19,25 @@ def test_issue_create(self): def test_issue_str(self): test_issue = _get_issue_instance() self.assertEqual( - ("Issue: 'Test issue' from B999:bandit_plugin: Severity: MEDIUM " - "Confidence: MEDIUM at code.py:1"), - str(test_issue) + ( + "Issue: 'Test issue' from B999:bandit_plugin: Severity: MEDIUM" + " Confidence: MEDIUM at code.py:1" + ), + str(test_issue), ) def test_issue_as_dict(self): test_issue = _get_issue_instance() test_issue_dict = test_issue.as_dict(with_code=False) self.assertIsInstance(test_issue_dict, dict) - self.assertEqual('code.py', test_issue_dict['filename']) - self.assertEqual('bandit_plugin', test_issue_dict['test_name']) - self.assertEqual('B999', test_issue_dict['test_id']) - self.assertEqual('MEDIUM', test_issue_dict['issue_severity']) - self.assertEqual('MEDIUM', test_issue_dict['issue_confidence']) - self.assertEqual('Test issue', test_issue_dict['issue_text']) - self.assertEqual(1, test_issue_dict['line_number']) - self.assertEqual([], test_issue_dict['line_range']) + self.assertEqual("code.py", test_issue_dict["filename"]) + self.assertEqual("bandit_plugin", test_issue_dict["test_name"]) + self.assertEqual("B999", test_issue_dict["test_id"]) + self.assertEqual("MEDIUM", test_issue_dict["issue_severity"]) + self.assertEqual("MEDIUM", test_issue_dict["issue_confidence"]) + self.assertEqual("Test issue", test_issue_dict["issue_text"]) + self.assertEqual(1, test_issue_dict["line_number"]) + self.assertEqual([], test_issue_dict["line_range"]) def test_issue_filter_severity(self): levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH] @@ -70,15 +69,15 @@ def test_matches_issue(self): issue_c = _get_issue_instance(confidence=bandit.LOW) issue_d = _get_issue_instance() - 
issue_d.text = 'ABCD' + issue_d.text = "ABCD" issue_e = _get_issue_instance() - issue_e.fname = 'file1.py' + issue_e.fname = "file1.py" issue_f = issue_a issue_g = _get_issue_instance() - issue_g.test = 'ZZZZ' + issue_g.test = "ZZZZ" issue_h = issue_a issue_h.lineno = 12345 @@ -106,21 +105,21 @@ def test_matches_issue(self): # line number doesn't match but should pass because we don't test that self.assertEqual(issue_a, issue_h) - @mock.patch('linecache.getline') + @mock.patch("linecache.getline") def test_get_code(self, getline): - getline.return_value = b'\x08\x30' + getline.return_value = b"\x08\x30" new_issue = issue.Issue(bandit.MEDIUM, lineno=1) try: new_issue.get_code() except UnicodeDecodeError: - self.fail('Bytes not properly decoded in issue.get_code()') + self.fail("Bytes not properly decoded in issue.get_code()") def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM): - new_issue = issue.Issue(severity, confidence, 'Test issue') - new_issue.fname = 'code.py' - new_issue.test = 'bandit_plugin' - new_issue.test_id = 'B999' + new_issue = issue.Issue(severity, confidence, "Test issue") + new_issue.fname = "code.py" + new_issue.test = "bandit_plugin" + new_issue.test_id = "B999" new_issue.lineno = 1 return new_issue diff --git a/tests/unit/core/test_manager.py b/tests/unit/core/test_manager.py index c5a3b9a37..616e6cc9c 100644 --- a/tests/unit/core/test_manager.py +++ b/tests/unit/core/test_manager.py @@ -1,9 +1,7 @@ -# -*- coding:utf-8 -*- # # Copyright 2015 Hewlett-Packard Development Company, L.P. 
# # SPDX-License-Identifier: Apache-2.0 - import os from unittest import mock @@ -17,71 +15,92 @@ class ManagerTests(testtools.TestCase): - def _get_issue_instance(self, sev=constants.MEDIUM, conf=constants.MEDIUM): - new_issue = issue.Issue(sev, conf, 'Test issue') - new_issue.fname = 'code.py' - new_issue.test = 'bandit_plugin' + new_issue = issue.Issue(sev, conf, "Test issue") + new_issue.fname = "code.py" + new_issue.test = "bandit_plugin" new_issue.lineno = 1 return new_issue def setUp(self): - super(ManagerTests, self).setUp() + super().setUp() self.profile = {} - self.profile['include'] = { - 'any_other_function_with_shell_equals_true', - 'assert_used'} + self.profile["include"] = { + "any_other_function_with_shell_equals_true", + "assert_used", + } self.config = config.BanditConfig() - self.manager = manager.BanditManager(config=self.config, - agg_type='file', - debug=False, - verbose=False) + self.manager = manager.BanditManager( + config=self.config, agg_type="file", debug=False, verbose=False + ) def test_create_manager(self): # make sure we can create a manager self.assertEqual(False, self.manager.debug) self.assertEqual(False, self.manager.verbose) - self.assertEqual('file', self.manager.agg_type) + self.assertEqual("file", self.manager.agg_type) def test_create_manager_with_profile(self): # make sure we can create a manager - m = manager.BanditManager(config=self.config, agg_type='file', - debug=False, verbose=False, - profile=self.profile) + m = manager.BanditManager( + config=self.config, + agg_type="file", + debug=False, + verbose=False, + profile=self.profile, + ) self.assertEqual(False, m.debug) self.assertEqual(False, m.verbose) - self.assertEqual('file', m.agg_type) + self.assertEqual("file", m.agg_type) def test_matches_globlist(self): - self.assertTrue(manager._matches_glob_list('test', ['*tes*'])) - self.assertFalse(manager._matches_glob_list('test', ['*fes*'])) + self.assertTrue(manager._matches_glob_list("test", ["*tes*"])) + 
self.assertFalse(manager._matches_glob_list("test", ["*fes*"])) def test_is_file_included(self): - a = manager._is_file_included(path='a.py', included_globs=['*.py'], - excluded_path_strings=[], - enforce_glob=True) - - b = manager._is_file_included(path='a.dd', included_globs=['*.py'], - excluded_path_strings=[], - enforce_glob=False) - - c = manager._is_file_included(path='a.py', included_globs=['*.py'], - excluded_path_strings=['a.py'], - enforce_glob=True) - - d = manager._is_file_included(path='a.dd', included_globs=['*.py'], - excluded_path_strings=[], - enforce_glob=True) - - e = manager._is_file_included(path='x_a.py', included_globs=['*.py'], - excluded_path_strings=['x_*.py'], - enforce_glob=True) - - f = manager._is_file_included(path='x.py', included_globs=['*.py'], - excluded_path_strings=['x_*.py'], - enforce_glob=True) + a = manager._is_file_included( + path="a.py", + included_globs=["*.py"], + excluded_path_strings=[], + enforce_glob=True, + ) + + b = manager._is_file_included( + path="a.dd", + included_globs=["*.py"], + excluded_path_strings=[], + enforce_glob=False, + ) + + c = manager._is_file_included( + path="a.py", + included_globs=["*.py"], + excluded_path_strings=["a.py"], + enforce_glob=True, + ) + + d = manager._is_file_included( + path="a.dd", + included_globs=["*.py"], + excluded_path_strings=[], + enforce_glob=True, + ) + + e = manager._is_file_included( + path="x_a.py", + included_globs=["*.py"], + excluded_path_strings=["x_*.py"], + enforce_glob=True, + ) + + f = manager._is_file_included( + path="x.py", + included_globs=["*.py"], + excluded_path_strings=["x_*.py"], + enforce_glob=True, + ) self.assertTrue(a) self.assertTrue(b) self.assertFalse(c) @@ -89,19 +108,19 @@ def test_is_file_included(self): self.assertFalse(e) self.assertTrue(f) - @mock.patch('os.walk') + @mock.patch("os.walk") def test_get_files_from_dir(self, os_walk): os_walk.return_value = [ - ('/', ('a'), ()), - ('/a', (), ('a.py', 'b.py', 'c.ww')) + ("/", ("a"), ()), + 
("/a", (), ("a.py", "b.py", "c.ww")), ] - inc, exc = manager._get_files_from_dir(files_dir='', - included_globs=['*.py'], - excluded_path_strings=None) + inc, exc = manager._get_files_from_dir( + files_dir="", included_globs=["*.py"], excluded_path_strings=None + ) - self.assertEqual(set(['/a/c.ww']), exc) - self.assertEqual(set(['/a/a.py', '/a/b.py']), inc) + self.assertEqual({"/a/c.ww"}, exc) + self.assertEqual({"/a/a.py", "/a/b.py"}, inc) def test_populate_baseline_success(self): # Test populate_baseline with valid JSON @@ -121,16 +140,22 @@ def test_populate_baseline_success(self): ] } """ - issue_dictionary = {"code": "test code", "filename": "example_file.py", - "issue_severity": "low", "issue_confidence": "low", - "issue_text": "test issue", "test_name": - "some_test", "test_id": "x", "line_number": "n", - "line_range": "n-m"} + issue_dictionary = { + "code": "test code", + "filename": "example_file.py", + "issue_severity": "low", + "issue_confidence": "low", + "issue_text": "test issue", + "test_name": "some_test", + "test_id": "x", + "line_number": "n", + "line_range": "n-m", + } baseline_items = [issue.issue_from_dict(issue_dictionary)] self.manager.populate_baseline(baseline_data) self.assertEqual(baseline_items, self.manager.baseline) - @mock.patch('logging.Logger.warning') + @mock.patch("logging.Logger.warning") def test_populate_baseline_invalid_json(self, mock_logger_warning): # Test populate_baseline with invalid JSON content baseline_data = """{"data": "bad"}""" @@ -141,12 +166,14 @@ def test_populate_baseline_invalid_json(self, mock_logger_warning): def test_results_count(self): levels = [constants.LOW, constants.MEDIUM, constants.HIGH] - self.manager.results = ( - [issue.Issue(severity=level, confidence=level) - for level in levels]) + self.manager.results = [ + issue.Issue(severity=level, confidence=level) for level in levels + ] - r = [self.manager.results_count(sev_filter=level, conf_filter=level) - for level in levels] + r = [ + 
self.manager.results_count(sev_filter=level, conf_filter=level) + for level in levels + ] self.assertEqual([3, 2, 1], r) @@ -158,9 +185,10 @@ def test_output_results_invalid_format(self): conf_level = constants.LOW output_filename = os.path.join(temp_directory, "_temp_output") output_format = "invalid" - with open(output_filename, 'w') as tmp_file: - self.manager.output_results(lines, sev_level, conf_level, - tmp_file, output_format) + with open(output_filename, "w") as tmp_file: + self.manager.output_results( + lines, sev_level, conf_level, tmp_file, output_format + ) self.assertTrue(os.path.isfile(output_filename)) def test_output_results_valid_format(self): @@ -171,107 +199,112 @@ def test_output_results_valid_format(self): conf_level = constants.LOW output_filename = os.path.join(temp_directory, "_temp_output.txt") output_format = "txt" - with open(output_filename, 'w') as tmp_file: - self.manager.output_results(lines, sev_level, conf_level, - tmp_file, output_format) + with open(output_filename, "w") as tmp_file: + self.manager.output_results( + lines, sev_level, conf_level, tmp_file, output_format + ) self.assertTrue(os.path.isfile(output_filename)) - @mock.patch('os.path.isdir') + @mock.patch("os.path.isdir") def test_discover_files_recurse_skip(self, isdir): isdir.return_value = True - self.manager.discover_files(['thing'], False) + self.manager.discover_files(["thing"], False) self.assertEqual([], self.manager.files_list) self.assertEqual([], self.manager.excluded_files) - @mock.patch('os.path.isdir') + @mock.patch("os.path.isdir") def test_discover_files_recurse_files(self, isdir): isdir.return_value = True - with mock.patch.object(manager, '_get_files_from_dir') as m: - m.return_value = (set(['files']), set(['excluded'])) - self.manager.discover_files(['thing'], True) - self.assertEqual(['files'], self.manager.files_list) - self.assertEqual(['excluded'], self.manager.excluded_files) + with mock.patch.object(manager, "_get_files_from_dir") as m: + 
m.return_value = ({"files"}, {"excluded"}) + self.manager.discover_files(["thing"], True) + self.assertEqual(["files"], self.manager.files_list) + self.assertEqual(["excluded"], self.manager.excluded_files) - @mock.patch('os.path.isdir') + @mock.patch("os.path.isdir") def test_discover_files_exclude(self, isdir): isdir.return_value = False - with mock.patch.object(manager, '_is_file_included') as m: + with mock.patch.object(manager, "_is_file_included") as m: m.return_value = False - self.manager.discover_files(['thing'], True) + self.manager.discover_files(["thing"], True) self.assertEqual([], self.manager.files_list) - self.assertEqual(['thing'], self.manager.excluded_files) + self.assertEqual(["thing"], self.manager.excluded_files) - @mock.patch('os.path.isdir') + @mock.patch("os.path.isdir") def test_discover_files_exclude_dir(self, isdir): isdir.return_value = False # Test exclude dir using wildcard - self.manager.discover_files(['./x/y.py'], True, './x/*') + self.manager.discover_files(["./x/y.py"], True, "./x/*") self.assertEqual([], self.manager.files_list) - self.assertEqual(['./x/y.py'], self.manager.excluded_files) + self.assertEqual(["./x/y.py"], self.manager.excluded_files) # Test exclude dir without wildcard isdir.side_effect = [True, False] - self.manager.discover_files(['./x/y.py'], True, './x/') + self.manager.discover_files(["./x/y.py"], True, "./x/") self.assertEqual([], self.manager.files_list) - self.assertEqual(['./x/y.py'], self.manager.excluded_files) + self.assertEqual(["./x/y.py"], self.manager.excluded_files) # Test exclude dir without wildcard or trailing slash isdir.side_effect = [True, False] - self.manager.discover_files(['./x/y.py'], True, './x') + self.manager.discover_files(["./x/y.py"], True, "./x") self.assertEqual([], self.manager.files_list) - self.assertEqual(['./x/y.py'], self.manager.excluded_files) + self.assertEqual(["./x/y.py"], self.manager.excluded_files) # Test exclude dir without prefix or suffix isdir.side_effect = 
[False, False] - self.manager.discover_files(['./x/y/z.py'], True, 'y') + self.manager.discover_files(["./x/y/z.py"], True, "y") self.assertEqual([], self.manager.files_list) - self.assertEqual(['./x/y/z.py'], self.manager.excluded_files) + self.assertEqual(["./x/y/z.py"], self.manager.excluded_files) - @mock.patch('os.path.isdir') + @mock.patch("os.path.isdir") def test_discover_files_exclude_cmdline(self, isdir): isdir.return_value = False - with mock.patch.object(manager, '_is_file_included') as m: - self.manager.discover_files(['a', 'b', 'c'], True, - excluded_paths='a,b') - m.assert_called_with('c', ['*.py', '*.pyw'], ['a', 'b'], - enforce_glob=False) - - @mock.patch('os.path.isdir') + with mock.patch.object(manager, "_is_file_included") as m: + self.manager.discover_files( + ["a", "b", "c"], True, excluded_paths="a,b" + ) + m.assert_called_with( + "c", ["*.py", "*.pyw"], ["a", "b"], enforce_glob=False + ) + + @mock.patch("os.path.isdir") def test_discover_files_exclude_glob(self, isdir): isdir.return_value = False - self.manager.discover_files(['a.py', 'test_a.py', 'test.py'], True, - excluded_paths='test_*.py') - self.assertEqual(['a.py', 'test.py'], self.manager.files_list) - self.assertEqual(['test_a.py'], self.manager.excluded_files) + self.manager.discover_files( + ["a.py", "test_a.py", "test.py"], True, excluded_paths="test_*.py" + ) + self.assertEqual(["a.py", "test.py"], self.manager.files_list) + self.assertEqual(["test_a.py"], self.manager.excluded_files) - @mock.patch('os.path.isdir') + @mock.patch("os.path.isdir") def test_discover_files_include(self, isdir): isdir.return_value = False - with mock.patch.object(manager, '_is_file_included') as m: + with mock.patch.object(manager, "_is_file_included") as m: m.return_value = True - self.manager.discover_files(['thing'], True) - self.assertEqual(['thing'], self.manager.files_list) + self.manager.discover_files(["thing"], True) + self.assertEqual(["thing"], self.manager.files_list) self.assertEqual([], 
self.manager.excluded_files) def test_run_tests_keyboardinterrupt(self): # Test that bandit manager exits when there is a keyboard interrupt temp_directory = self.useFixture(fixtures.TempDir()).path - some_file = os.path.join(temp_directory, 'some_code_file.py') - with open(some_file, 'wt') as fd: - fd.write('some_code = x + 1') + some_file = os.path.join(temp_directory, "some_code_file.py") + with open(some_file, "wt") as fd: + fd.write("some_code = x + 1") self.manager.files_list = [some_file] - with mock.patch('bandit.core.metrics.Metrics.count_issues' - ) as mock_count_issues: + with mock.patch( + "bandit.core.metrics.Metrics.count_issues" + ) as mock_count_issues: mock_count_issues.side_effect = KeyboardInterrupt # assert a SystemExit with code 2 - self.assertRaisesRegex(SystemExit, '2', self.manager.run_tests) + self.assertRaisesRegex(SystemExit, "2", self.manager.run_tests) def test_run_tests_ioerror(self): # Test that a file name is skipped and added to the manager.skipped # list when there is an IOError attempting to open/read the file temp_directory = self.useFixture(fixtures.TempDir()).path - no_such_file = os.path.join(temp_directory, 'no_such_file.py') + no_such_file = os.path.join(temp_directory, "no_such_file.py") self.manager.files_list = [no_such_file] self.manager.run_tests() # since the file name and the IOError.strerror text are added to @@ -281,55 +314,67 @@ def test_run_tests_ioerror(self): def test_compare_baseline(self): issue_a = self._get_issue_instance() - issue_a.fname = 'file1.py' + issue_a.fname = "file1.py" issue_b = self._get_issue_instance() - issue_b.fname = 'file2.py' + issue_b.fname = "file2.py" issue_c = self._get_issue_instance(sev=constants.HIGH) - issue_c.fname = 'file1.py' + issue_c.fname = "file1.py" # issue c is in results, not in baseline self.assertEqual( [issue_c], - manager._compare_baseline_results([issue_a, issue_b], - [issue_a, issue_b, issue_c])) + manager._compare_baseline_results( + [issue_a, issue_b], [issue_a, 
issue_b, issue_c] + ), + ) # baseline and results are the same self.assertEqual( [], - manager._compare_baseline_results([issue_a, issue_b, issue_c], - [issue_a, issue_b, issue_c])) + manager._compare_baseline_results( + [issue_a, issue_b, issue_c], [issue_a, issue_b, issue_c] + ), + ) # results are better than baseline self.assertEqual( [], - manager._compare_baseline_results([issue_a, issue_b, issue_c], - [issue_a, issue_b])) + manager._compare_baseline_results( + [issue_a, issue_b, issue_c], [issue_a, issue_b] + ), + ) def test_find_candidate_matches(self): issue_a = self._get_issue_instance() issue_b = self._get_issue_instance() issue_c = self._get_issue_instance() - issue_c.fname = 'file1.py' + issue_c.fname = "file1.py" # issue a and b are the same, both should be returned as candidates - self.assertEqual({issue_a: [issue_a, issue_b]}, - manager._find_candidate_matches([issue_a], - [issue_a, issue_b])) + self.assertEqual( + {issue_a: [issue_a, issue_b]}, + manager._find_candidate_matches([issue_a], [issue_a, issue_b]), + ) # issue a and c are different, only a should be returned - self.assertEqual({issue_a: [issue_a]}, - manager._find_candidate_matches([issue_a], - [issue_a, issue_c])) + self.assertEqual( + {issue_a: [issue_a]}, + manager._find_candidate_matches([issue_a], [issue_a, issue_c]), + ) # c doesn't match a, empty list should be returned - self.assertEqual({issue_a: []}, - manager._find_candidate_matches([issue_a], [issue_c])) + self.assertEqual( + {issue_a: []}, + manager._find_candidate_matches([issue_a], [issue_c]), + ) # a and b match, a and b should both return a and b candidates self.assertEqual( {issue_a: [issue_a, issue_b], issue_b: [issue_a, issue_b]}, - manager._find_candidate_matches([issue_a, issue_b], - [issue_a, issue_b, issue_c])) + manager._find_candidate_matches( + [issue_a, issue_b], [issue_a, issue_b, issue_c] + ), + ) diff --git a/tests/unit/core/test_meta_ast.py b/tests/unit/core/test_meta_ast.py index 6ef61b99a..10f4b1d90 
100644 --- a/tests/unit/core/test_meta_ast.py +++ b/tests/unit/core/test_meta_ast.py @@ -1,30 +1,30 @@ # Copyright (c) 2015 VMware, Inc. # # SPDX-License-Identifier: Apache-2.0 - import testtools from bandit.core import meta_ast class BanditMetaAstTests(testtools.TestCase): - def setUp(self): - super(BanditMetaAstTests, self).setUp() + super().setUp() self.b_meta_ast = meta_ast.BanditMetaAst() - self.node = 'fake_node' - self.parent_id = 'fake_parent_id' + self.node = "fake_node" + self.parent_id = "fake_parent_id" self.depth = 1 self.b_meta_ast.add_node(self.node, self.parent_id, self.depth) self.node_id = hex(id(self.node)) def test_add_node(self): - expected = {'raw': self.node, - 'parent_id': self.parent_id, - 'depth': self.depth} + expected = { + "raw": self.node, + "parent_id": self.parent_id, + "depth": self.depth, + } self.assertEqual(expected, self.b_meta_ast.nodes[self.node_id]) def test_str(self): node = self.b_meta_ast.nodes[self.node_id] - expected = 'Node: %s\n\t%s\nLength: 1\n' % (self.node_id, node) + expected = f"Node: {self.node_id}\n\t{node}\nLength: 1\n" self.assertEqual(expected, str(self.b_meta_ast)) diff --git a/tests/unit/core/test_test_set.py b/tests/unit/core/test_test_set.py index 861d099a4..117de71ef 100644 --- a/tests/unit/core/test_test_set.py +++ b/tests/unit/core/test_test_set.py @@ -1,13 +1,11 @@ -# -*- coding:utf-8 -*- # # Copyright (c) 2016 Hewlett-Packard Development Company, L.P. # # SPDX-License-Identifier: Apache-2.0 - from unittest import mock -from stevedore import extension import testtools +from stevedore import extension from bandit.blacklists import utils from bandit.core import extension_loader @@ -15,34 +13,43 @@ from bandit.core import test_set -@test.checks('Str') -@test.test_id('B000') +@test.checks("Str") +@test.test_id("B000") def test_plugin(): sets = [] - sets.append(utils.build_conf_dict( - 'telnet', 'B401', ['telnetlib'], - 'A telnet-related module is being imported. Telnet is ' - 'considered insecure. 
Use SSH or some other encrypted protocol.', - 'HIGH' - )) - - sets.append(utils.build_conf_dict( - 'marshal', 'B302', ['marshal.load', 'marshal.loads'], - 'Deserialization with the marshal module is possibly dangerous.' - )) - - return {'Import': sets, 'ImportFrom': sets, 'Call': sets} + sets.append( + utils.build_conf_dict( + "telnet", + "B401", + ["telnetlib"], + "A telnet-related module is being imported. Telnet is " + "considered insecure. Use SSH or some other encrypted protocol.", + "HIGH", + ) + ) + + sets.append( + utils.build_conf_dict( + "marshal", + "B302", + ["marshal.load", "marshal.loads"], + "Deserialization with the marshal module is possibly dangerous.", + ) + ) + + return {"Import": sets, "ImportFrom": sets, "Call": sets} class BanditTestSetTests(testtools.TestCase): def _make_test_manager(self, plugin): return extension.ExtensionManager.make_test_instance( - [extension.Extension('test_plugin', None, test_plugin, None)]) + [extension.Extension("test_plugin", None, test_plugin, None)] + ) def setUp(self): - super(BanditTestSetTests, self).setUp() + super().setUp() mngr = self._make_test_manager(mock.Mock) - self.patchExtMan = mock.patch('stevedore.extension.ExtensionManager') + self.patchExtMan = mock.patch("stevedore.extension.ExtensionManager") self.mockExtMan = self.patchExtMan.start() self.mockExtMan.return_value = mngr self.old_ext_man = extension_loader.MANAGER @@ -52,100 +59,107 @@ def setUp(self): def tearDown(self): self.patchExtMan.stop() - super(BanditTestSetTests, self).tearDown() + super().tearDown() extension_loader.MANAGER = self.old_ext_man def test_has_defaults(self): ts = test_set.BanditTestSet(self.config) - self.assertEqual(1, len(ts.get_tests('Str'))) + self.assertEqual(1, len(ts.get_tests("Str"))) def test_profile_include_id(self): - profile = {'include': ['B000']} + profile = {"include": ["B000"]} ts = test_set.BanditTestSet(self.config, profile) - self.assertEqual(1, len(ts.get_tests('Str'))) + self.assertEqual(1, 
len(ts.get_tests("Str"))) def test_profile_exclude_id(self): - profile = {'exclude': ['B000']} + profile = {"exclude": ["B000"]} ts = test_set.BanditTestSet(self.config, profile) - self.assertEqual(0, len(ts.get_tests('Str'))) + self.assertEqual(0, len(ts.get_tests("Str"))) def test_profile_include_none(self): - profile = {'include': []} # same as no include + profile = {"include": []} # same as no include ts = test_set.BanditTestSet(self.config, profile) - self.assertEqual(1, len(ts.get_tests('Str'))) + self.assertEqual(1, len(ts.get_tests("Str"))) def test_profile_exclude_none(self): - profile = {'exclude': []} # same as no exclude + profile = {"exclude": []} # same as no exclude ts = test_set.BanditTestSet(self.config, profile) - self.assertEqual(1, len(ts.get_tests('Str'))) + self.assertEqual(1, len(ts.get_tests("Str"))) def test_profile_has_builtin_blacklist(self): ts = test_set.BanditTestSet(self.config) - self.assertEqual(1, len(ts.get_tests('Import'))) - self.assertEqual(1, len(ts.get_tests('ImportFrom'))) - self.assertEqual(1, len(ts.get_tests('Call'))) + self.assertEqual(1, len(ts.get_tests("Import"))) + self.assertEqual(1, len(ts.get_tests("ImportFrom"))) + self.assertEqual(1, len(ts.get_tests("Call"))) def test_profile_exclude_builtin_blacklist(self): - profile = {'exclude': ['B001']} + profile = {"exclude": ["B001"]} ts = test_set.BanditTestSet(self.config, profile) - self.assertEqual(0, len(ts.get_tests('Import'))) - self.assertEqual(0, len(ts.get_tests('ImportFrom'))) - self.assertEqual(0, len(ts.get_tests('Call'))) + self.assertEqual(0, len(ts.get_tests("Import"))) + self.assertEqual(0, len(ts.get_tests("ImportFrom"))) + self.assertEqual(0, len(ts.get_tests("Call"))) def test_profile_exclude_builtin_blacklist_specific(self): - profile = {'exclude': ['B302', 'B401']} + profile = {"exclude": ["B302", "B401"]} ts = test_set.BanditTestSet(self.config, profile) - self.assertEqual(0, len(ts.get_tests('Import'))) - self.assertEqual(0, 
len(ts.get_tests('ImportFrom'))) - self.assertEqual(0, len(ts.get_tests('Call'))) + self.assertEqual(0, len(ts.get_tests("Import"))) + self.assertEqual(0, len(ts.get_tests("ImportFrom"))) + self.assertEqual(0, len(ts.get_tests("Call"))) def test_profile_filter_blacklist_none(self): ts = test_set.BanditTestSet(self.config) - blacklist = ts.get_tests('Import')[0] + blacklist = ts.get_tests("Import")[0] - self.assertEqual(2, len(blacklist._config['Import'])) - self.assertEqual(2, len(blacklist._config['ImportFrom'])) - self.assertEqual(2, len(blacklist._config['Call'])) + self.assertEqual(2, len(blacklist._config["Import"])) + self.assertEqual(2, len(blacklist._config["ImportFrom"])) + self.assertEqual(2, len(blacklist._config["Call"])) def test_profile_filter_blacklist_one(self): - profile = {'exclude': ['B401']} + profile = {"exclude": ["B401"]} ts = test_set.BanditTestSet(self.config, profile) - blacklist = ts.get_tests('Import')[0] + blacklist = ts.get_tests("Import")[0] - self.assertEqual(1, len(blacklist._config['Import'])) - self.assertEqual(1, len(blacklist._config['ImportFrom'])) - self.assertEqual(1, len(blacklist._config['Call'])) + self.assertEqual(1, len(blacklist._config["Import"])) + self.assertEqual(1, len(blacklist._config["ImportFrom"])) + self.assertEqual(1, len(blacklist._config["Call"])) def test_profile_filter_blacklist_include(self): - profile = {'include': ['B001', 'B401']} + profile = {"include": ["B001", "B401"]} ts = test_set.BanditTestSet(self.config, profile) - blacklist = ts.get_tests('Import')[0] + blacklist = ts.get_tests("Import")[0] - self.assertEqual(1, len(blacklist._config['Import'])) - self.assertEqual(1, len(blacklist._config['ImportFrom'])) - self.assertEqual(1, len(blacklist._config['Call'])) + self.assertEqual(1, len(blacklist._config["Import"])) + self.assertEqual(1, len(blacklist._config["ImportFrom"])) + self.assertEqual(1, len(blacklist._config["Call"])) def test_profile_filter_blacklist_all(self): - profile = {'exclude': 
['B401', 'B302']} + profile = {"exclude": ["B401", "B302"]} ts = test_set.BanditTestSet(self.config, profile) # if there is no blacklist data for a node type then we wont add a # blacklist test to it, as this would be pointless. - self.assertEqual(0, len(ts.get_tests('Import'))) - self.assertEqual(0, len(ts.get_tests('ImportFrom'))) - self.assertEqual(0, len(ts.get_tests('Call'))) + self.assertEqual(0, len(ts.get_tests("Import"))) + self.assertEqual(0, len(ts.get_tests("ImportFrom"))) + self.assertEqual(0, len(ts.get_tests("Call"))) def test_profile_blacklist_compat(self): - data = [utils.build_conf_dict( - 'marshal', 'B302', ['marshal.load', 'marshal.loads'], - ('Deserialization with the marshal module is possibly ' - 'dangerous.'))] - - profile = {'include': ['B001'], 'blacklist': {'Call': data}} + data = [ + utils.build_conf_dict( + "marshal", + "B302", + ["marshal.load", "marshal.loads"], + ( + "Deserialization with the marshal module is possibly " + "dangerous." + ), + ) + ] + + profile = {"include": ["B001"], "blacklist": {"Call": data}} ts = test_set.BanditTestSet(self.config, profile) - blacklist = ts.get_tests('Call')[0] + blacklist = ts.get_tests("Call")[0] - self.assertNotIn('Import', blacklist._config) - self.assertNotIn('ImportFrom', blacklist._config) - self.assertEqual(1, len(blacklist._config['Call'])) + self.assertNotIn("Import", blacklist._config) + self.assertNotIn("ImportFrom", blacklist._config) + self.assertEqual(1, len(blacklist._config["Call"])) diff --git a/tests/unit/core/test_util.py b/tests/unit/core/test_util.py index 13c31f6a6..374191c7d 100644 --- a/tests/unit/core/test_util.py +++ b/tests/unit/core/test_util.py @@ -1,10 +1,8 @@ -# -*- coding:utf-8 -*- # # Copyright 2014 Hewlett-Packard Development Company, L.P. # Copyright 2015 Nebula, Inc. 
# # SPDX-License-Identifier: Apache-2.0 - import ast import os import shutil @@ -17,199 +15,249 @@ def _touch(path): - '''Create an empty file at ``path``.''' - open(path, 'w').close() + """Create an empty file at ``path``.""" + open(path, "w").close() class UtilTests(testtools.TestCase): - '''This set of tests exercises bandit.core.util functions.''' + """This set of tests exercises bandit.core.util functions.""" def setUp(self): - super(UtilTests, self).setUp() + super().setUp() self._setup_get_module_qualname_from_path() def _setup_get_module_qualname_from_path(self): - '''Setup a fake directory for testing get_module_qualname_from_path(). + """Setup a fake directory for testing get_module_qualname_from_path(). Create temporary directory and then create fake .py files within directory structure. We setup test cases for a typical module, a path misssing a middle __init__.py, no __init__.py anywhere in path, symlinking .py files. - ''' + """ self.tempdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tempdir) self.reltempdir = os.path.relpath(self.tempdir) # good/a/b/c/test_typical.py - os.makedirs(os.path.join( - self.tempdir, 'good', 'a', 'b', 'c'), 0o755) - _touch(os.path.join(self.tempdir, 'good', '__init__.py')) - _touch(os.path.join(self.tempdir, 'good', 'a', '__init__.py')) - _touch(os.path.join( - self.tempdir, 'good', 'a', 'b', '__init__.py')) - _touch(os.path.join( - self.tempdir, 'good', 'a', 'b', 'c', '__init__.py')) - _touch(os.path.join( - self.tempdir, 'good', 'a', 'b', 'c', 'test_typical.py')) + os.makedirs(os.path.join(self.tempdir, "good", "a", "b", "c"), 0o755) + _touch(os.path.join(self.tempdir, "good", "__init__.py")) + _touch(os.path.join(self.tempdir, "good", "a", "__init__.py")) + _touch(os.path.join(self.tempdir, "good", "a", "b", "__init__.py")) + _touch( + os.path.join(self.tempdir, "good", "a", "b", "c", "__init__.py") + ) + _touch( + os.path.join( + self.tempdir, "good", "a", "b", "c", "test_typical.py" + ) + ) # 
missingmid/a/b/c/test_missingmid.py - os.makedirs(os.path.join( - self.tempdir, 'missingmid', 'a', 'b', 'c'), 0o755) - _touch(os.path.join(self.tempdir, 'missingmid', '__init__.py')) + os.makedirs( + os.path.join(self.tempdir, "missingmid", "a", "b", "c"), 0o755 + ) + _touch(os.path.join(self.tempdir, "missingmid", "__init__.py")) # no missingmid/a/__init__.py - _touch(os.path.join( - self.tempdir, 'missingmid', 'a', 'b', '__init__.py')) - _touch(os.path.join( - self.tempdir, 'missingmid', 'a', 'b', 'c', '__init__.py')) - _touch(os.path.join( - self.tempdir, 'missingmid', 'a', 'b', 'c', 'test_missingmid.py')) + _touch( + os.path.join(self.tempdir, "missingmid", "a", "b", "__init__.py") + ) + _touch( + os.path.join( + self.tempdir, "missingmid", "a", "b", "c", "__init__.py" + ) + ) + _touch( + os.path.join( + self.tempdir, "missingmid", "a", "b", "c", "test_missingmid.py" + ) + ) # missingend/a/b/c/test_missingend.py - os.makedirs(os.path.join( - self.tempdir, 'missingend', 'a', 'b', 'c'), 0o755) - _touch(os.path.join( - self.tempdir, 'missingend', '__init__.py')) - _touch(os.path.join( - self.tempdir, 'missingend', 'a', 'b', '__init__.py')) + os.makedirs( + os.path.join(self.tempdir, "missingend", "a", "b", "c"), 0o755 + ) + _touch(os.path.join(self.tempdir, "missingend", "__init__.py")) + _touch( + os.path.join(self.tempdir, "missingend", "a", "b", "__init__.py") + ) # no missingend/a/b/c/__init__.py - _touch(os.path.join( - self.tempdir, 'missingend', 'a', 'b', 'c', 'test_missingend.py')) + _touch( + os.path.join( + self.tempdir, "missingend", "a", "b", "c", "test_missingend.py" + ) + ) # syms/a/bsym/c/test_typical.py - os.makedirs(os.path.join(self.tempdir, 'syms', 'a'), 0o755) - _touch(os.path.join(self.tempdir, 'syms', '__init__.py')) - _touch(os.path.join(self.tempdir, 'syms', 'a', '__init__.py')) - os.symlink(os.path.join(self.tempdir, 'good', 'a', 'b'), - os.path.join(self.tempdir, 'syms', 'a', 'bsym')) + os.makedirs(os.path.join(self.tempdir, "syms", "a"), 
0o755) + _touch(os.path.join(self.tempdir, "syms", "__init__.py")) + _touch(os.path.join(self.tempdir, "syms", "a", "__init__.py")) + os.symlink( + os.path.join(self.tempdir, "good", "a", "b"), + os.path.join(self.tempdir, "syms", "a", "bsym"), + ) def test_get_module_qualname_from_path_abs_typical(self): - '''Test get_module_qualname_from_path with typical absolute paths.''' + """Test get_module_qualname_from_path with typical absolute paths.""" - name = b_utils.get_module_qualname_from_path(os.path.join( - self.tempdir, 'good', 'a', 'b', 'c', 'test_typical.py')) - self.assertEqual('good.a.b.c.test_typical', name) + name = b_utils.get_module_qualname_from_path( + os.path.join( + self.tempdir, "good", "a", "b", "c", "test_typical.py" + ) + ) + self.assertEqual("good.a.b.c.test_typical", name) def test_get_module_qualname_from_path_with_dot(self): - '''Test get_module_qualname_from_path with a "." .''' + """Test get_module_qualname_from_path with a "." .""" - name = b_utils.get_module_qualname_from_path(os.path.join( - '.', '__init__.py')) + name = b_utils.get_module_qualname_from_path( + os.path.join(".", "__init__.py") + ) - self.assertEqual('__init__', name) + self.assertEqual("__init__", name) def test_get_module_qualname_from_path_abs_missingmid(self): # Test get_module_qualname_from_path with missing module # __init__.py - name = b_utils.get_module_qualname_from_path(os.path.join( - self.tempdir, 'missingmid', 'a', 'b', 'c', - 'test_missingmid.py')) - self.assertEqual('b.c.test_missingmid', name) + name = b_utils.get_module_qualname_from_path( + os.path.join( + self.tempdir, "missingmid", "a", "b", "c", "test_missingmid.py" + ) + ) + self.assertEqual("b.c.test_missingmid", name) def test_get_module_qualname_from_path_abs_missingend(self): # Test get_module_qualname_from_path with no __init__.py # last dir''' - name = b_utils.get_module_qualname_from_path(os.path.join( - self.tempdir, 'missingend', 'a', 'b', 'c', - 'test_missingend.py')) - 
self.assertEqual('test_missingend', name) + name = b_utils.get_module_qualname_from_path( + os.path.join( + self.tempdir, "missingend", "a", "b", "c", "test_missingend.py" + ) + ) + self.assertEqual("test_missingend", name) def test_get_module_qualname_from_path_abs_syms(self): - '''Test get_module_qualname_from_path with symlink in path.''' + """Test get_module_qualname_from_path with symlink in path.""" - name = b_utils.get_module_qualname_from_path(os.path.join( - self.tempdir, 'syms', 'a', 'bsym', 'c', 'test_typical.py')) - self.assertEqual('syms.a.bsym.c.test_typical', name) + name = b_utils.get_module_qualname_from_path( + os.path.join( + self.tempdir, "syms", "a", "bsym", "c", "test_typical.py" + ) + ) + self.assertEqual("syms.a.bsym.c.test_typical", name) def test_get_module_qualname_from_path_rel_typical(self): - '''Test get_module_qualname_from_path with typical relative paths.''' + """Test get_module_qualname_from_path with typical relative paths.""" - name = b_utils.get_module_qualname_from_path(os.path.join( - self.reltempdir, 'good', 'a', 'b', 'c', 'test_typical.py')) - self.assertEqual('good.a.b.c.test_typical', name) + name = b_utils.get_module_qualname_from_path( + os.path.join( + self.reltempdir, "good", "a", "b", "c", "test_typical.py" + ) + ) + self.assertEqual("good.a.b.c.test_typical", name) def test_get_module_qualname_from_path_rel_missingmid(self): # Test get_module_qualname_from_path with module __init__.py # missing and relative paths - name = b_utils.get_module_qualname_from_path(os.path.join( - self.reltempdir, 'missingmid', 'a', 'b', 'c', - 'test_missingmid.py')) - self.assertEqual('b.c.test_missingmid', name) + name = b_utils.get_module_qualname_from_path( + os.path.join( + self.reltempdir, + "missingmid", + "a", + "b", + "c", + "test_missingmid.py", + ) + ) + self.assertEqual("b.c.test_missingmid", name) def test_get_module_qualname_from_path_rel_missingend(self): # Test get_module_qualname_from_path with __init__.py missing from # 
last dir and using relative paths - name = b_utils.get_module_qualname_from_path(os.path.join( - self.reltempdir, 'missingend', 'a', 'b', 'c', - 'test_missingend.py')) - self.assertEqual('test_missingend', name) + name = b_utils.get_module_qualname_from_path( + os.path.join( + self.reltempdir, + "missingend", + "a", + "b", + "c", + "test_missingend.py", + ) + ) + self.assertEqual("test_missingend", name) def test_get_module_qualname_from_path_rel_syms(self): - '''Test get_module_qualname_from_path with symbolic relative paths.''' + """Test get_module_qualname_from_path with symbolic relative paths.""" - name = b_utils.get_module_qualname_from_path(os.path.join( - self.reltempdir, 'syms', 'a', 'bsym', 'c', 'test_typical.py')) - self.assertEqual('syms.a.bsym.c.test_typical', name) + name = b_utils.get_module_qualname_from_path( + os.path.join( + self.reltempdir, "syms", "a", "bsym", "c", "test_typical.py" + ) + ) + self.assertEqual("syms.a.bsym.c.test_typical", name) def test_get_module_qualname_from_path_sys(self): - '''Test get_module_qualname_from_path with system module paths.''' + """Test get_module_qualname_from_path with system module paths.""" name = b_utils.get_module_qualname_from_path(os.__file__) - self.assertEqual('os', name) + self.assertEqual("os", name) # This will fail because of magic for os.path. Not sure how to fix. 
# name = b_utils.get_module_qualname_from_path(os.path.__file__) # self.assertEqual(name, 'os.path') def test_get_module_qualname_from_path_invalid_path(self): - '''Test get_module_qualname_from_path with invalid path.''' + """Test get_module_qualname_from_path with invalid path.""" - name = b_utils.get_module_qualname_from_path('/a/b/c/d/e.py') - self.assertEqual('e', name) + name = b_utils.get_module_qualname_from_path("/a/b/c/d/e.py") + self.assertEqual("e", name) def test_get_module_qualname_from_path_dir(self): - '''Test get_module_qualname_from_path with dir path.''' + """Test get_module_qualname_from_path with dir path.""" - self.assertRaises(b_utils.InvalidModulePath, - b_utils.get_module_qualname_from_path, '/tmp/') + self.assertRaises( + b_utils.InvalidModulePath, + b_utils.get_module_qualname_from_path, + "/tmp/", + ) def test_namespace_path_join(self): - p = b_utils.namespace_path_join('base1.base2', 'name') - self.assertEqual('base1.base2.name', p) + p = b_utils.namespace_path_join("base1.base2", "name") + self.assertEqual("base1.base2.name", p) def test_namespace_path_split(self): - (head, tail) = b_utils.namespace_path_split('base1.base2.name') - self.assertEqual('base1.base2', head) - self.assertEqual('name', tail) + (head, tail) = b_utils.namespace_path_split("base1.base2.name") + self.assertEqual("base1.base2", head) + self.assertEqual("name", tail) def test_get_call_name1(self): - '''Gets a qualified call name.''' - tree = ast.parse('a.b.c.d(x,y)').body[0].value + """Gets a qualified call name.""" + tree = ast.parse("a.b.c.d(x,y)").body[0].value name = b_utils.get_call_name(tree, {}) - self.assertEqual('a.b.c.d', name) + self.assertEqual("a.b.c.d", name) def test_get_call_name2(self): - '''Gets qualified call name and resolves aliases.''' - tree = ast.parse('a.b.c.d(x,y)').body[0].value + """Gets qualified call name and resolves aliases.""" + tree = ast.parse("a.b.c.d(x,y)").body[0].value - name = b_utils.get_call_name(tree, {'a': 'alias.x.y'}) - 
self.assertEqual('alias.x.y.b.c.d', name) + name = b_utils.get_call_name(tree, {"a": "alias.x.y"}) + self.assertEqual("alias.x.y.b.c.d", name) - name = b_utils.get_call_name(tree, {'a.b': 'alias.x.y'}) - self.assertEqual('alias.x.y.c.d', name) + name = b_utils.get_call_name(tree, {"a.b": "alias.x.y"}) + self.assertEqual("alias.x.y.c.d", name) - name = b_utils.get_call_name(tree, {'a.b.c.d': 'alias.x.y'}) - self.assertEqual('alias.x.y', name) + name = b_utils.get_call_name(tree, {"a.b.c.d": "alias.x.y"}) + self.assertEqual("alias.x.y", name) def test_get_call_name3(self): - '''Getting name for a complex call.''' - tree = ast.parse('a.list[0](x,y)').body[0].value + """Getting name for a complex call.""" + tree = ast.parse("a.list[0](x,y)").body[0].value name = b_utils._get_attr_qual_name(tree, {}) - self.assertEqual('', name) + self.assertEqual("", name) # TODO(ljfisher) At best we might be able to get: # self.assertEqual(name, 'a.list[0]') @@ -253,41 +301,44 @@ def test_escaped_representation_mixed(self): self.assertEqual(res, b"ascii\\x00\\uffff") def test_deepgetattr(self): - a = type('', (), {}) - a.b = type('', (), {}) - a.b.c = type('', (), {}) - a.b.c.d = 'deep value' - a.b.c.d2 = 'deep value 2' - a.b.c.e = 'a.b.c' - self.assertEqual('deep value', b_utils.deepgetattr(a.b.c, 'd')) - self.assertEqual('deep value 2', b_utils.deepgetattr(a.b.c, 'd2')) - self.assertEqual('a.b.c', b_utils.deepgetattr(a.b.c, 'e')) - self.assertEqual('deep value', b_utils.deepgetattr(a, 'b.c.d')) - self.assertEqual('deep value 2', b_utils.deepgetattr(a, 'b.c.d2')) - self.assertRaises(AttributeError, b_utils.deepgetattr, a.b, 'z') + a = type("", (), {}) + a.b = type("", (), {}) + a.b.c = type("", (), {}) + a.b.c.d = "deep value" + a.b.c.d2 = "deep value 2" + a.b.c.e = "a.b.c" + self.assertEqual("deep value", b_utils.deepgetattr(a.b.c, "d")) + self.assertEqual("deep value 2", b_utils.deepgetattr(a.b.c, "d2")) + self.assertEqual("a.b.c", b_utils.deepgetattr(a.b.c, "e")) + 
self.assertEqual("deep value", b_utils.deepgetattr(a, "b.c.d")) + self.assertEqual("deep value 2", b_utils.deepgetattr(a, "b.c.d2")) + self.assertRaises(AttributeError, b_utils.deepgetattr, a.b, "z") def test_parse_ini_file(self): - tests = [{'content': "[bandit]\nexclude=/abc,/def", - 'expected': {'exclude': '/abc,/def'}}, - - {'content': '[Blabla]\nsomething=something', - 'expected': None}] + tests = [ + { + "content": "[bandit]\nexclude=/abc,/def", + "expected": {"exclude": "/abc,/def"}, + }, + {"content": "[Blabla]\nsomething=something", "expected": None}, + ] - with tempfile.NamedTemporaryFile('r+') as t: + with tempfile.NamedTemporaryFile("r+") as t: for test in tests: - with open(t.name, 'w') as f: - f.write(test['content']) + with open(t.name, "w") as f: + f.write(test["content"]) - self.assertEqual(b_utils.parse_ini_file(t.name), - test['expected']) + self.assertEqual( + b_utils.parse_ini_file(t.name), test["expected"] + ) def test_check_ast_node_good(self): node = b_utils.check_ast_node("Call") self.assertEqual("Call", node) def test_check_ast_node_bad_node(self): - self.assertRaises(TypeError, b_utils.check_ast_node, 'Derp') + self.assertRaises(TypeError, b_utils.check_ast_node, "Derp") def test_check_ast_node_bad_type(self): - self.assertRaises(TypeError, b_utils.check_ast_node, 'walk') + self.assertRaises(TypeError, b_utils.check_ast_node, "walk") diff --git a/tests/unit/formatters/test_csv.py b/tests/unit/formatters/test_csv.py index ee45fff62..77dc9bc48 100644 --- a/tests/unit/formatters/test_csv.py +++ b/tests/unit/formatters/test_csv.py @@ -1,7 +1,6 @@ # Copyright (c) 2015 VMware, Inc. 
# # SPDX-License-Identifier: Apache-2.0 - import csv import tempfile @@ -15,41 +14,48 @@ class CsvFormatterTests(testtools.TestCase): - def setUp(self): - super(CsvFormatterTests, self).setUp() + super().setUp() conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() - self.context = {'filename': self.tmp_fname, - 'lineno': 4, - 'linerange': [4]} - self.check_name = 'hardcoded_bind_all_interfaces' - self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, - 'Possible binding to all interfaces.') + self.context = { + "filename": self.tmp_fname, + "lineno": 4, + "linerange": [4], + } + self.check_name = "hardcoded_bind_all_interfaces" + self.issue = issue.Issue( + bandit.MEDIUM, bandit.MEDIUM, "Possible binding to all interfaces." + ) self.manager.out_file = self.tmp_fname - self.issue.fname = self.context['filename'] - self.issue.lineno = self.context['lineno'] - self.issue.linerange = self.context['linerange'] + self.issue.fname = self.context["filename"] + self.issue.lineno = self.context["lineno"] + self.issue.linerange = self.context["linerange"] self.issue.test = self.check_name self.manager.results.append(self.issue) def test_report(self): - with open(self.tmp_fname, 'w') as tmp_file: - b_csv.report(self.manager, tmp_file, self.issue.severity, - self.issue.confidence) + with open(self.tmp_fname, "w") as tmp_file: + b_csv.report( + self.manager, + tmp_file, + self.issue.severity, + self.issue.confidence, + ) with open(self.tmp_fname) as f: reader = csv.DictReader(f) data = next(reader) - self.assertEqual(self.tmp_fname, data['filename']) - self.assertEqual(self.issue.severity, data['issue_severity']) - self.assertEqual(self.issue.confidence, data['issue_confidence']) - self.assertEqual(self.issue.text, data['issue_text']) - self.assertEqual(str(self.context['lineno']), data['line_number']) - self.assertEqual(str(self.context['linerange']), 
- data['line_range']) - self.assertEqual(self.check_name, data['test_name']) - self.assertIsNotNone(data['more_info']) + self.assertEqual(self.tmp_fname, data["filename"]) + self.assertEqual(self.issue.severity, data["issue_severity"]) + self.assertEqual(self.issue.confidence, data["issue_confidence"]) + self.assertEqual(self.issue.text, data["issue_text"]) + self.assertEqual(str(self.context["lineno"]), data["line_number"]) + self.assertEqual( + str(self.context["linerange"]), data["line_range"] + ) + self.assertEqual(self.check_name, data["test_name"]) + self.assertIsNotNone(data["more_info"]) diff --git a/tests/unit/formatters/test_custom.py b/tests/unit/formatters/test_custom.py index 3a903ac97..16335d13f 100644 --- a/tests/unit/formatters/test_custom.py +++ b/tests/unit/formatters/test_custom.py @@ -1,5 +1,4 @@ # SPDX-License-Identifier: Apache-2.0 - import csv import tempfile @@ -13,40 +12,45 @@ class CustomFormatterTests(testtools.TestCase): - def setUp(self): - super(CustomFormatterTests, self).setUp() + super().setUp() conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'custom') + self.manager = manager.BanditManager(conf, "custom") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() - self.context = {'filename': self.tmp_fname, - 'lineno': 4, - 'linerange': [4], - 'col_offset': 30} - self.check_name = 'hardcoded_bind_all_interfaces' - self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, - 'Possible binding to all interfaces.') + self.context = { + "filename": self.tmp_fname, + "lineno": 4, + "linerange": [4], + "col_offset": 30, + } + self.check_name = "hardcoded_bind_all_interfaces" + self.issue = issue.Issue( + bandit.MEDIUM, bandit.MEDIUM, "Possible binding to all interfaces." 
+ ) self.manager.out_file = self.tmp_fname - self.issue.fname = self.context['filename'] - self.issue.lineno = self.context['lineno'] - self.issue.linerange = self.context['linerange'] - self.issue.col_offset = self.context['col_offset'] + self.issue.fname = self.context["filename"] + self.issue.lineno = self.context["lineno"] + self.issue.linerange = self.context["linerange"] + self.issue.col_offset = self.context["col_offset"] self.issue.test = self.check_name self.manager.results.append(self.issue) def test_report(self): - with open(self.tmp_fname, 'w') as tmp_file: + with open(self.tmp_fname, "w") as tmp_file: custom.report( - self.manager, tmp_file, self.issue.severity, + self.manager, + tmp_file, + self.issue.severity, self.issue.confidence, - template="{line},{col},{severity},{msg}") + template="{line},{col},{severity},{msg}", + ) with open(self.tmp_fname) as f: - reader = csv.DictReader(f, ['line', 'col', 'severity', 'message']) + reader = csv.DictReader(f, ["line", "col", "severity", "message"]) data = next(reader) - self.assertEqual(str(self.context['lineno']), data['line']) - self.assertEqual(str(self.context['col_offset']), data['col']) - self.assertEqual(self.issue.severity, data['severity']) - self.assertEqual(self.issue.text, data['message']) + self.assertEqual(str(self.context["lineno"]), data["line"]) + self.assertEqual(str(self.context["col_offset"]), data["col"]) + self.assertEqual(self.issue.severity, data["severity"]) + self.assertEqual(self.issue.text, data["message"]) diff --git a/tests/unit/formatters/test_html.py b/tests/unit/formatters/test_html.py index 9d2c297c3..f115e146d 100644 --- a/tests/unit/formatters/test_html.py +++ b/tests/unit/formatters/test_html.py @@ -2,7 +2,6 @@ # Copyright (c) 2015 Hewlett Packard Enterprise # # SPDX-License-Identifier: Apache-2.0 - import collections import tempfile from unittest import mock @@ -18,41 +17,39 @@ class HtmlFormatterTests(testtools.TestCase): - def setUp(self): - super(HtmlFormatterTests, 
self).setUp() + super().setUp() conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname def test_report_with_skipped(self): - self.manager.skipped = [('abc.py', 'File is bad')] + self.manager.skipped = [("abc.py", "File is bad")] - with open(self.tmp_fname, 'w') as tmp_file: - b_html.report( - self.manager, tmp_file, bandit.LOW, bandit.LOW) + with open(self.tmp_fname, "w") as tmp_file: + b_html.report(self.manager, tmp_file, bandit.LOW, bandit.LOW) with open(self.tmp_fname) as f: - soup = bs4.BeautifulSoup(f.read(), 'html.parser') - skipped = soup.find_all('div', id='skipped')[0] + soup = bs4.BeautifulSoup(f.read(), "html.parser") + skipped = soup.find_all("div", id="skipped")[0] - self.assertEqual(1, len(soup.find_all('div', id='skipped'))) - self.assertIn('abc.py', skipped.text) - self.assertIn('File is bad', skipped.text) + self.assertEqual(1, len(soup.find_all("div", id="skipped"))) + self.assertIn("abc.py", skipped.text) + self.assertIn("File is bad", skipped.text) - @mock.patch('bandit.core.issue.Issue.get_code') - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + @mock.patch("bandit.core.issue.Issue.get_code") + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report_contents(self, get_issue_list, get_code): - self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50} + self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50} issue_a = _get_issue_instance(severity=bandit.LOW) - issue_a.fname = 'abc.py' - issue_a.test = 'AAAAAAA' - issue_a.text = 'BBBBBBB' - issue_a.confidence = 'CCCCCCC' + issue_a.fname = "abc.py" + issue_a.test = "AAAAAAA" + issue_a.text = "BBBBBBB" + issue_a.confidence = "CCCCCCC" # don't need to test severity, it determines the color which we're # testing separately @@ -60,73 +57,83 @@ def test_report_contents(self, 
get_issue_list, get_code): issue_c = _get_issue_instance(severity=bandit.HIGH) issue_x = _get_issue_instance() - get_code.return_value = 'some code' + get_code.return_value = "some code" issue_y = _get_issue_instance() get_issue_list.return_value = collections.OrderedDict( - [(issue_a, [issue_x, issue_y]), - (issue_b, [issue_x]), (issue_c, [issue_y])]) + [ + (issue_a, [issue_x, issue_y]), + (issue_b, [issue_x]), + (issue_c, [issue_y]), + ] + ) - with open(self.tmp_fname, 'w') as tmp_file: - b_html.report( - self.manager, tmp_file, bandit.LOW, bandit.LOW) + with open(self.tmp_fname, "w") as tmp_file: + b_html.report(self.manager, tmp_file, bandit.LOW, bandit.LOW) with open(self.tmp_fname) as f: - soup = bs4.BeautifulSoup(f.read(), 'html.parser') + soup = bs4.BeautifulSoup(f.read(), "html.parser") - self.assertEqual('1000', soup.find_all('span', id='loc')[0].text) - self.assertEqual('50', soup.find_all('span', id='nosec')[0].text) + self.assertEqual("1000", soup.find_all("span", id="loc")[0].text) + self.assertEqual("50", soup.find_all("span", id="nosec")[0].text) - issue1 = soup.find_all('div', id='issue-0')[0] - issue2 = soup.find_all('div', id='issue-1')[0] - issue3 = soup.find_all('div', id='issue-2')[0] + issue1 = soup.find_all("div", id="issue-0")[0] + issue2 = soup.find_all("div", id="issue-1")[0] + issue3 = soup.find_all("div", id="issue-2")[0] # make sure the class has been applied properly - self.assertEqual(1, len(issue1.find_all( - 'div', {'class': 'issue-sev-low'}))) + self.assertEqual( + 1, len(issue1.find_all("div", {"class": "issue-sev-low"})) + ) - self.assertEqual(1, len(issue2.find_all( - 'div', {'class': 'issue-sev-medium'}))) + self.assertEqual( + 1, len(issue2.find_all("div", {"class": "issue-sev-medium"})) + ) - self.assertEqual(1, len(issue3.find_all( - 'div', {'class': 'issue-sev-high'}))) + self.assertEqual( + 1, len(issue3.find_all("div", {"class": "issue-sev-high"})) + ) # issue1 has a candidates section with 2 candidates in it - 
self.assertEqual(1, len(issue1.find_all('div', - {'class': 'candidates'}))) - self.assertEqual(2, len(issue1.find_all('div', - {'class': 'candidate'}))) + self.assertEqual( + 1, len(issue1.find_all("div", {"class": "candidates"})) + ) + self.assertEqual( + 2, len(issue1.find_all("div", {"class": "candidate"})) + ) # issue2 doesn't have candidates - self.assertEqual(0, len(issue2.find_all('div', - {'class': 'candidates'}))) - self.assertEqual(0, len(issue2.find_all('div', - {'class': 'candidate'}))) + self.assertEqual( + 0, len(issue2.find_all("div", {"class": "candidates"})) + ) + self.assertEqual( + 0, len(issue2.find_all("div", {"class": "candidate"})) + ) # issue1 doesn't have code issue 2 and 3 do - self.assertEqual(0, len(issue1.find_all('div', {'class': 'code'}))) - self.assertEqual(1, len(issue2.find_all('div', {'class': 'code'}))) - self.assertEqual(1, len(issue3.find_all('div', {'class': 'code'}))) + self.assertEqual(0, len(issue1.find_all("div", {"class": "code"}))) + self.assertEqual(1, len(issue2.find_all("div", {"class": "code"}))) + self.assertEqual(1, len(issue3.find_all("div", {"class": "code"}))) # issue2 code and issue1 first candidate have code - element1 = issue1.find_all('div', {'class': 'candidate'}) - self.assertIn('some code', element1[0].text) - element2 = issue2.find_all('div', {'class': 'code'}) - self.assertIn('some code', element2[0].text) + element1 = issue1.find_all("div", {"class": "candidate"}) + self.assertIn("some code", element1[0].text) + element2 = issue2.find_all("div", {"class": "code"}) + self.assertIn("some code", element2[0].text) # make sure correct things are being output in issues - self.assertIn('AAAAAAA:', issue1.text) - self.assertIn('BBBBBBB', issue1.text) - self.assertIn('CCCCCCC', issue1.text) - self.assertIn('abc.py', issue1.text) - self.assertIn('Line number: 1', issue1.text) - - @mock.patch('bandit.core.issue.Issue.get_code') - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + 
self.assertIn("AAAAAAA:", issue1.text) + self.assertIn("BBBBBBB", issue1.text) + self.assertIn("CCCCCCC", issue1.text) + self.assertIn("abc.py", issue1.text) + self.assertIn("Line number: 1", issue1.text) + + @mock.patch("bandit.core.issue.Issue.get_code") + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_escaping(self, get_issue_list, get_code): - self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50} - marker = '' + self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50} + marker = "" issue_a = _get_issue_instance() issue_x = _get_issue_instance() @@ -134,9 +141,8 @@ def test_escaping(self, get_issue_list, get_code): get_issue_list.return_value = {issue_a: [issue_x]} - with open(self.tmp_fname, 'w') as tmp_file: - b_html.report( - self.manager, tmp_file, bandit.LOW, bandit.LOW) + with open(self.tmp_fname, "w") as tmp_file: + b_html.report(self.manager, tmp_file, bandit.LOW, bandit.LOW) with open(self.tmp_fname) as f: contents = f.read() @@ -144,8 +150,8 @@ def test_escaping(self, get_issue_list, get_code): def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM): - new_issue = issue.Issue(severity, confidence, 'Test issue') - new_issue.fname = 'code.py' - new_issue.test = 'bandit_plugin' + new_issue = issue.Issue(severity, confidence, "Test issue") + new_issue.fname = "code.py" + new_issue.test = "bandit_plugin" new_issue.lineno = 1 return new_issue diff --git a/tests/unit/formatters/test_json.py b/tests/unit/formatters/test_json.py index 392ce2142..454faa300 100644 --- a/tests/unit/formatters/test_json.py +++ b/tests/unit/formatters/test_json.py @@ -1,7 +1,6 @@ # Copyright (c) 2015 VMware, Inc. 
# # SPDX-License-Identifier: Apache-2.0 - import collections import json import tempfile @@ -19,70 +18,83 @@ class JsonFormatterTests(testtools.TestCase): - def setUp(self): - super(JsonFormatterTests, self).setUp() + super().setUp() conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() - self.context = {'filename': self.tmp_fname, - 'lineno': 4, - 'linerange': [4]} - self.check_name = 'hardcoded_bind_all_interfaces' - self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, - 'Possible binding to all interfaces.') + self.context = { + "filename": self.tmp_fname, + "lineno": 4, + "linerange": [4], + } + self.check_name = "hardcoded_bind_all_interfaces" + self.issue = issue.Issue( + bandit.MEDIUM, bandit.MEDIUM, "Possible binding to all interfaces." + ) - self.candidates = [issue.Issue(bandit.LOW, bandit.LOW, 'Candidate A', - lineno=1), - issue.Issue(bandit.HIGH, bandit.HIGH, 'Candiate B', - lineno=2)] + self.candidates = [ + issue.Issue(bandit.LOW, bandit.LOW, "Candidate A", lineno=1), + issue.Issue(bandit.HIGH, bandit.HIGH, "Candiate B", lineno=2), + ] self.manager.out_file = self.tmp_fname - self.issue.fname = self.context['filename'] - self.issue.lineno = self.context['lineno'] - self.issue.linerange = self.context['linerange'] + self.issue.fname = self.context["filename"] + self.issue.lineno = self.context["lineno"] + self.issue.linerange = self.context["linerange"] self.issue.test = self.check_name self.manager.results.append(self.issue) self.manager.metrics = metrics.Metrics() # mock up the metrics - for key in ['_totals', 'binding.py']: - self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2} + for key in ["_totals", "binding.py"]: + self.manager.metrics.data[key] = {"loc": 4, "nosec": 2} for (criteria, default) in constants.CRITERIA: for rank in constants.RANKING: - self.manager.metrics.data[key]['{0}.{1}'.format( - criteria, rank - 
)] = 0 + self.manager.metrics.data[key][f"{criteria}.{rank}"] = 0 - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report(self, get_issue_list): - self.manager.files_list = ['binding.py'] - self.manager.scores = [{'SEVERITY': [0] * len(constants.RANKING), - 'CONFIDENCE': [0] * len(constants.RANKING)}] + self.manager.files_list = ["binding.py"] + self.manager.scores = [ + { + "SEVERITY": [0] * len(constants.RANKING), + "CONFIDENCE": [0] * len(constants.RANKING), + } + ] get_issue_list.return_value = collections.OrderedDict( - [(self.issue, self.candidates)]) + [(self.issue, self.candidates)] + ) - with open(self.tmp_fname, 'w') as tmp_file: - b_json.report(self.manager, tmp_file, self.issue.severity, - self.issue.confidence) + with open(self.tmp_fname, "w") as tmp_file: + b_json.report( + self.manager, + tmp_file, + self.issue.severity, + self.issue.confidence, + ) with open(self.tmp_fname) as f: data = json.loads(f.read()) - self.assertIsNotNone(data['generated_at']) - self.assertEqual(self.tmp_fname, data['results'][0]['filename']) - self.assertEqual(self.issue.severity, - data['results'][0]['issue_severity']) - self.assertEqual(self.issue.confidence, - data['results'][0]['issue_confidence']) - self.assertEqual(self.issue.text, data['results'][0]['issue_text']) - self.assertEqual(self.context['lineno'], - data['results'][0]['line_number']) - self.assertEqual(self.context['linerange'], - data['results'][0]['line_range']) - self.assertEqual(self.check_name, data['results'][0]['test_name']) - self.assertIn('candidates', data['results'][0]) - self.assertIn('more_info', data['results'][0]) - self.assertIsNotNone(data['results'][0]['more_info']) + self.assertIsNotNone(data["generated_at"]) + self.assertEqual(self.tmp_fname, data["results"][0]["filename"]) + self.assertEqual( + self.issue.severity, data["results"][0]["issue_severity"] + ) + self.assertEqual( + 
self.issue.confidence, data["results"][0]["issue_confidence"] + ) + self.assertEqual(self.issue.text, data["results"][0]["issue_text"]) + self.assertEqual( + self.context["lineno"], data["results"][0]["line_number"] + ) + self.assertEqual( + self.context["linerange"], data["results"][0]["line_range"] + ) + self.assertEqual(self.check_name, data["results"][0]["test_name"]) + self.assertIn("candidates", data["results"][0]) + self.assertIn("more_info", data["results"][0]) + self.assertIsNotNone(data["results"][0]["more_info"]) diff --git a/tests/unit/formatters/test_screen.py b/tests/unit/formatters/test_screen.py index 18bf2b8b5..cdeeeb34d 100644 --- a/tests/unit/formatters/test_screen.py +++ b/tests/unit/formatters/test_screen.py @@ -2,7 +2,6 @@ # Copyright (c) 2015 Hewlett Packard Enterprise # # SPDX-License-Identifier: Apache-2.0 - import collections import tempfile from unittest import mock @@ -18,152 +17,182 @@ class ScreenFormatterTests(testtools.TestCase): - def setUp(self): - super(ScreenFormatterTests, self).setUp() + super().setUp() - @mock.patch('bandit.core.issue.Issue.get_code') + @mock.patch("bandit.core.issue.Issue.get_code") def test_output_issue(self, get_code): issue = _get_issue_instance() - get_code.return_value = 'DDDDDDD' - indent_val = 'CCCCCCC' + get_code.return_value = "DDDDDDD" + indent_val = "CCCCCCC" def _template(_issue, _indent_val, _code, _color): - return_val = ["{}{}>> Issue: [{}:{}] {}". - format(_indent_val, _color, _issue.test_id, - _issue.test, _issue.text), - "{} Severity: {} Confidence: {}". - format(_indent_val, _issue.severity.capitalize(), - _issue.confidence.capitalize()), - "{} Location: {}:{}:{}". 
- format(_indent_val, _issue.fname, _issue.lineno, - _issue.col_offset), - "{} More Info: {}{}".format( - _indent_val, docs_utils.get_url(_issue.test_id), - screen.COLOR['DEFAULT'])] + return_val = [ + "{}{}>> Issue: [{}:{}] {}".format( + _indent_val, + _color, + _issue.test_id, + _issue.test, + _issue.text, + ), + "{} Severity: {} Confidence: {}".format( + _indent_val, + _issue.severity.capitalize(), + _issue.confidence.capitalize(), + ), + "{} Location: {}:{}:{}".format( + _indent_val, _issue.fname, _issue.lineno, _issue.col_offset + ), + "{} More Info: {}{}".format( + _indent_val, + docs_utils.get_url(_issue.test_id), + screen.COLOR["DEFAULT"], + ), + ] if _code: - return_val.append("{}{}".format(_indent_val, _code)) - return '\n'.join(return_val) + return_val.append(f"{_indent_val}{_code}") + return "\n".join(return_val) issue_text = screen._output_issue_str(issue, indent_val) - expected_return = _template(issue, indent_val, 'DDDDDDD', - screen.COLOR['MEDIUM']) + expected_return = _template( + issue, indent_val, "DDDDDDD", screen.COLOR["MEDIUM"] + ) self.assertEqual(expected_return, issue_text) - issue_text = screen._output_issue_str(issue, indent_val, - show_code=False) - expected_return = _template(issue, indent_val, '', - screen.COLOR['MEDIUM']) + issue_text = screen._output_issue_str( + issue, indent_val, show_code=False + ) + expected_return = _template( + issue, indent_val, "", screen.COLOR["MEDIUM"] + ) self.assertEqual(expected_return, issue_text) - issue.lineno = '' - issue.col_offset = '' - issue_text = screen._output_issue_str(issue, indent_val, - show_lineno=False) - expected_return = _template(issue, indent_val, 'DDDDDDD', - screen.COLOR['MEDIUM']) + issue.lineno = "" + issue.col_offset = "" + issue_text = screen._output_issue_str( + issue, indent_val, show_lineno=False + ) + expected_return = _template( + issue, indent_val, "DDDDDDD", screen.COLOR["MEDIUM"] + ) self.assertEqual(expected_return, issue_text) - 
@mock.patch('bandit.core.manager.BanditManager.get_issue_list') + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_no_issues(self, get_issue_list): conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname get_issue_list.return_value = collections.OrderedDict() - with mock.patch('bandit.formatters.screen.do_print') as m: - with open(self.tmp_fname, 'w') as tmp_file: - screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, - lines=5) - self.assertIn('No issues identified.', - '\n'.join([str(a) for a in m.call_args])) - - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + with mock.patch("bandit.formatters.screen.do_print") as m: + with open(self.tmp_fname, "w") as tmp_file: + screen.report( + self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 + ) + self.assertIn( + "No issues identified.", + "\n".join([str(a) for a in m.call_args]), + ) + + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report_nobaseline(self, get_issue_list): conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname self.manager.verbose = True - self.manager.files_list = ['binding.py'] + self.manager.files_list = ["binding.py"] - self.manager.scores = [{'SEVERITY': [0, 0, 0, 1], - 'CONFIDENCE': [0, 0, 0, 1]}] + self.manager.scores = [ + {"SEVERITY": [0, 0, 0, 1], "CONFIDENCE": [0, 0, 0, 1]} + ] - self.manager.skipped = [('abc.py', 'File is bad')] - self.manager.excluded_files = ['def.py'] + self.manager.skipped = [("abc.py", "File is bad")] + self.manager.excluded_files = ["def.py"] issue_a = _get_issue_instance() issue_b = _get_issue_instance() get_issue_list.return_value = [issue_a, issue_b] - 
self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50} - for category in ['SEVERITY', 'CONFIDENCE']: - for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']: - self.manager.metrics.data['_totals']['%s.%s' % - (category, level)] = 1 + self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50} + for category in ["SEVERITY", "CONFIDENCE"]: + for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]: + self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1 # Validate that we're outputting the correct issues - output_str_fn = 'bandit.formatters.screen._output_issue_str' + output_str_fn = "bandit.formatters.screen._output_issue_str" with mock.patch(output_str_fn) as output_str: - output_str.return_value = 'ISSUE_OUTPUT_TEXT' + output_str.return_value = "ISSUE_OUTPUT_TEXT" - with open(self.tmp_fname, 'w') as tmp_file: - screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, - lines=5) + with open(self.tmp_fname, "w") as tmp_file: + screen.report( + self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 + ) - calls = [mock.call(issue_a, '', lines=5), - mock.call(issue_b, '', lines=5)] + calls = [ + mock.call(issue_a, "", lines=5), + mock.call(issue_b, "", lines=5), + ] output_str.assert_has_calls(calls, any_order=True) # Validate that we're outputting all of the expected fields and the # correct values - with mock.patch('bandit.formatters.screen.do_print') as m: - with open(self.tmp_fname, 'w') as tmp_file: - screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, - lines=5) + with mock.patch("bandit.formatters.screen.do_print") as m: + with open(self.tmp_fname, "w") as tmp_file: + screen.report( + self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 + ) - data = '\n'.join([str(a) for a in m.call_args[0][0]]) + data = "\n".join([str(a) for a in m.call_args[0][0]]) - expected = 'Run started' + expected = "Run started" self.assertIn(expected, data) expected_items = [ - screen.header('Files in scope (1):'), - '\n\tbinding.py (score: 
{SEVERITY: 1, CONFIDENCE: 1})'] + screen.header("Files in scope (1):"), + "\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})", + ] for item in expected_items: self.assertIn(item, data) - expected = screen.header('Files excluded (1):') + '\n\tdef.py' + expected = screen.header("Files excluded (1):") + "\n\tdef.py" self.assertIn(expected, data) - expected = ('Total lines of code: 1000\n\tTotal lines skipped ' - '(#nosec): 50') + expected = ( + "Total lines of code: 1000\n\tTotal lines skipped " + "(#nosec): 50" + ) self.assertIn(expected, data) - expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t' - 'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1') + expected = ( + "Total issues (by severity):\n\t\tUndefined: 1\n\t\t" + "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1" + ) self.assertIn(expected, data) - expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t' - 'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1') + expected = ( + "Total issues (by confidence):\n\t\tUndefined: 1\n\t\t" + "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1" + ) self.assertIn(expected, data) - expected = (screen.header('Files skipped (1):') + - '\n\tabc.py (File is bad)') + expected = ( + screen.header("Files skipped (1):") + + "\n\tabc.py (File is bad)" + ) self.assertIn(expected, data) - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report_baseline(self, get_issue_list): conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname @@ -172,37 +201,40 @@ def test_report_baseline(self, get_issue_list): issue_b = _get_issue_instance() issue_x = _get_issue_instance() - issue_x.fname = 'x' + issue_x.fname = "x" issue_y = _get_issue_instance() - issue_y.fname = 'y' + issue_y.fname = "y" issue_z = _get_issue_instance() - issue_z.fname = 'z' + issue_z.fname = "z" 
get_issue_list.return_value = collections.OrderedDict( - [(issue_a, [issue_x]), (issue_b, [issue_y, issue_z])]) + [(issue_a, [issue_x]), (issue_b, [issue_y, issue_z])] + ) # Validate that we're outputting the correct issues - indent_val = ' ' * 10 - output_str_fn = 'bandit.formatters.screen._output_issue_str' + indent_val = " " * 10 + output_str_fn = "bandit.formatters.screen._output_issue_str" with mock.patch(output_str_fn) as output_str: - output_str.return_value = 'ISSUE_OUTPUT_TEXT' + output_str.return_value = "ISSUE_OUTPUT_TEXT" - with open(self.tmp_fname, 'w') as tmp_file: - screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, - lines=5) + with open(self.tmp_fname, "w") as tmp_file: + screen.report( + self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 + ) - calls = [mock.call(issue_a, '', lines=5), - mock.call(issue_b, '', show_code=False, - show_lineno=False), - mock.call(issue_y, indent_val, lines=5), - mock.call(issue_z, indent_val, lines=5)] + calls = [ + mock.call(issue_a, "", lines=5), + mock.call(issue_b, "", show_code=False, show_lineno=False), + mock.call(issue_y, indent_val, lines=5), + mock.call(issue_z, indent_val, lines=5), + ] output_str.assert_has_calls(calls, any_order=True) def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM): - new_issue = issue.Issue(severity, confidence, 'Test issue') - new_issue.fname = 'code.py' - new_issue.test = 'bandit_plugin' + new_issue = issue.Issue(severity, confidence, "Test issue") + new_issue.fname = "code.py" + new_issue.test = "bandit_plugin" new_issue.lineno = 1 return new_issue diff --git a/tests/unit/formatters/test_text.py b/tests/unit/formatters/test_text.py index 30a45af0e..a55172d0b 100644 --- a/tests/unit/formatters/test_text.py +++ b/tests/unit/formatters/test_text.py @@ -2,7 +2,6 @@ # Copyright (c) 2015 Hewlett Packard Enterprise # # SPDX-License-Identifier: Apache-2.0 - import collections import tempfile from unittest import mock @@ -18,140 +17,152 @@ class 
TextFormatterTests(testtools.TestCase): - def setUp(self): - super(TextFormatterTests, self).setUp() + super().setUp() - @mock.patch('bandit.core.issue.Issue.get_code') + @mock.patch("bandit.core.issue.Issue.get_code") def test_output_issue(self, get_code): issue = _get_issue_instance() - get_code.return_value = 'DDDDDDD' - indent_val = 'CCCCCCC' + get_code.return_value = "DDDDDDD" + indent_val = "CCCCCCC" def _template(_issue, _indent_val, _code): - return_val = ["{}>> Issue: [{}:{}] {}". - format(_indent_val, _issue.test_id, _issue.test, - _issue.text), - "{} Severity: {} Confidence: {}". - format(_indent_val, _issue.severity.capitalize(), - _issue.confidence.capitalize()), - "{} Location: {}:{}:{}". - format(_indent_val, _issue.fname, _issue.lineno, - _issue.col_offset), - "{} More Info: {}".format( - _indent_val, docs_utils.get_url(_issue.test_id))] + return_val = [ + "{}>> Issue: [{}:{}] {}".format( + _indent_val, _issue.test_id, _issue.test, _issue.text + ), + "{} Severity: {} Confidence: {}".format( + _indent_val, + _issue.severity.capitalize(), + _issue.confidence.capitalize(), + ), + "{} Location: {}:{}:{}".format( + _indent_val, _issue.fname, _issue.lineno, _issue.col_offset + ), + "{} More Info: {}".format( + _indent_val, docs_utils.get_url(_issue.test_id) + ), + ] if _code: - return_val.append("{}{}".format(_indent_val, _code)) - return '\n'.join(return_val) + return_val.append(f"{_indent_val}{_code}") + return "\n".join(return_val) issue_text = b_text._output_issue_str(issue, indent_val) - expected_return = _template(issue, indent_val, 'DDDDDDD') + expected_return = _template(issue, indent_val, "DDDDDDD") self.assertEqual(expected_return, issue_text) - issue_text = b_text._output_issue_str(issue, indent_val, - show_code=False) - expected_return = _template(issue, indent_val, '') + issue_text = b_text._output_issue_str( + issue, indent_val, show_code=False + ) + expected_return = _template(issue, indent_val, "") self.assertEqual(expected_return, 
issue_text) - issue.lineno = '' - issue.col_offset = '' - issue_text = b_text._output_issue_str(issue, indent_val, - show_lineno=False) - expected_return = _template(issue, indent_val, 'DDDDDDD') + issue.lineno = "" + issue.col_offset = "" + issue_text = b_text._output_issue_str( + issue, indent_val, show_lineno=False + ) + expected_return = _template(issue, indent_val, "DDDDDDD") self.assertEqual(expected_return, issue_text) - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_no_issues(self, get_issue_list): conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname get_issue_list.return_value = collections.OrderedDict() - with open(self.tmp_fname, 'w') as tmp_file: - b_text.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, - lines=5) + with open(self.tmp_fname, "w") as tmp_file: + b_text.report( + self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 + ) with open(self.tmp_fname) as f: data = f.read() - self.assertIn('No issues identified.', data) + self.assertIn("No issues identified.", data) - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report_nobaseline(self, get_issue_list): conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname self.manager.verbose = True - self.manager.files_list = ['binding.py'] + self.manager.files_list = ["binding.py"] - self.manager.scores = [{'SEVERITY': [0, 0, 0, 1], - 'CONFIDENCE': [0, 0, 0, 1]}] + self.manager.scores = [ + {"SEVERITY": [0, 0, 0, 1], "CONFIDENCE": [0, 0, 0, 1]} + ] - self.manager.skipped = [('abc.py', 'File is 
bad')] - self.manager.excluded_files = ['def.py'] + self.manager.skipped = [("abc.py", "File is bad")] + self.manager.excluded_files = ["def.py"] issue_a = _get_issue_instance() issue_b = _get_issue_instance() get_issue_list.return_value = [issue_a, issue_b] - self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50} - for category in ['SEVERITY', 'CONFIDENCE']: - for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']: - self.manager.metrics.data['_totals']['%s.%s' % - (category, level)] = 1 + self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50} + for category in ["SEVERITY", "CONFIDENCE"]: + for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]: + self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1 # Validate that we're outputting the correct issues - output_str_fn = 'bandit.formatters.text._output_issue_str' + output_str_fn = "bandit.formatters.text._output_issue_str" with mock.patch(output_str_fn) as output_str: - output_str.return_value = 'ISSUE_OUTPUT_TEXT' + output_str.return_value = "ISSUE_OUTPUT_TEXT" - with open(self.tmp_fname, 'w') as tmp_file: - b_text.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, - lines=5) + with open(self.tmp_fname, "w") as tmp_file: + b_text.report( + self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 + ) - calls = [mock.call(issue_a, '', lines=5), - mock.call(issue_b, '', lines=5)] + calls = [ + mock.call(issue_a, "", lines=5), + mock.call(issue_b, "", lines=5), + ] output_str.assert_has_calls(calls, any_order=True) # Validate that we're outputting all of the expected fields and the # correct values - with open(self.tmp_fname, 'w') as tmp_file: - b_text.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, - lines=5) + with open(self.tmp_fname, "w") as tmp_file: + b_text.report( + self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 + ) with open(self.tmp_fname) as f: data = f.read() - expected_items = ['Run started', - 'Files in scope (1)', - 'binding.py (score: ', - "CONFIDENCE: 1", - 
"SEVERITY: 1", - 'Files excluded (1):', - 'def.py', - 'Undefined: 1', - 'Low: 1', - 'Medium: 1', - 'High: 1', - 'Total lines skipped ', - '(#nosec): 50', - 'Total issues (by severity)', - 'Total issues (by confidence)', - 'Files skipped (1)', - 'abc.py (File is bad)' - ] + expected_items = [ + "Run started", + "Files in scope (1)", + "binding.py (score: ", + "CONFIDENCE: 1", + "SEVERITY: 1", + "Files excluded (1):", + "def.py", + "Undefined: 1", + "Low: 1", + "Medium: 1", + "High: 1", + "Total lines skipped ", + "(#nosec): 50", + "Total issues (by severity)", + "Total issues (by confidence)", + "Files skipped (1)", + "abc.py (File is bad)", + ] for item in expected_items: self.assertIn(item, data) - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report_baseline(self, get_issue_list): conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname @@ -160,37 +171,40 @@ def test_report_baseline(self, get_issue_list): issue_b = _get_issue_instance() issue_x = _get_issue_instance() - issue_x.fname = 'x' + issue_x.fname = "x" issue_y = _get_issue_instance() - issue_y.fname = 'y' + issue_y.fname = "y" issue_z = _get_issue_instance() - issue_z.fname = 'z' + issue_z.fname = "z" get_issue_list.return_value = collections.OrderedDict( - [(issue_a, [issue_x]), (issue_b, [issue_y, issue_z])]) + [(issue_a, [issue_x]), (issue_b, [issue_y, issue_z])] + ) # Validate that we're outputting the correct issues - indent_val = ' ' * 10 - output_str_fn = 'bandit.formatters.text._output_issue_str' + indent_val = " " * 10 + output_str_fn = "bandit.formatters.text._output_issue_str" with mock.patch(output_str_fn) as output_str: - output_str.return_value = 'ISSUE_OUTPUT_TEXT' + output_str.return_value = "ISSUE_OUTPUT_TEXT" - with 
open(self.tmp_fname, 'w') as tmp_file: - b_text.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, - lines=5) + with open(self.tmp_fname, "w") as tmp_file: + b_text.report( + self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 + ) - calls = [mock.call(issue_a, '', lines=5), - mock.call(issue_b, '', show_code=False, - show_lineno=False), - mock.call(issue_y, indent_val, lines=5), - mock.call(issue_z, indent_val, lines=5)] + calls = [ + mock.call(issue_a, "", lines=5), + mock.call(issue_b, "", show_code=False, show_lineno=False), + mock.call(issue_y, indent_val, lines=5), + mock.call(issue_z, indent_val, lines=5), + ] output_str.assert_has_calls(calls, any_order=True) def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM): - new_issue = issue.Issue(severity, confidence, 'Test issue') - new_issue.fname = 'code.py' - new_issue.test = 'bandit_plugin' + new_issue = issue.Issue(severity, confidence, "Test issue") + new_issue.fname = "code.py" + new_issue.test = "bandit_plugin" new_issue.lineno = 1 return new_issue diff --git a/tests/unit/formatters/test_xml.py b/tests/unit/formatters/test_xml.py index dd5e16d4a..c2c06d3f6 100644 --- a/tests/unit/formatters/test_xml.py +++ b/tests/unit/formatters/test_xml.py @@ -1,7 +1,6 @@ # Copyright (c) 2015 VMware, Inc. 
# # SPDX-License-Identifier: Apache-2.0 - import collections import tempfile from xml.etree import cElementTree as ET @@ -16,23 +15,25 @@ class XmlFormatterTests(testtools.TestCase): - def setUp(self): - super(XmlFormatterTests, self).setUp() + super().setUp() conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() - self.context = {'filename': self.tmp_fname, - 'lineno': 4, - 'linerange': [4]} - self.check_name = 'hardcoded_bind_all_interfaces' - self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, - 'Possible binding to all interfaces.') + self.context = { + "filename": self.tmp_fname, + "lineno": 4, + "linerange": [4], + } + self.check_name = "hardcoded_bind_all_interfaces" + self.issue = issue.Issue( + bandit.MEDIUM, bandit.MEDIUM, "Possible binding to all interfaces." + ) self.manager.out_file = self.tmp_fname - self.issue.fname = self.context['filename'] - self.issue.lineno = self.context['lineno'] - self.issue.linerange = self.context['linerange'] + self.issue.fname = self.context["filename"] + self.issue.lineno = self.context["lineno"] + self.issue.linerange = self.context["linerange"] self.issue.test = self.check_name self.manager.results.append(self.issue) @@ -45,32 +46,39 @@ def _xml_to_dict(self, t): for dc in map(self._xml_to_dict, children): for k, v in dc.items(): dd[k].append(v) - d = {t.tag: {k: v[0] if len(v) == 1 else v - for k, v in dd.items()}} + d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}} if t.attrib: - d[t.tag].update(('@' + k, v) for k, v in t.attrib.items()) + d[t.tag].update(("@" + k, v) for k, v in t.attrib.items()) if t.text: text = t.text.strip() if children or t.attrib: if text: - d[t.tag]['#text'] = text + d[t.tag]["#text"] = text else: d[t.tag] = text return d def test_report(self): - with open(self.tmp_fname, 'wb') as tmp_file: - b_xml.report(self.manager, tmp_file, 
self.issue.severity, - self.issue.confidence) + with open(self.tmp_fname, "wb") as tmp_file: + b_xml.report( + self.manager, + tmp_file, + self.issue.severity, + self.issue.confidence, + ) with open(self.tmp_fname) as f: data = self._xml_to_dict(ET.XML(f.read())) - self.assertEqual(self.tmp_fname, - data['testsuite']['testcase']['@classname']) + self.assertEqual( + self.tmp_fname, data["testsuite"]["testcase"]["@classname"] + ) self.assertEqual( self.issue.text, - data['testsuite']['testcase']['error']['@message']) - self.assertEqual(self.check_name, - data['testsuite']['testcase']['@name']) + data["testsuite"]["testcase"]["error"]["@message"], + ) + self.assertEqual( + self.check_name, data["testsuite"]["testcase"]["@name"] + ) self.assertIsNotNone( - data['testsuite']['testcase']['error']['@more_info']) + data["testsuite"]["testcase"]["error"]["@more_info"] + ) diff --git a/tests/unit/formatters/test_yaml.py b/tests/unit/formatters/test_yaml.py index aefd4fd12..bca249c78 100644 --- a/tests/unit/formatters/test_yaml.py +++ b/tests/unit/formatters/test_yaml.py @@ -1,7 +1,6 @@ # Copyright (c) 2017 VMware, Inc. 
# # SPDX-License-Identifier: Apache-2.0 - import collections import tempfile from unittest import mock @@ -19,70 +18,83 @@ class YamlFormatterTests(testtools.TestCase): - def setUp(self): - super(YamlFormatterTests, self).setUp() + super().setUp() conf = config.BanditConfig() - self.manager = manager.BanditManager(conf, 'file') + self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() - self.context = {'filename': self.tmp_fname, - 'lineno': 4, - 'linerange': [4]} - self.check_name = 'hardcoded_bind_all_interfaces' - self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, - 'Possible binding to all interfaces.') + self.context = { + "filename": self.tmp_fname, + "lineno": 4, + "linerange": [4], + } + self.check_name = "hardcoded_bind_all_interfaces" + self.issue = issue.Issue( + bandit.MEDIUM, bandit.MEDIUM, "Possible binding to all interfaces." + ) - self.candidates = [issue.Issue(bandit.LOW, bandit.LOW, 'Candidate A', - lineno=1), - issue.Issue(bandit.HIGH, bandit.HIGH, 'Candiate B', - lineno=2)] + self.candidates = [ + issue.Issue(bandit.LOW, bandit.LOW, "Candidate A", lineno=1), + issue.Issue(bandit.HIGH, bandit.HIGH, "Candiate B", lineno=2), + ] self.manager.out_file = self.tmp_fname - self.issue.fname = self.context['filename'] - self.issue.lineno = self.context['lineno'] - self.issue.linerange = self.context['linerange'] + self.issue.fname = self.context["filename"] + self.issue.lineno = self.context["lineno"] + self.issue.linerange = self.context["linerange"] self.issue.test = self.check_name self.manager.results.append(self.issue) self.manager.metrics = metrics.Metrics() # mock up the metrics - for key in ['_totals', 'binding.py']: - self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2} + for key in ["_totals", "binding.py"]: + self.manager.metrics.data[key] = {"loc": 4, "nosec": 2} for (criteria, default) in constants.CRITERIA: for rank in constants.RANKING: - self.manager.metrics.data[key]['{0}.{1}'.format( - 
criteria, rank - )] = 0 + self.manager.metrics.data[key][f"{criteria}.{rank}"] = 0 - @mock.patch('bandit.core.manager.BanditManager.get_issue_list') + @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report(self, get_issue_list): - self.manager.files_list = ['binding.py'] - self.manager.scores = [{'SEVERITY': [0] * len(constants.RANKING), - 'CONFIDENCE': [0] * len(constants.RANKING)}] + self.manager.files_list = ["binding.py"] + self.manager.scores = [ + { + "SEVERITY": [0] * len(constants.RANKING), + "CONFIDENCE": [0] * len(constants.RANKING), + } + ] get_issue_list.return_value = collections.OrderedDict( - [(self.issue, self.candidates)]) + [(self.issue, self.candidates)] + ) - with open(self.tmp_fname, 'w') as tmp_file: - b_json.report(self.manager, tmp_file, self.issue.severity, - self.issue.confidence) + with open(self.tmp_fname, "w") as tmp_file: + b_json.report( + self.manager, + tmp_file, + self.issue.severity, + self.issue.confidence, + ) with open(self.tmp_fname) as f: data = yaml.load(f.read(), Loader=yaml.SafeLoader) - self.assertIsNotNone(data['generated_at']) - self.assertEqual(self.tmp_fname, data['results'][0]['filename']) - self.assertEqual(self.issue.severity, - data['results'][0]['issue_severity']) - self.assertEqual(self.issue.confidence, - data['results'][0]['issue_confidence']) - self.assertEqual(self.issue.text, data['results'][0]['issue_text']) - self.assertEqual(self.context['lineno'], - data['results'][0]['line_number']) - self.assertEqual(self.context['linerange'], - data['results'][0]['line_range']) - self.assertEqual(self.check_name, data['results'][0]['test_name']) - self.assertIn('candidates', data['results'][0]) - self.assertIn('more_info', data['results'][0]) - self.assertIsNotNone(data['results'][0]['more_info']) + self.assertIsNotNone(data["generated_at"]) + self.assertEqual(self.tmp_fname, data["results"][0]["filename"]) + self.assertEqual( + self.issue.severity, data["results"][0]["issue_severity"] + ) + 
self.assertEqual( + self.issue.confidence, data["results"][0]["issue_confidence"] + ) + self.assertEqual(self.issue.text, data["results"][0]["issue_text"]) + self.assertEqual( + self.context["lineno"], data["results"][0]["line_number"] + ) + self.assertEqual( + self.context["linerange"], data["results"][0]["line_range"] + ) + self.assertEqual(self.check_name, data["results"][0]["test_name"]) + self.assertIn("candidates", data["results"][0]) + self.assertIn("more_info", data["results"][0]) + self.assertIsNotNone(data["results"][0]["more_info"]) diff --git a/tools/openstack_coverage.py b/tools/openstack_coverage.py index 762a08351..1df6480d7 100755 --- a/tools/openstack_coverage.py +++ b/tools/openstack_coverage.py @@ -35,7 +35,7 @@ PATH_PROJECT_LIST = "openstack/governance/plain/reference/projects.yaml" PATH_ZUUL = "zuul/layout.yaml" -TITLE = "OpenStack Bandit Coverage Report -- {0} UTC".format( +TITLE = "OpenStack Bandit Coverage Report -- {} UTC".format( datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') ) @@ -48,13 +48,13 @@ def get_yaml(url): data = yaml.load(r.content) return(data) raise SystemError( - "Could not obtain valid YAML from specified source ({0})" + "Could not obtain valid YAML from specified source ({})" .format(url) ) def list_projects(conf_jenkins): - data = get_yaml("{0}{1}{2}".format(BASE_URL, PATH_INFRA, conf_jenkins)) + data = get_yaml("{}{}{}".format(BASE_URL, PATH_INFRA, conf_jenkins)) # parse data bandit_projects = [] for project in data: @@ -68,15 +68,15 @@ def list_projects(conf_jenkins): # output results print("Bandit jobs have been defined in the following OpenStack projects:") for project in sorted(bandit_projects): - print(" - {0}".format(project)) - print("\n(Configuration from {0}{1}{2})\n".format( + print(" - {}".format(project)) + print("\n(Configuration from {}{}{})\n".format( BASE_URL, PATH_INFRA, conf_jenkins )) return bandit_projects def coverage_zuul(conf_zuul): - data = get_yaml("{0}{1}{2}".format(BASE_URL, 
PATH_INFRA, conf_zuul)) + data = get_yaml("{}{}{}".format(BASE_URL, PATH_INFRA, conf_zuul)) # parse data bandit_jobs = {} bandit_tests = {key: set() for key in TEST_TYPES} @@ -96,15 +96,15 @@ def coverage_zuul(conf_zuul): # output results for test_type in bandit_tests: print( - "\n{0} tests exist for the following OpenStack projects:" + "\n{} tests exist for the following OpenStack projects:" .format(test_type.capitalize()) ) for project in sorted(bandit_tests[test_type]): if project[1] is False: - print(" - {0}".format(project[0])) + print(" - {}".format(project[0])) else: - print(" - {0} (VOTING)".format(project[0])) - print("\n(Configuration from {0}{1}{2})\n".format( + print(" - {} (VOTING)".format(project[0])) + print("\n(Configuration from {}{}{})\n".format( BASE_URL, PATH_INFRA, conf_zuul )) @@ -137,7 +137,7 @@ def _get_repo_names(project_list): # as the key and the repo as the value. project_repos = {key: None for key in project_list} - yaml_data = get_yaml("{0}{1}".format(BASE_URL, PATH_PROJECT_LIST)) + yaml_data = get_yaml("{}{}".format(BASE_URL, PATH_PROJECT_LIST)) for project in yaml_data: diff --git a/tox.ini b/tox.ini index fded4fbd3..41cea3e9b 100644 --- a/tox.ini +++ b/tox.ini @@ -29,6 +29,7 @@ commands = flake8 {posargs} bandit bandit-baseline -r bandit -ll -ii [testenv:pep8] +ignore_errors = true deps = {[testenv]deps} . usedevelop = False @@ -79,7 +80,14 @@ deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:pylint] -commands = pylint --rcfile=pylintrc bandit +commands = -pylint --rcfile=pylintrc bandit + +[testenv:format] +skip_install = true +deps = + pre-commit +commands = + pre-commit run --all-files --show-diff-on-failure [testenv:lower-constraints] basepython = python3