Enable PII Annotations checking enforcement
DE-1764
pwnage101 committed Oct 29, 2019
1 parent b509b2b commit 1312e4f
Showing 2 changed files with 98 additions and 28 deletions.
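In summary: run_pii_check previously only recorded the number of un-annotated models, and with this commit it also fails the build when the code_annotations report says the coverage threshold was not met (the new test asserts that this surfaces as a SystemExit when the task is invoked via call_task). A minimal, self-contained sketch of that decision logic follows; parse_report and the sample text are illustrative, not the actual pavelib code.

import re

def parse_report(report_text):
    """Illustrative stand-in for _extract_missing_pii_annotations, operating on a string."""
    count_match = re.search(r'^Coverage found (\d+) uncovered', report_text, re.MULTILINE)
    uncovered = int(count_match.group(1)) if count_match else 0
    passed = re.search(r'^Coverage threshold not met!', report_text, re.MULTILINE) is None
    return uncovered, passed

sample = (
    'Coverage found 66 uncovered models:\n'
    'Coverage threshold not met! Needed 100.0, actually 95.0!\n'
)
uncovered, passed = parse_report(sample)
print(uncovered, passed)  # 66 False -> the real task would call fail_quality() and exit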
81 changes: 60 additions & 21 deletions pavelib/paver_tests/test_pii_check.py
@@ -4,34 +4,73 @@
from __future__ import absolute_import

import io
import shutil
import tempfile
import unittest

import six
from mock import patch
from paver.easy import call_task
from path import Path as path
from paver.easy import call_task, BuildFailure

import pavelib.quality
from pavelib.utils.envs import Env


@patch.object(pavelib.quality.run_pii_check, 'needs')
@patch('pavelib.quality.sh')
def test_pii_check_report_dir_override(mock_paver_sh, mock_needs, tmpdir):
class TestPaverPIICheck(unittest.TestCase):
"""
run_pii_check succeeds with proper report dir
For testing the paver run_pii_check task
"""
# Make the expected stdout files.
report_dir = tmpdir.mkdir('pii_report')
cms_stdout_report = report_dir.join('pii_check_cms.report')
cms_stdout_report.write('Coverage found 33 uncovered models:\n')
lms_stdout_report = report_dir.join('pii_check_lms.report')
lms_stdout_report.write('Coverage found 66 uncovered models:\n')

mock_needs.return_value = 0
call_task('pavelib.quality.run_pii_check', options={"report_dir": six.text_type(report_dir)})
mock_calls = [six.text_type(call) for call in mock_paver_sh.mock_calls]
assert len(mock_calls) == 2
assert any(['lms.envs.test' in call for call in mock_calls])
assert any(['cms.envs.test' in call for call in mock_calls])
assert all([six.text_type(report_dir) in call for call in mock_calls])
metrics_file = Env.METRICS_DIR / 'pii'
assert io.open(metrics_file, 'r').read() == '66'
def setUp(self):
super(TestPaverPIICheck, self).setUp()
self.report_dir = path(tempfile.mkdtemp())
self.addCleanup(shutil.rmtree, self.report_dir)

@patch.object(pavelib.quality.run_pii_check, 'needs')
@patch('pavelib.quality.sh')
def test_pii_check_report_dir_override(self, mock_paver_sh, mock_needs):
"""
run_pii_check succeeds with proper report dir
"""
# Make the expected stdout files.
cms_stdout_report = self.report_dir / 'pii_check_cms.report'
cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n'])
lms_stdout_report = self.report_dir / 'pii_check_lms.report'
lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n'])

mock_needs.return_value = 0
call_task('pavelib.quality.run_pii_check', options={"report_dir": six.text_type(self.report_dir)})
mock_calls = [six.text_type(call) for call in mock_paver_sh.mock_calls]
assert len(mock_calls) == 2
assert any(['lms.envs.test' in call for call in mock_calls])
assert any(['cms.envs.test' in call for call in mock_calls])
assert all([six.text_type(self.report_dir) in call for call in mock_calls])
metrics_file = Env.METRICS_DIR / 'pii'
assert io.open(metrics_file, 'r').read() == 'Number of PII Annotation violations: 66\n'

@patch.object(pavelib.quality.run_pii_check, 'needs')
@patch('pavelib.quality.sh')
def test_pii_check_failed(self, mock_paver_sh, mock_needs):
"""
run_pii_check fails due to crossing the threshold.
"""
# Make the expected stdout files.
cms_stdout_report = self.report_dir / 'pii_check_cms.report'
cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n'])
lms_stdout_report = self.report_dir / 'pii_check_lms.report'
lms_stdout_report.write_lines([
'Coverage found 66 uncovered models:',
'Coverage threshold not met! Needed 100.0, actually 95.0!',
])

mock_needs.return_value = 0
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_pii_check', options={"report_dir": six.text_type(self.report_dir)})
self.assertRaises(BuildFailure)
mock_calls = [six.text_type(call) for call in mock_paver_sh.mock_calls]
assert len(mock_calls) == 2
assert any(['lms.envs.test' in call for call in mock_calls])
assert any(['cms.envs.test' in call for call in mock_calls])
assert all([six.text_type(self.report_dir) in call for call in mock_calls])
metrics_file = Env.METRICS_DIR / 'pii'
assert io.open(metrics_file, 'r').read() == 'Number of PII Annotation violations: 66\n'
45 changes: 38 additions & 7 deletions pavelib/quality.py
@@ -766,22 +766,39 @@ def _extract_missing_pii_annotations(filename):
filename: Filename where stdout of django_find_annotations was captured.
Returns:
Number of uncovered models.
three-tuple containing:
1. The number of uncovered models,
2. A bool indicating whether the coverage is still below the threshold, and
3. The full report as a string.
"""
uncovered_models = None
pii_check_passed = True
if os.path.isfile(filename):
with io.open(filename, 'r') as report_file:
lines = report_file.readlines()

# Find the count of uncovered models.
uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered')
for line in lines:
uncovered_match = uncovered_regex.match(line)
if uncovered_match:
uncovered_models = int(uncovered_match.groups()[0])
break

# Find a message which suggests the check failed.
failure_regex = re.compile(r'^Coverage threshold not met!')
for line in lines:
failure_match = failure_regex.match(line)
if failure_match:
pii_check_passed = False
break

# Each line in lines already contains a newline.
full_log = ''.join(lines)
else:
fail_quality('pii', u'FAILURE: Log file could not be found: {}'.format(filename))

return uncovered_models
return (uncovered_models, pii_check_passed, full_log)


@task
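Reading the helper's new return contract in isolation: given a report file containing the two LMS lines written by test_pii_check_failed above, it would yield values along the following lines. This is a usage sketch only — the path is hypothetical and the call assumes _extract_missing_pii_annotations (defined above) is in scope.

uncovered_models, pii_check_passed, full_log = _extract_missing_pii_annotations(
    '/tmp/pii_report/pii_check_lms.report'  # hypothetical report path
)
# uncovered_models  -> 66    (parsed from "Coverage found 66 uncovered models:")
# pii_check_passed  -> False (the "Coverage threshold not met!" line is present)
# full_log          -> the full report text, handed to fail_quality() when the check fails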
@@ -798,7 +815,8 @@ def run_pii_check(options):
default_report_dir = (Env.REPORT_DIR / pii_report_name)
report_dir = getattr(options, 'report_dir', default_report_dir)
output_file = os.path.join(report_dir, 'pii_check_{}.report')
uncovered_model_counts = []
env_report = []
pii_check_passed = True
for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")):
try:
print()
@@ -814,17 +832,30 @@
report_dir, env_settings_file, report_dir, env_name.lower(), run_output_file
)
)
uncovered_model_counts.append(_extract_missing_pii_annotations(run_output_file))
uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file)
env_report.append((
uncovered_model_count,
full_log,
))

except BuildFailure as error_message:
fail_quality(pii_report_name, 'FAILURE: {}'.format(error_message))

uncovered_count = max(uncovered_model_counts)
if not pii_check_passed_env:
pii_check_passed = False

# Determine which suite is the worst offender by obtaining the max() keying off uncovered_count.
uncovered_count, full_log = max(env_report, key=lambda r: r[0])

# Write metric file.
if uncovered_count is None:
uncovered_count = 0
_write_metric(uncovered_count, (Env.METRICS_DIR / pii_report_name))
metrics_str = u"Number of PII Annotation violations: {}\n".format(uncovered_count)
_write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name))

return True
# Finally, fail the paver task if code_annotations suggests that the check failed.
if not pii_check_passed:
fail_quality('pii', full_log)


@task
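One detail worth calling out from the run_pii_check changes above: the task now collects a (uncovered_model_count, full_log) tuple per suite and picks the worst offender with max() keyed on the count, so the metric line and any failure log come from the suite with the most un-annotated models. A small illustration using the counts from the tests (33 for CMS, 66 for LMS); the report strings are placeholders:

env_report = [
    (33, 'CMS report text ...'),  # from the cms.envs.test run
    (66, 'LMS report text ...'),  # from the lms.envs.test run
]
uncovered_count, full_log = max(env_report, key=lambda r: r[0])
metrics_str = u"Number of PII Annotation violations: {}\n".format(uncovered_count)
assert metrics_str == u"Number of PII Annotation violations: 66\n"  # matches the test expectation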
