diff --git a/.github/workflows/unit_tests.yaml b/.github/workflows/unit_tests.yaml index bae521c..a96e355 100644 --- a/.github/workflows/unit_tests.yaml +++ b/.github/workflows/unit_tests.yaml @@ -10,16 +10,15 @@ on: jobs: UnitTest: runs-on: ubuntu-latest - env: DATABASE_NAME: test_database steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: '3.10' @@ -30,38 +29,53 @@ jobs: - name: Determine output folder id: set_output_folder + shell: bash run: | - if [[ $GITHUB_EVENT_NAME == "pull_request" ]]; then - branch_name=$GITHUB_BASE_REF - else - branch_name=$GITHUB_REF_NAME - fi - - if [[ $branch_name == "main" ]]; then - echo "output_folder=prod" >> $GITHUB_ENV - elif [[ $branch_name == "stage" ]]; then - echo "output_folder=stage" >> $GITHUB_ENV - elif [[ $branch_name == "dev" ]]; then - echo "output_folder=dev" >> $GITHUB_ENV + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + branch_name="$GITHUB_BASE_REF" else - echo "Unknown branch: $branch_name" - exit 1 + branch_name="$GITHUB_REF_NAME" fi - - name: Run tests with coverage + case "$branch_name" in + main) echo "output_folder=prod" >> "$GITHUB_ENV" ;; + stage) echo "output_folder=stage" >> "$GITHUB_ENV" ;; + dev) echo "output_folder=dev" >> "$GITHUB_ENV" ;; + *) echo "Unknown branch: $branch_name"; exit 1 ;; + esac + + - name: Run tests with coverage (show failures in logs) + shell: bash run: | - timestamp=$(date '+%Y-%m-%d_%H-%M-%S') + set -o pipefail + timestamp="$(date '+%Y-%m-%d_%H-%M-%S')" mkdir -p test_results log_file="test_results/${timestamp}_report.log" - echo -e "\nTest Cases Report Report\n" >> $log_file - # Run the tests and append output to the log file - python -m coverage run --source=src -m unittest discover -s tests/unit_tests >> $log_file 2>&1 - echo -e "\nCoverage Report\n" >> $log_file - coverage report >> $log_file + + { + echo + echo "Test Cases 
Report" + echo + } | tee -a "$log_file" + + # Run unittest in verbose mode; mirror output to console and file + python -m coverage run --source=src -m unittest discover -s tests/unit_tests -v 2>&1 | tee -a "$log_file" + test_status=${PIPESTATUS[0]} + + echo -e "\nCoverage Report\n" | tee -a "$log_file" + coverage report 2>&1 | tee -a "$log_file" + + exit $test_status - name: Check coverage - run: | - coverage report --fail-under=85 + run: coverage report --fail-under=85 + + # Optional: keep the log as a build artifact for easy download + - name: Upload test log artifact + uses: actions/upload-artifact@v4 + with: + name: unit-test-log + path: test_results/ - name: Upload report to Azure uses: LanceMcCarthy/Action-AzureBlobUpload@v2 @@ -69,7 +83,6 @@ jobs: source_folder: 'test_results' destination_folder: '${{ env.output_folder }}' connection_string: ${{ secrets.AZURE_STORAGE_CONNECTION_STRING }} - container_name: 'osw-validation-service ' + container_name: 'osw-validation-service' clean_destination_folder: false delete_if_exists: false - diff --git a/requirements.txt b/requirements.txt index 768bd1f..e3c6fb2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,4 +4,4 @@ python-ms-core==0.0.23 uvicorn==0.20.0 html_testRunner==1.2.1 geopandas==0.14.4 -python-osw-validation==0.2.11 \ No newline at end of file +python-osw-validation==0.2.13 \ No newline at end of file diff --git a/src/validation.py b/src/validation.py index a4120d5..fd04b27 100644 --- a/src/validation.py +++ b/src/validation.py @@ -9,6 +9,7 @@ from python_osw_validation import OSWValidation from .models.queue_message_content import ValidationResult import uuid +import json ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) # Path used for download file generation. 
@@ -55,8 +56,8 @@ def is_osw_valid(self, max_errors) -> ValidationResult: validation_result = validator.validate(max_errors) result.is_valid = validation_result.is_valid if not result.is_valid: - result.validation_message = validation_result.errors - logger.error(f' Error While Validating File: {str(validation_result.errors)}') + result.validation_message = json.dumps(validation_result.issues) + logger.error(f' Error While Validating File: {json.dumps(validation_result.issues)}') Validation.clean_up(downloaded_file_path) else: result.validation_message = 'Failed to validate because unknown file format' diff --git a/tests/unit_tests/test_validation.py b/tests/unit_tests/test_validation.py index bcaa483..aa3cbde 100644 --- a/tests/unit_tests/test_validation.py +++ b/tests/unit_tests/test_validation.py @@ -1,4 +1,5 @@ import os +import json import unittest from pathlib import Path from src.validation import Validation @@ -75,12 +76,36 @@ def test_validate_invalid_file(self, mock_download_file, mock_clean_up): # Assert that validation is marked as valid self.assertFalse(result.is_valid) - self.assertIn('Validation error', ' '.join(result.validation_message)) + errors = json.loads(result.validation_message) + self.assertNotEqual(len(errors), 0) # Ensure clean_up is called twice (once for the file, once for the folder) self.assertEqual(mock_clean_up.call_count, 2) + @patch('src.validation.Validation.clean_up') + @patch('src.validation.Validation.download_single_file') + def test_validate_invalid_file_with_errors(self, mock_download_file, mock_clean_up): + """Test the validate method for an invalid file.""" + mock_download_file.return_value = f'{SAVED_FILE_PATH}/{FAILURE_FILE_NAME}' + error_in_file = 'wa.microsoft.graph.edges.OSW.geojson' + feature_indexes = [3, 6, 8, 25] + error_message = "Additional properties are not allowed ('crossing' was unexpected)" + # Act + result = self.validation.validate(max_errors=10) + + # Assert that validation is marked as invalid + 
self.assertFalse(result.is_valid) + errors = json.loads(result.validation_message) + count = 0 + for error in errors: + self.assertEqual(error['filename'], error_in_file) + self.assertEqual(error['error_message'][0], error_message) + self.assertEqual(error['feature_index'], feature_indexes[count]) + count += 1 + # Ensure clean_up is called twice (once for the file, once for the folder) + self.assertEqual(mock_clean_up.call_count, 2) + @patch('src.validation.OSWValidation') @patch('src.validation.Validation.clean_up') def test_validate_invalid_zip(self, mock_clean_up, mock_osw_validation):