From 5d26e641c230ecc4073a48c445738d2602a1407d Mon Sep 17 00:00:00 2001
From: cbullinger
Date: Wed, 5 Nov 2025 19:24:22 -0500
Subject: [PATCH] test(python): add test infrastructure

 .github/scripts/generate-test-summary-pytest.sh |  70 ++++++++
 .github/workflows/run-python-tests.yml          |  61 +++++++
 server/python/pytest.ini                        |  33 ++++
 server/python/tests/__init__.py                 |   2 +
 server/python/tests/conftest.py                 |  90 ++++++++++
 server/python/tests/test_movie_routes.py        | 160 ++++++++++++++++++
 6 files changed, 416 insertions(+)

--- .github/scripts/generate-test-summary-pytest.sh (new file) ---

#!/bin/bash
# Generate a GitHub Actions job summary from pytest JUnit XML output.
# Usage: ./generate-test-summary-pytest.sh [path/to/test-results.xml]
#
# Exits non-zero when tests failed or the results file is missing, so the
# calling workflow step fails even though pytest itself ran with `|| true`.
set -e

XML_FILE="${1:-test-results.xml}"

# Append one line to the job summary.
# Fix: quote "$GITHUB_STEP_SUMMARY" — the original left it unquoted on every
# redirect, which word-splits if the runner's summary path contains spaces.
summary() {
    echo "$1" >> "$GITHUB_STEP_SUMMARY"
}

summary "## Test Results"
summary ""

if [ -f "$XML_FILE" ]; then
    # Aggregate counts from the <testsuite tests= failures= errors= skipped=>
    # attributes. A `grep | head` pipeline exits with head's status (0), so
    # set -e is safe here even when an attribute is absent.
    tests=$(grep -oP 'tests="\K[0-9]+' "$XML_FILE" | head -1)
    failures=$(grep -oP 'failures="\K[0-9]+' "$XML_FILE" | head -1)
    errors=$(grep -oP 'errors="\K[0-9]+' "$XML_FILE" | head -1)
    skipped=$(grep -oP 'skipped="\K[0-9]+' "$XML_FILE" | head -1)

    # Default any missing attribute to 0.
    tests=${tests:-0}
    failures=${failures:-0}
    errors=${errors:-0}
    skipped=${skipped:-0}

    passed=$((tests - failures - errors - skipped))

    summary "| Status | Count |"
    summary "|--------|-------|"
    summary "| ✅ Passed | $passed |"
    summary "| ❌ Failed | $((failures + errors)) |"
    summary "| ⏭️ Skipped | $skipped |"
    summary "| **Total** | **$tests** |"
    summary ""

    # List failed tests if any
    if [ $((failures + errors)) -gt 0 ]; then
        summary "### ❌ Failed Tests"
        summary ""

        failed_tests_file=$(mktemp)

        # Best-effort extraction of "classname.name" for each failing testcase.
        # NOTE(review): grep is line-oriented, so this only matches when a
        # <testcase> and its <failure>/<error> child share one physical line —
        # confirm against pytest's junitxml output, or switch to
        # `python -c` with xml.etree for robust parsing.
        grep -oP '<testcase[^>]*classname="[^"]*"[^>]*name="[^"]*"[^>]*>.*?<(failure|error)' "$XML_FILE" | \
            grep -oP 'classname="\K[^"]*|name="\K[^"]*' | \
            paste -d '.' - - >> "$failed_tests_file" 2>/dev/null || true

        if [ -s "$failed_tests_file" ]; then
            while IFS= read -r test; do
                summary "- \`$test\`"
            done < "$failed_tests_file"
        else
            summary "_Unable to parse individual test names_"
        fi

        summary ""
        summary "❌ **Tests failed!**"
        rm -f "$failed_tests_file"
        exit 1
    else
        summary "✅ **All tests passed!**"
    fi
else
    summary "⚠️ No test results found at: $XML_FILE"
    exit 1
fi

--- .github/workflows/run-python-tests.yml (new file, continues in next hunk) ---

name: Run Python Tests

on:
  # NOTE(review): pull_request_target grants secret access while the checkout
  # step below pulls the fork PR's head SHA, so untrusted code runs with that
  # access. The `environment: testing` manual-approval gate is the mitigation
  # — do not remove it.
  pull_request_target:
    branches:
      - development
  push:
    branches:
      - development

jobs:
  test:
    name: Run Python Tests
    runs-on: ubuntu-latest
    # Require manual approval for fork PRs
    environment: testing

    defaults:
      run:
        working-directory: server/python

    steps:
      - name: Checkout code
        uses: actions/checkout@v5
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
python-version: '3.13' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + + - name: Run tests + run: pytest --verbose --tb=short --junit-xml=test-results.xml || true + env: + MONGO_URI: ${{ secrets.MFLIX_URI }} + MONGO_DB: sample_mflix + + - name: Upload test results + uses: actions/upload-artifact@v4 + if: always() + with: + name: test-results + path: | + server/python/test-results.xml + server/python/htmlcov/ + retention-days: 30 + + - name: Generate Test Summary + if: always() + working-directory: . + run: | + chmod +x .github/scripts/generate-test-summary-pytest.sh + .github/scripts/generate-test-summary-pytest.sh server/python/test-results.xml + diff --git a/server/python/pytest.ini b/server/python/pytest.ini new file mode 100644 index 0000000..f17276f --- /dev/null +++ b/server/python/pytest.ini @@ -0,0 +1,33 @@ +[pytest] +# Pytest configuration for Python FastAPI backend tests + +# Test discovery patterns +python_files = test_*.py *_test.py +python_classes = Test* +python_functions = test_* + +# Test paths +testpaths = tests + +# Output options +addopts = + -v + --strict-markers + --tb=short + --asyncio-mode=auto + --color=yes + +# Markers for categorizing tests +markers = + unit: Unit tests with mocked dependencies + integration: Integration tests requiring database + slow: Tests that take longer to run + +# Async settings +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function + +# Coverage settings (optional) +# Uncomment to enable coverage reporting +# addopts = --cov=src --cov-report=html --cov-report=term + diff --git a/server/python/tests/__init__.py b/server/python/tests/__init__.py new file mode 100644 index 0000000..e7991ee --- /dev/null +++ b/server/python/tests/__init__.py @@ -0,0 +1,2 @@ +# Tests package + diff --git a/server/python/tests/conftest.py b/server/python/tests/conftest.py new file mode 100644 index 0000000..440cedf --- /dev/null +++ 
# server/python/tests/conftest.py (new file)
"""
Pytest configuration and shared fixtures.

Every fixture here is pure data (no I/O): sample movie documents plus
factory fixtures that build the API's success/error response envelopes.
"""

import sys
from pathlib import Path

import pytest

# Make the application package (server/python/src) importable from tests.
sys.path.insert(0, str(Path(__file__).parent.parent))

# Enable the pytest-asyncio plugin for async test support.
pytest_plugins = ('pytest_asyncio',)


@pytest.fixture
def sample_movie():
    """A single fully-populated sample movie document."""
    return dict(
        _id="507f1f77bcf86cd799439011",
        title="Test Movie",
        year=2024,
        plot="A test movie plot",
        genres=["Action", "Drama"],
        directors=["Test Director"],
        cast=["Actor 1", "Actor 2"],
        runtime=120,
        rated="PG-13",
    )


@pytest.fixture
def sample_movies():
    """Three minimal sample movie documents with distinct ids and genres."""
    rows = [
        ("507f1f77bcf86cd799439011", "Test Movie 1", 2024, "First test movie", ["Action"]),
        ("507f1f77bcf86cd799439012", "Test Movie 2", 2023, "Second test movie", ["Comedy"]),
        ("507f1f77bcf86cd799439013", "Test Movie 3", 2024, "Third test movie", ["Drama"]),
    ]
    return [
        {"_id": _id, "title": title, "year": year, "plot": plot, "genres": genres}
        for _id, title, year, plot, genres in rows
    ]


@pytest.fixture
def mock_success_response():
    """Factory building the success envelope the API wraps payloads in."""
    def _create_response(data, message="Success"):
        return {
            "success": True,
            "message": message,
            "data": data,
            "timestamp": "2024-01-01T00:00:00.000Z",
        }
    return _create_response


@pytest.fixture
def mock_error_response():
    """Factory building the error envelope the API returns on failure."""
    def _create_response(message, code=None, details=None):
        return {
            "success": False,
            "message": message,
            "error": {
                "message": message,
                "code": code,
                "details": details,
            },
            "timestamp": "2024-01-01T00:00:00.000Z",
        }
    return _create_response
# server/python/tests/test_movie_routes.py (new file)
"""
Unit tests for the movie API's Pydantic request models.

These mirror the Express tests from PR #21: lightweight validation-only
checks with no database. Full integration tests would require a test
database or a mocking strategy that handles AsyncMongoClient event-loop
binding issues.
"""

import pytest
from pydantic import ValidationError

from src.models.models import CreateMovieRequest, UpdateMovieRequest

# Test constants
TEST_MOVIE_ID = "507f1f77bcf86cd799439011"
INVALID_MOVIE_ID = "invalid-id"


def _error_locs(exc_info):
    """Collect the field locations reported by a captured ValidationError."""
    return {err["loc"] for err in exc_info.value.errors()}


@pytest.mark.unit
class TestMovieCreateValidation:
    """CreateMovieRequest accepts valid payloads and rejects invalid ones."""

    def test_create_movie_with_valid_data(self):
        """A complete, well-typed payload validates cleanly."""
        movie = CreateMovieRequest(
            title="Test Movie",
            year=2024,
            plot="A test movie plot",
            genres=["Action", "Drama"],
            runtime=120,
        )
        assert movie.title == "Test Movie"
        assert movie.year == 2024
        assert movie.plot == "A test movie plot"

    def test_create_movie_missing_required_field(self):
        """Omitting the required title raises ValidationError on 'title'."""
        with pytest.raises(ValidationError) as exc_info:
            CreateMovieRequest(year=2024, plot="A movie without title")
        assert ("title",) in _error_locs(exc_info)

    def test_create_movie_invalid_year_type(self):
        """A non-numeric year raises ValidationError on 'year'."""
        with pytest.raises(ValidationError) as exc_info:
            CreateMovieRequest(title="Test Movie", year="not-a-number")
        assert ("year",) in _error_locs(exc_info)

    def test_create_movie_with_optional_fields(self):
        """Title alone is sufficient; optional fields default to None."""
        movie = CreateMovieRequest(title="Minimal Movie")
        assert movie.title == "Minimal Movie"
        assert movie.year is None
        assert movie.plot is None


@pytest.mark.unit
class TestMovieUpdateValidation:
    """UpdateMovieRequest: every field is optional, partial updates allowed."""

    def test_update_movie_with_valid_data(self):
        """A full update payload validates cleanly."""
        update = UpdateMovieRequest(title="Updated Title", year=2025)
        assert update.title == "Updated Title"
        assert update.year == 2025

    def test_update_movie_with_partial_data(self):
        """A single-field update is accepted; untouched fields stay None."""
        update = UpdateMovieRequest(title="Only Title Updated")
        assert update.title == "Only Title Updated"
        assert update.year is None

    def test_update_movie_empty_data(self):
        """An empty update is valid (all fields optional)."""
        update = UpdateMovieRequest()
        assert update.title is None
        assert update.year is None


@pytest.mark.unit
class TestMovieDataStructure:
    """Structure and type checks on accepted movie payloads."""

    def test_movie_with_all_fields(self):
        """Every known optional field is accepted alongside the title."""
        movie = CreateMovieRequest(
            title="Complete Movie",
            year=2024,
            plot="Full plot",
            fullplot="Extended plot description",
            genres=["Action", "Drama", "Thriller"],
            runtime=142,
            cast=["Actor 1", "Actor 2", "Actor 3"],
            directors=["Director 1"],
            writers=["Writer 1", "Writer 2"],
            languages=["English", "Spanish"],
            rated="PG-13",
            countries=["USA"],
        )
        assert movie.title == "Complete Movie"
        assert len(movie.genres) == 3
        assert len(movie.cast) == 3

    def test_movie_genres_as_list(self):
        """Genres round-trip as a Python list."""
        movie = CreateMovieRequest(title="Genre Test", genres=["Sci-Fi", "Adventure"])
        assert isinstance(movie.genres, list)
        assert "Sci-Fi" in movie.genres

    def test_movie_with_numeric_fields(self):
        """Integer fields stay integers after validation."""
        movie = CreateMovieRequest(title="Numeric Test", year=2024, runtime=120)
        assert isinstance(movie.year, int)
        assert isinstance(movie.runtime, int)