Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
131 changes: 85 additions & 46 deletions .github/scripts/generate-test-summary-pytest.sh
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,70 +1,109 @@
#!/bin/bash
set -e

# Generate Test Summary from Pytest JUnit XML Output
# Usage: ./generate-test-summary-pytest.sh <path-to-junit-xml>
# Generate Detailed Test Summary from Multiple Pytest JUnit XML Output Files
# Shows breakdown by test type (unit vs integration)
# Usage: ./generate-test-summary-pytest-detailed.sh <unit-xml> <integration-xml>

XML_FILE="${1:-test-results.xml}"
UNIT_XML="${1:-}"
INTEGRATION_XML="${2:-}"

echo "## Test Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

# Parse test results from JUnit XML
if [ -f "$XML_FILE" ]; then
# Extract test counts from XML
# JUnit XML structure: <testsuite tests="N" failures="N" errors="N" skipped="N">
# Function to parse XML file
parse_xml() {
local xml_file="$1"
local test_type="$2"

tests=$(grep -oP 'tests="\K[0-9]+' "$XML_FILE" | head -1)
failures=$(grep -oP 'failures="\K[0-9]+' "$XML_FILE" | head -1)
errors=$(grep -oP 'errors="\K[0-9]+' "$XML_FILE" | head -1)
skipped=$(grep -oP 'skipped="\K[0-9]+' "$XML_FILE" | head -1)
if [ ! -f "$xml_file" ]; then
echo "0 0 0 0 0"
return
fi

tests=$(grep -oP 'tests="\K[0-9]+' "$xml_file" | head -1)
failures=$(grep -oP 'failures="\K[0-9]+' "$xml_file" | head -1)
errors=$(grep -oP 'errors="\K[0-9]+' "$xml_file" | head -1)
skipped=$(grep -oP 'skipped="\K[0-9]+' "$xml_file" | head -1)

# Default to 0 if values are empty
tests=${tests:-0}
failures=${failures:-0}
errors=${errors:-0}
skipped=${skipped:-0}

passed=$((tests - failures - errors - skipped))

echo "| Status | Count |" >> $GITHUB_STEP_SUMMARY
echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY
echo "| ✅ Passed | $passed |" >> $GITHUB_STEP_SUMMARY
echo "| ❌ Failed | $((failures + errors)) |" >> $GITHUB_STEP_SUMMARY
echo "| ⏭️ Skipped | $skipped |" >> $GITHUB_STEP_SUMMARY
echo "| **Total** | **$tests** |" >> $GITHUB_STEP_SUMMARY
echo "$tests $failures $errors $skipped $passed"
}

# Parse both files
read -r unit_tests unit_failures unit_errors unit_skipped unit_passed <<< "$(parse_xml "$UNIT_XML" "Unit")"
read -r int_tests int_failures int_errors int_skipped int_passed <<< "$(parse_xml "$INTEGRATION_XML" "Integration")"

# Calculate totals
total_tests=$((unit_tests + int_tests))
total_failures=$((unit_failures + int_failures))
total_errors=$((unit_errors + int_errors))
total_skipped=$((unit_skipped + int_skipped))
total_passed=$((unit_passed + int_passed))
total_failed=$((total_failures + total_errors))

# Display detailed breakdown
echo "### Summary by Test Type" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Test Type | Passed | Failed | Skipped | Total |" >> $GITHUB_STEP_SUMMARY
echo "|-----------|--------|--------|---------|-------|" >> $GITHUB_STEP_SUMMARY

if [ -f "$UNIT_XML" ]; then
echo "| 🔧 Unit Tests | $unit_passed | $((unit_failures + unit_errors)) | $unit_skipped | $unit_tests |" >> $GITHUB_STEP_SUMMARY
fi

if [ -f "$INTEGRATION_XML" ]; then
echo "| 🔗 Integration Tests | $int_passed | $((int_failures + int_errors)) | $int_skipped | $int_tests |" >> $GITHUB_STEP_SUMMARY
fi

echo "| **Total** | **$total_passed** | **$total_failed** | **$total_skipped** | **$total_tests** |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

# Overall status
echo "### Overall Status" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Status | Count |" >> $GITHUB_STEP_SUMMARY
echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY
echo "| ✅ Passed | $total_passed |" >> $GITHUB_STEP_SUMMARY
echo "| ❌ Failed | $total_failed |" >> $GITHUB_STEP_SUMMARY
echo "| ⏭️ Skipped | $total_skipped |" >> $GITHUB_STEP_SUMMARY
echo "| **Total** | **$total_tests** |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

# List failed tests if any
if [ $total_failed -gt 0 ]; then
echo "### ❌ Failed Tests" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

# List failed tests if any
if [ $((failures + errors)) -gt 0 ]; then
echo "### ❌ Failed Tests" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

# Extract failed test names from XML
failed_tests_file=$(mktemp)

# Find testcase elements with failure or error children
grep -oP '<testcase[^>]*classname="[^"]*"[^>]*name="[^"]*"[^>]*>.*?<(failure|error)' "$XML_FILE" | \
grep -oP 'classname="\K[^"]*|name="\K[^"]*' | \
paste -d '.' - - >> "$failed_tests_file" 2>/dev/null || true

if [ -s "$failed_tests_file" ]; then
while IFS= read -r test; do
echo "- \`$test\`" >> $GITHUB_STEP_SUMMARY
done < "$failed_tests_file"
else
echo "_Unable to parse individual test names_" >> $GITHUB_STEP_SUMMARY
failed_tests_file=$(mktemp)

# Extract failed tests from both files
for xml_file in "$UNIT_XML" "$INTEGRATION_XML"; do
if [ -f "$xml_file" ]; then
grep -oP '<testcase[^>]*classname="[^"]*"[^>]*name="[^"]*"[^>]*>.*?<(failure|error)' "$xml_file" | \
grep -oP 'classname="\K[^"]*|name="\K[^"]*' | \
paste -d '.' - - >> "$failed_tests_file" 2>/dev/null || true
fi

echo "" >> $GITHUB_STEP_SUMMARY
echo "❌ **Tests failed!**" >> $GITHUB_STEP_SUMMARY
rm -f "$failed_tests_file"
exit 1
done

if [ -s "$failed_tests_file" ]; then
while IFS= read -r test; do
echo "- \`$test\`" >> $GITHUB_STEP_SUMMARY
done < "$failed_tests_file"
else
echo "✅ **All tests passed!**" >> $GITHUB_STEP_SUMMARY
echo "_Unable to parse individual test names_" >> $GITHUB_STEP_SUMMARY
fi
else
echo "⚠️ No test results found at: $XML_FILE" >> $GITHUB_STEP_SUMMARY

echo "" >> $GITHUB_STEP_SUMMARY
echo "❌ **Tests failed!**" >> $GITHUB_STEP_SUMMARY
rm -f "$failed_tests_file"
exit 1
else
echo "✅ **All tests passed!**" >> $GITHUB_STEP_SUMMARY
fi

18 changes: 13 additions & 5 deletions .github/workflows/run-python-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,14 @@ jobs:
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Run tests
run: pytest --verbose --tb=short --junit-xml=test-results.xml || true
- name: Run unit tests
run: pytest -m unit --verbose --tb=short --junit-xml=test-results-unit.xml
env:
MONGO_URI: ${{ secrets.MFLIX_URI }}
MONGO_DB: sample_mflix

- name: Run integration tests
run: pytest -m integration --verbose --tb=short --junit-xml=test-results-integration.xml || true
env:
MONGO_URI: ${{ secrets.MFLIX_URI }}
MONGO_DB: sample_mflix
Expand All @@ -48,7 +54,8 @@ jobs:
with:
name: test-results
path: |
server/python/test-results.xml
server/python/test-results-unit.xml
server/python/test-results-integration.xml
server/python/htmlcov/
retention-days: 30

Expand All @@ -57,5 +64,6 @@ jobs:
working-directory: .
run: |
chmod +x .github/scripts/generate-test-summary-pytest.sh
.github/scripts/generate-test-summary-pytest.sh server/python/test-results.xml

.github/scripts/generate-test-summary-pytest.sh \
server/python/test-results-unit.xml \
server/python/test-results-integration.xml
192 changes: 192 additions & 0 deletions server/python/tests/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,192 @@
# Testing Guide for FastAPI MongoDB Sample Application

This document describes the testing strategy and how to run tests for the FastAPI MongoDB MFlix sample application.

## Test Structure

The test suite is organized into three categories:

### 1. **Schema Tests** (`test_movie_schemas.py`)
- Tests Pydantic model validation
- Validates request/response data structures
- No database or external dependencies required
- **10 tests** covering `CreateMovieRequest`, `UpdateMovieRequest`, and `Movie` models

### 2. **Unit Tests** (`test_movie_routes.py`)
- Tests route handler functions in isolation
- Uses `unittest.mock.AsyncMock` to mock MongoDB operations
- No database connection required
- Fast execution (< 2 seconds)
- **51 tests** covering:
- CRUD operations (create, read, update, delete)
- Batch operations
- Search functionality
- Vector search
- Aggregation pipelines

### 3. **Integration Tests** (`tests/integration/test_movie_routes_integration.py`)
- Tests the full HTTP request/response cycle
- Requires a running MongoDB instance with MFlix dataset
- Uses a real server running in a subprocess
- Tests are idempotent (clean up after themselves)
- **10 tests** covering:
- CRUD operations
- Batch operations
- Search functionality
- Aggregation pipelines

## Running Tests

### Prerequisites

1. **For all tests:**
```bash
cd server/python
source .venv/bin/activate # or `.venv\Scripts\activate` on Windows
```

2. **For integration tests only:**
- MongoDB instance running with MFlix dataset loaded
- Connection string configured in `.env` file
- Port 8001 available (used for test server)

### Run All Tests

```bash
pytest tests/ -v
```

**Expected output:** 71 passed in ~6 seconds

### Run Only Unit Tests (Fast, No Database Required)

```bash
pytest -m unit -v
```

**Expected output:** 61 passed, 10 deselected in ~1.5 seconds

### Run Only Integration Tests (Requires Database)

```bash
pytest -m integration -v
```

**Expected output:** 10 passed, 61 deselected in ~5 seconds

### Run Specific Test File

```bash
# Schema tests
pytest tests/test_movie_schemas.py -v

# Unit tests
pytest tests/test_movie_routes.py -v

# Integration tests
pytest tests/integration/test_movie_routes_integration.py -v
```

### Run Specific Test Class or Method

```bash
# Run a specific test class
pytest tests/test_movie_routes.py::TestCreateMovie -v

# Run a specific test method
pytest tests/test_movie_routes.py::TestCreateMovie::test_create_movie_success -v
```

## Test Markers

Tests are marked with pytest markers for selective execution:

- `@pytest.mark.unit` - Unit tests with mocked dependencies
- `@pytest.mark.integration` - Integration tests requiring database

## Integration Test Strategy

### Why Use a Running Server?

The integration tests start a real FastAPI server in a subprocess because:

1. **Event Loop Isolation**: AsyncMongoClient binds to the event loop it was created in. Using a real server avoids event loop conflicts.
2. **Real-World Testing**: Tests the actual deployment configuration, including middleware, CORS, and startup events.
3. **Educational Value**: Demonstrates a practical integration testing pattern for async Python applications.

### Idempotent Tests

All integration tests are designed to be idempotent:

- **Create operations**: Tests create new documents with unique identifiers
- **Cleanup**: Fixtures automatically delete created documents after tests
- **Read-only tests**: Tests against existing MFlix data don't modify anything
- **Batch operations**: Create and delete multiple documents with proper cleanup

### Fixtures

Integration tests use pytest fixtures for test data lifecycle management:

- `client`: AsyncClient connected to the test server
- `test_movie_data`: Sample movie data for creating test documents
- `created_movie`: Creates a movie and cleans it up automatically
- `multiple_test_movies`: Creates 3 movies for batch operation testing

## Known Issues

### Batch Create Bug (Skipped Test)

The `test_batch_create_movies` test is currently skipped due to a known bug in the API:

- **Issue**: `create_movies_batch` function calls `insert_many` twice (lines 1006 and 1015 in `movies.py`)
- **Impact**: Causes 500 error on batch create operations
- **Status**: To be fixed in a separate PR
- **Test behavior**: Test detects the error and skips gracefully

## Troubleshooting

### Integration Tests Fail to Start Server

**Error**: `Port 8001 is already in use`

**Solution**:
- Kill any process using port 8001: `lsof -ti:8001 | xargs kill -9`
- Or change the port in `tests/integration/conftest.py`

### Integration Tests Can't Connect to MongoDB

**Error**: Connection timeout or authentication error

**Solution**:
- Verify MongoDB is running
- Check `.env` file has correct `MONGODB_URI`
- Ensure MFlix dataset is loaded
- Test connection: `mongosh <your-connection-string>`

### Unit Tests Fail with Import Errors

**Error**: `ModuleNotFoundError`

**Solution**:
- Ensure virtual environment is activated
- Install dependencies: `pip install -r requirements.txt`
- Run from `server/python` directory

## Contributing

When adding new routes or functionality:

1. **Add unit tests** in `test_movie_routes.py` with mocked dependencies
2. **Add integration tests** in `tests/integration/test_movie_routes_integration.py` for end-to-end validation
3. **Use appropriate markers** (`@pytest.mark.unit` or `@pytest.mark.integration`)
4. **Follow fixture patterns** for test data lifecycle management
5. **Ensure idempotency** - tests should clean up after themselves
6. **Document test purpose** with clear docstrings

## Additional Resources

- [pytest documentation](https://docs.pytest.org/)
- [pytest-asyncio documentation](https://pytest-asyncio.readthedocs.io/)
- [FastAPI testing guide](https://fastapi.tiangolo.com/tutorial/testing/)
- [MongoDB Motor documentation](https://motor.readthedocs.io/)

2 changes: 2 additions & 0 deletions server/python/tests/integration/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
"""Integration tests for the FastAPI MongoDB sample application."""

Loading