diff --git a/.bumpversion.cfg b/.bumpversion.cfg new file mode 100644 index 0000000..33a4eba --- /dev/null +++ b/.bumpversion.cfg @@ -0,0 +1,10 @@ +[bumpversion] +current_version = 1.0.0 +commit = True +tag = True +tag_name = v{new_version} +message = [RELEASE] Bump version: {current_version} → {new_version} + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index aad77dd..618d144 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,10 +12,10 @@ env: jobs: test: name: Test Suite - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + python-version: ['3.9'] steps: - name: Checkout code @@ -43,7 +43,7 @@ jobs: integration-test: name: Integration Tests - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: test if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..376774b --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,265 @@ +name: Release + +on: + push: + branches: + - main + - develop + tags: + - 'v*.*.*' + workflow_dispatch: + inputs: + version_type: + description: 'Type of version bump' + required: true + default: 'patch' + type: choice + options: + - patch + - minor + - major + branch: + description: 'Branch to release from' + required: true + default: 'main' + type: choice + options: + - main + - develop + skip_tests: + description: 'Skip tests before release' + required: false + default: false + type: boolean + +env: + PYTHON_VERSION: '3.9' + +jobs: + # Test before releasing - reuse existing CI strategy + test: + name: Test Suite + runs-on: ubuntu-latest + if: ${{ !inputs.skip_tests }} + strategy: + matrix: + python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[dev]" + + - name: Run linting + run: | + make lint + - name: Run formatting + run: | + make format + - name: Run tests + run: | + make test + + integration-test: + name: Integration Tests + runs-on: ubuntu-latest + needs: test + if: ${{ !inputs.skip_tests && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop') }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: '3.9' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[dev]" + + - name: Run integration tests + env: + LABELLERR_API_KEY: ${{ secrets.LABELLERR_API_KEY }} + LABELLERR_API_SECRET: ${{ secrets.LABELLERR_API_SECRET }} + LABELLERR_CLIENT_ID: ${{ secrets.LABELLERR_CLIENT_ID }} + LABELLERR_TEST_EMAIL: ${{ secrets.LABELLERR_TEST_EMAIL }} + run: | + python -m pytest labellerr_use_case_tests.py -v + + release: + name: Create Release + runs-on: ubuntu-latest + needs: [test, integration-test] + if: always() && (needs.test.result == 'success' || needs.test.result == 'skipped') && (needs.integration-test.result == 'success' || needs.integration-test.result == 'skipped') + outputs: + version: ${{ steps.version.outputs.version }} + 
tag: ${{ steps.version.outputs.tag }} + ticket: ${{ steps.ticket.outputs.ticket }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install bump2version + run: pip install bump2version + + - name: Configure git + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + + - name: Extract ticket from commit or branch + id: ticket + run: | + # Extract LABIMP ticket from latest commit message or branch name + COMMIT_MSG=$(git log -1 --pretty=format:"%s") + TICKET=$(echo "$COMMIT_MSG" | grep -oE 'LABIMP-[0-9]+' | head -1 || echo "") + if [ -z "$TICKET" ]; then + TICKET=$(echo "${{ github.head_ref || github.ref_name }}" | grep -oE 'LABIMP-[0-9]+' | head -1 || echo "RELEASE") + fi + echo "ticket=$TICKET" >> $GITHUB_OUTPUT + echo "Found ticket: $TICKET" + + - name: Determine version bump type + id: bump_type + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "type=${{ inputs.version_type }}" >> $GITHUB_OUTPUT + elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + echo "type=patch" >> $GITHUB_OUTPUT + elif [[ "${{ github.ref }}" == "refs/heads/develop" ]]; then + echo "type=minor" >> $GITHUB_OUTPUT + else + echo "type=patch" >> $GITHUB_OUTPUT + fi + + - name: Get current version + id: current_version + run: | + VERSION=$(grep '^version = ' pyproject.toml | cut -d'"' -f2) + echo "current=$VERSION" >> $GITHUB_OUTPUT + + - name: Bump version + id: version + run: | + BUMP_TYPE=${{ steps.bump_type.outputs.type }} + + # Create .bumpversion.cfg with LABIMP commit message format + cat > .bumpversion.cfg << EOF + [bumpversion] + current_version = ${{ steps.current_version.outputs.current }} + commit = True + tag = True + tag_name = v{new_version} + message = [${{ steps.ticket.outputs.ticket }}] Bump version: {current_version} → {new_version} + + [bumpversion:file:pyproject.toml] + search = version = "{current_version}" + replace = version = "{new_version}" + EOF + + # Bump the version + bump2version $BUMP_TYPE + + # Get the new version + NEW_VERSION=$(grep '^version = ' pyproject.toml | cut -d'"' -f2) + echo "version=$NEW_VERSION" >> $GITHUB_OUTPUT + echo "tag=v$NEW_VERSION" >> $GITHUB_OUTPUT + + - name: Push changes + run: | + git push origin ${{ github.ref_name }} + git push origin ${{ steps.version.outputs.tag }} + + - name: Create GitHub Release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ steps.version.outputs.tag }} + name: '[${{ steps.ticket.outputs.ticket }}] Release ${{ steps.version.outputs.tag }}' + draft: false + prerelease: ${{ github.ref == 'refs/heads/develop' }} + generate_release_notes: true + body: | + ## [${{ steps.ticket.outputs.ticket }}] Changes in ${{ steps.version.outputs.tag }} + + ### What's Changed + - Automated release from `${{ github.ref_name }}` branch + - Version bumped from `${{ steps.current_version.outputs.current }}` to `${{ steps.version.outputs.version }}` + + ### Branch Strategy + - **Main branch** releases are production-ready patches + - **Develop branch** releases are pre-release minor versions for testing + + build: + name: Build Package + runs-on: ubuntu-latest + needs: [release] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ needs.release.outputs.tag }} + + - name: Set up Python + uses: actions/setup-python@v4 + with: + 
python-version: ${{ env.PYTHON_VERSION }} + + - name: Install build dependencies + run: | + python -m pip install --upgrade pip + pip install build + + - name: Build package + run: | + python -m build + + - name: Upload build artifacts + uses: actions/upload-artifact@v4 + with: + name: dist-${{ needs.release.outputs.version }} + path: dist/* + retention-days: 90 + + notify: + name: Notify Release Completion + runs-on: ubuntu-latest + needs: [release, build] + if: always() + + steps: + - name: Release Success + if: needs.release.result == 'success' && needs.build.result == 'success' + run: | + echo "🎉 Release ${{ needs.release.outputs.version }} completed successfully!" + echo "📦 Package built and artifacts uploaded" + echo "🏷️ Git tag: ${{ needs.release.outputs.tag }}" + echo "📋 Release URL: https://github.com/${{ github.repository }}/releases/tag/${{ needs.release.outputs.tag }}" + + - name: Release Failed + if: needs.release.result == 'failure' || needs.build.result == 'failure' + run: | + echo "❌ Release failed!" + echo "Release job: ${{ needs.release.result }}" + echo "Build job: ${{ needs.build.result }}" + exit 1 \ No newline at end of file diff --git a/.gitignore b/.gitignore index 2b8217e..74b2fb6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,71 @@ +# Python .venv __pycache__ +*.pyc +*/*.pyc +*.pyo +*.pyd +.Python build/ +develop-eggs/ dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ *.egg-info/ -*.pyc -*/*.pyc +.installed.cfg +*.egg + +# IDE .idea -.env +.vscode/ +*.swp +*.swo +*~ + +# OS .DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db + +# Environment +.env +.env.local +.env.development.local +.env.test.local +.env.production.local + +# Testing +.coverage +htmlcov/ +.pytest_cache/ +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.tox/ + +# Documentation +docs/_build/ +site/ + +# Release files +.bumpversion.cfg.bak +*.bak + +# Claude .claude + +# Test data tests/test_data - if file == '.DS_Store': - continue diff --git a/Makefile b/Makefile index 877d9fb..5053e2a 100644 --- a/Makefile +++ b/Makefile @@ -37,8 +37,21 @@ build: ## Build package version: @grep '^version = ' pyproject.toml | cut -d'"' -f2 | sed 's/^/Current version: /' || echo "Version not found" -info: +info: @echo "Python: $(shell $(PYTHON) --version)" @echo "Working directory: $(shell pwd)" @echo "Git branch: $(shell git branch --show-current 2>/dev/null || echo 'Not a git repository')" - @make version \ No newline at end of file + @make version + +check-release: ## Check if everything is ready for release + @echo "Checking release readiness..." + @git status --porcelain | grep -q . && echo "❌ Git working directory is not clean" || echo "✅ Git working directory is clean" + @git branch --show-current | grep -q "main\|develop" && echo "✅ On main or develop branch" || echo "⚠️ Not on main or develop branch" + @make version + @echo "✅ Release check complete" + @echo "" + @echo "To create a release:" + @echo "1. Create feature branch: git checkout -b feature/LABIMP-XXXX-release-vX.X.X" + @echo "2. Update version in pyproject.toml" + @echo "3. Commit: git commit -m '[LABIMP-XXXX] Prepare release vX.X.X'" + @echo "4. 
Push and create PR to main (patch) or develop (minor)" \ No newline at end of file diff --git a/labellerr/async_client.py b/labellerr/async_client.py index 9afb8d7..01526dc 100644 --- a/labellerr/async_client.py +++ b/labellerr/async_client.py @@ -79,7 +79,7 @@ def _build_headers( api_secret=self.api_secret, source="sdk-async", client_id=client_id, - extra_headers=extra_headers + extra_headers=extra_headers, ) async def _handle_response( diff --git a/labellerr/client.py b/labellerr/client.py index a9b4480..aaa7281 100644 --- a/labellerr/client.py +++ b/labellerr/client.py @@ -127,7 +127,7 @@ def _build_headers(self, client_id=None, extra_headers=None): api_secret=self.api_secret, source="sdk", client_id=client_id, - extra_headers=extra_headers + extra_headers=extra_headers, ) def _handle_response(self, response, request_id=None, success_codes=None): @@ -622,9 +622,11 @@ def _upload_preannotation_sync( "annotation_format": annotation_format, "annotation_file": annotation_file, } - client_utils.validate_required_params(required_params, list(required_params.keys())) + client_utils.validate_required_params( + required_params, list(required_params.keys()) + ) client_utils.validate_annotation_format(annotation_format, annotation_file) - + url = f"{self.base_url}/actions/upload_answers?project_id={project_id}&answer_format={annotation_format}&client_id={client_id}" file_name = client_utils.validate_file_exists(annotation_file) # get the direct upload url @@ -922,9 +924,7 @@ def create_local_export(self, project_id, client_id, export_config): logging.error(f"Failed to create local export: {str(e)}") raise LabellerrError(f"Failed to create local export: {str(e)}") - def fetch_download_url( - self, project_id, uuid, export_id, client_id - ): + def fetch_download_url(self, project_id, uuid, export_id, client_id): try: headers = self._build_headers( client_id=client_id, extra_headers={"Content-Type": "application/json"} @@ -954,9 +954,7 @@ def fetch_download_url( logging.error(f"Unexpected error in download_function: {str(e)}") raise LabellerrError(f"Unexpected error in download_function: {str(e)}") - def check_export_status( - self, project_id, report_ids, client_id - ): + def check_export_status(self, project_id, report_ids, client_id): request_uuid = client_utils.generate_request_id() try: if not project_id: diff --git a/labellerr/client_utils.py b/labellerr/client_utils.py index ae416a1..e786889 100644 --- a/labellerr/client_utils.py +++ b/labellerr/client_utils.py @@ -1,6 +1,7 @@ """ Shared utilities for both sync and async Labellerr clients. """ + import uuid from typing import Dict, Optional, Any from . import constants @@ -47,7 +48,7 @@ def validate_rotation_config(rotation_config: Dict[str, Any]) -> None: :raises LabellerrError: If the configuration is invalid. """ from .exceptions import LabellerrError - + annotation_rotation_count = rotation_config.get("annotation_rotation_count") review_rotation_count = rotation_config.get("review_rotation_count") client_review_rotation_count = rotation_config.get("client_review_rotation_count") @@ -74,13 +75,13 @@ def validate_rotation_config(rotation_config: Dict[str, Any]) -> None: def validate_required_params(params: Dict[str, Any], required_list: list) -> None: """ Validates that all required parameters are present. 
- + :param params: Dictionary of parameters to validate :param required_list: List of required parameter names :raises LabellerrError: If any required parameter is missing """ from .exceptions import LabellerrError - + for param in required_list: if param not in params: raise LabellerrError(f"Required parameter {param} is missing") @@ -89,14 +90,14 @@ def validate_required_params(params: Dict[str, Any], required_list: list) -> Non def validate_file_exists(file_path: str) -> str: """ Validates that a file exists and returns the basename. - + :param file_path: Path to the file :return: basename of the file :raises LabellerrError: If file doesn't exist """ import os from .exceptions import LabellerrError - + if os.path.exists(file_path): return os.path.basename(file_path) else: @@ -106,19 +107,19 @@ def validate_file_exists(file_path: str) -> str: def validate_annotation_format(annotation_format: str, annotation_file: str) -> None: """ Validates annotation format and file extension compatibility. - + :param annotation_format: Format of the annotation :param annotation_file: Path to the annotation file :raises LabellerrError: If format/extension mismatch """ import os from .exceptions import LabellerrError - + if annotation_format not in constants.ANNOTATION_FORMAT: raise LabellerrError( f"Invalid annotation_format. Must be one of {constants.ANNOTATION_FORMAT}" ) - + # Check if the file extension is .json when annotation_format is coco_json if annotation_format == "coco_json": file_extension = os.path.splitext(annotation_file)[1].lower() @@ -131,29 +132,29 @@ def validate_annotation_format(annotation_format: str, annotation_file: str) -> def validate_export_config(export_config: Dict[str, Any]) -> None: """ Validates export configuration parameters. - + :param export_config: Export configuration dictionary :raises LabellerrError: If configuration is invalid """ from .exceptions import LabellerrError - + required_params = [ "export_name", - "export_description", + "export_description", "export_format", "statuses", ] - + for param in required_params: if param not in export_config: raise LabellerrError(f"Required parameter {param} is missing") - + if param == "export_format": if export_config[param] not in constants.LOCAL_EXPORT_FORMAT: raise LabellerrError( f"Invalid export_format. Must be one of {constants.LOCAL_EXPORT_FORMAT}" ) - + if param == "statuses": if not isinstance(export_config[param], list): raise LabellerrError(f"Invalid statuses. 
Must be an array {param}") @@ -166,4 +167,4 @@ def validate_export_config(export_config: Dict[str, Any]) -> None: def generate_request_id() -> str: """Generate a unique request ID.""" - return str(uuid.uuid4()) \ No newline at end of file + return str(uuid.uuid4()) diff --git a/labellerr_use_case_tests.py b/labellerr_use_case_tests.py index 56c38f1..901fd4b 100644 --- a/labellerr_use_case_tests.py +++ b/labellerr_use_case_tests.py @@ -1,5 +1,3 @@ - - import os import sys import time @@ -10,54 +8,58 @@ from labellerr.client import LabellerrClient from labellerr.exceptions import LabellerrError import dotenv + dotenv.load_dotenv() + class LabelerUseCaseIntegrationTests(unittest.TestCase): def setUp(self): - self.api_key = os.getenv('API_KEY', 'test-api-key') - self.api_secret = os.getenv('API_SECRET', 'test-api-secret') - self.client_id = os.getenv('CLIENT_ID', 'test-client-id') - self.test_email = os.getenv('CLIENT_EMAIL', 'test@example.com') - - if (self.api_key == 'test-api-key' or - self.api_secret == 'test-api-secret' or - self.client_id == 'test-client-id' or - self.test_email == 'test@example.com'): - + self.api_key = os.getenv("API_KEY", "test-api-key") + self.api_secret = os.getenv("API_SECRET", "test-api-secret") + self.client_id = os.getenv("CLIENT_ID", "test-client-id") + self.test_email = os.getenv("CLIENT_EMAIL", "test@example.com") + + if ( + self.api_key == "test-api-key" + or self.api_secret == "test-api-secret" + or self.client_id == "test-client-id" + or self.test_email == "test@example.com" + ): + raise ValueError( "Real Labellerr credentials are required for integration testing. " "Please set environment variables: " "LABELLERR_API_KEY, LABELLERR_API_SECRET, LABELLERR_CLIENT_ID, LABELLERR_TEST_EMAIL" ) - + # Initialize the client self.client = LabellerrClient(self.api_key, self.api_secret) - + # Common test data self.test_project_name = f"SDK_Test_Project_{int(time.time())}" self.test_dataset_name = f"SDK_Test_Dataset_{int(time.time())}" - + # Sample annotation guide as per documentation requirements self.annotation_guide = [ { "question": "What objects do you see?", "option_type": "select", - "options": ["cat", "dog", "car", "person", "other"] + "options": ["cat", "dog", "car", "person", "other"], }, { "question": "Image quality rating", "option_type": "radio", - "options": ["excellent", "good", "fair", "poor"] - } + "options": ["excellent", "good", "fair", "poor"], + }, ] - + # Valid rotation configuration self.rotation_config = { - 'annotation_rotation_count': 1, - 'review_rotation_count': 1, - 'client_review_rotation_count': 1 + "annotation_rotation_count": 1, + "review_rotation_count": 1, + "client_review_rotation_count": 1, } def test_use_case_1_complete_project_creation_workflow(self): @@ -67,38 +69,41 @@ def test_use_case_1_complete_project_creation_workflow(self): try: # Create sample image files for testing for i in range(3): - temp_file = tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) - temp_file.write(b'fake_image_data_' + str(i).encode()) + temp_file = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) + temp_file.write(b"fake_image_data_" + str(i).encode()) temp_file.close() test_files.append(temp_file.name) - + # Step 1: Prepare project payload with all required parameters project_payload = { - 'client_id': self.client_id, - 'dataset_name': self.test_dataset_name, - 'dataset_description': 'Test dataset for SDK integration testing', - 'data_type': 'image', - 'created_by': self.test_email, - 'project_name': self.test_project_name, - 
'autolabel': False, - 'files_to_upload': test_files, - 'annotation_guide': self.annotation_guide, - 'rotation_config': self.rotation_config + "client_id": self.client_id, + "dataset_name": self.test_dataset_name, + "dataset_description": "Test dataset for SDK integration testing", + "data_type": "image", + "created_by": self.test_email, + "project_name": self.test_project_name, + "autolabel": False, + "files_to_upload": test_files, + "annotation_guide": self.annotation_guide, + "rotation_config": self.rotation_config, } - + # Step 2: Execute complete project creation workflow result = self.client.initiate_create_project(project_payload) - - # Step 3: Validate the workflow execution - self.assertIsInstance(result, dict, "Project creation should return a dictionary") - self.assertEqual(result.get('status'), 'success', "Project creation should be successful") - self.assertIn('message', result, "Result should contain a success message") - self.assertIn('project_id', result, "Result should contain project_id") - - - # Store project details for potential cleanup - self.created_project_id = result.get('project_id') + + # Step 3: Validate the workflow execution + self.assertIsInstance( + result, dict, "Project creation should return a dictionary" + ) + self.assertEqual( + result.get("status"), "success", "Project creation should be successful" + ) + self.assertIn("message", result, "Result should contain a success message") + self.assertIn("project_id", result, "Result should contain project_id") + + # Store project details for potential cleanup + self.created_project_id = result.get("project_id") self.created_dataset_name = self.test_dataset_name except LabellerrError as e: @@ -115,133 +120,140 @@ def test_use_case_1_complete_project_creation_workflow(self): def test_use_case_1_validation_requirements(self): """Table-driven test for project creation validation requirements""" - + validation_test_cases = [ { - 'test_name': 'Missing client_id', - 'payload_overrides': {'client_id': None}, - 'remove_keys': ['client_id'], - 'expected_error': 'Required parameter client_id is missing' + "test_name": "Missing client_id", + "payload_overrides": {"client_id": None}, + "remove_keys": ["client_id"], + "expected_error": "Required parameter client_id is missing", }, { - 'test_name': 'Invalid email format', - 'payload_overrides': {'created_by': 'invalid-email'}, - 'remove_keys': [], - 'expected_error': 'Please enter email id in created_by' + "test_name": "Invalid email format", + "payload_overrides": {"created_by": "invalid-email"}, + "remove_keys": [], + "expected_error": "Please enter email id in created_by", }, { - 'test_name': 'Invalid data type', - 'payload_overrides': {'data_type': 'invalid_type'}, - 'remove_keys': [], - 'expected_error': 'Invalid data_type' + "test_name": "Invalid data type", + "payload_overrides": {"data_type": "invalid_type"}, + "remove_keys": [], + "expected_error": "Invalid data_type", }, { - 'test_name': 'Missing dataset_name', - 'payload_overrides': {}, - 'remove_keys': ['dataset_name'], - 'expected_error': 'Required parameter dataset_name is missing' + "test_name": "Missing dataset_name", + "payload_overrides": {}, + "remove_keys": ["dataset_name"], + "expected_error": "Required parameter dataset_name is missing", }, { - 'test_name': 'Missing annotation guide and template ID', - 'payload_overrides': {}, - 'remove_keys': ['annotation_guide'], - 'expected_error': 'Please provide either annotation guide or annotation template id' - } + "test_name": "Missing annotation guide and 
template ID", + "payload_overrides": {}, + "remove_keys": ["annotation_guide"], + "expected_error": "Please provide either annotation guide or annotation template id", + }, ] - + # Base valid payload base_payload = { - 'client_id': self.client_id, - 'dataset_name': 'test_dataset', - 'dataset_description': 'test description', - 'data_type': 'image', - 'created_by': 'test@example.com', - 'project_name': 'test_project', - 'autolabel': False, - 'files_to_upload': [], - 'annotation_guide': self.annotation_guide + "client_id": self.client_id, + "dataset_name": "test_dataset", + "dataset_description": "test description", + "data_type": "image", + "created_by": "test@example.com", + "project_name": "test_project", + "autolabel": False, + "files_to_upload": [], + "annotation_guide": self.annotation_guide, } - + for i, test_case in enumerate(validation_test_cases, 1): - with self.subTest(test_name=test_case['test_name']): - + with self.subTest(test_name=test_case["test_name"]): + # Create test payload by modifying base payload test_payload = base_payload.copy() - test_payload.update(test_case['payload_overrides']) - + test_payload.update(test_case["payload_overrides"]) + # Remove keys if specified - for key in test_case['remove_keys']: + for key in test_case["remove_keys"]: test_payload.pop(key, None) - + # Execute test and verify expected error with self.assertRaises(LabellerrError) as context: self.client.initiate_create_project(test_payload) - + # Verify error message contains expected substring error_message = str(context.exception) - self.assertIn(test_case['expected_error'], error_message, - f"Expected error '{test_case['expected_error']}' not found in '{error_message}'") + self.assertIn( + test_case["expected_error"], + error_message, + f"Expected error '{test_case['expected_error']}' not found in '{error_message}'", + ) def test_use_case_1_multiple_data_types_table_driven(self): project_test_scenarios = [ { - 'scenario_name': 'Image Classification Project', - 'data_type': 'image', - 'file_extensions': ['.jpg', '.png'], - 'annotation_types': ['select', 'radio'], - 'expected_success': True + "scenario_name": "Image Classification Project", + "data_type": "image", + "file_extensions": [".jpg", ".png"], + "annotation_types": ["select", "radio"], + "expected_success": True, }, { - 'scenario_name': 'Document Processing Project', - 'data_type': 'document', - 'file_extensions': ['.pdf'], - 'annotation_types': ['input', 'boolean'], - 'expected_success': True - } + "scenario_name": "Document Processing Project", + "data_type": "document", + "file_extensions": [".pdf"], + "annotation_types": ["input", "boolean"], + "expected_success": True, + }, ] - + test_scenario = project_test_scenarios[0] # Image classification - - + test_files = [] try: - for ext in test_scenario['file_extensions'][:2]: # Limit to 2 files + for ext in test_scenario["file_extensions"][:2]: # Limit to 2 files temp_file = tempfile.NamedTemporaryFile(suffix=ext, delete=False) temp_file.write(f'fake_{test_scenario["data_type"]}_data'.encode()) temp_file.close() test_files.append(temp_file.name) - + annotation_guide = [] - for i, annotation_type in enumerate(test_scenario['annotation_types']): - annotation_guide.append({ - "question": f"Test question {i+1}", - "option_type": annotation_type, - "options": ["option1", "option2", "option3"] if annotation_type in ['select', 'radio'] else [] - }) - + for i, annotation_type in enumerate(test_scenario["annotation_types"]): + annotation_guide.append( + { + "question": f"Test question {i+1}", + 
"option_type": annotation_type, + "options": ( + ["option1", "option2", "option3"] + if annotation_type in ["select", "radio"] + else [] + ), + } + ) + # Build project payload project_payload = { - 'client_id': self.client_id, - 'dataset_name': f"SDK_Test_{test_scenario['data_type']}_{int(time.time())}", - 'dataset_description': f"Test dataset for {test_scenario['scenario_name']}", - 'data_type': test_scenario['data_type'], - 'created_by': self.test_email, - 'project_name': f"SDK_Test_Project_{test_scenario['data_type']}_{int(time.time())}", - 'autolabel': False, - 'files_to_upload': test_files, - 'annotation_guide': annotation_guide, - 'rotation_config': self.rotation_config + "client_id": self.client_id, + "dataset_name": f"SDK_Test_{test_scenario['data_type']}_{int(time.time())}", + "dataset_description": f"Test dataset for {test_scenario['scenario_name']}", + "data_type": test_scenario["data_type"], + "created_by": self.test_email, + "project_name": f"SDK_Test_Project_{test_scenario['data_type']}_{int(time.time())}", + "autolabel": False, + "files_to_upload": test_files, + "annotation_guide": annotation_guide, + "rotation_config": self.rotation_config, } - + # Execute test based on credentials result = self.client.initiate_create_project(project_payload) - + self.assertIsInstance(result, dict) - self.assertEqual(result.get('status'), 'success') + self.assertEqual(result.get("status"), "success") print(f"✓ {test_scenario['scenario_name']} project created successfully") - finally: # Clean up test files for file_path in test_files: @@ -259,70 +271,60 @@ def test_use_case_2_preannotation_upload_workflow(self): "category_id": 1, "bbox": [100, 100, 200, 200], "area": 40000, - "iscrowd": 0 + "iscrowd": 0, } ], "images": [ - { - "id": 1, - "width": 640, - "height": 480, - "file_name": "test_image.jpg" - } + {"id": 1, "width": 640, "height": 480, "file_name": "test_image.jpg"} ], - "categories": [ - { - "id": 1, - "name": "person", - "supercategory": "human" - } - ] + "categories": [{"id": 1, "name": "person", "supercategory": "human"}], } - + temp_annotation_file = None try: - temp_annotation_file = tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) + temp_annotation_file = tempfile.NamedTemporaryFile( + mode="w", suffix=".json", delete=False + ) json.dump(annotation_data, temp_annotation_file) temp_annotation_file.close() - - - test_project_id = 'test-project-id' - annotation_format = 'coco_json' - - - if hasattr(self, 'created_project_id') and self.created_project_id: + + test_project_id = "test-project-id" + annotation_format = "coco_json" + + if hasattr(self, "created_project_id") and self.created_project_id: actual_project_id = self.created_project_id else: actual_project_id = test_project_id - - print("Calling actual Labellerr pre-annotation API with real credentials...") - + print( + "Calling actual Labellerr pre-annotation API with real credentials..." 
+ ) + try: - with patch.object(self.client, 'preannotation_job_status', create=True) as mock_status: + with patch.object( + self.client, "preannotation_job_status", create=True + ) as mock_status: mock_status.return_value = { - 'response': { - 'status': 'completed', - 'job_id': 'real-job-id' - } + "response": {"status": "completed", "job_id": "real-job-id"} } - + result = self.client._upload_preannotation_sync( project_id=actual_project_id, client_id=self.client_id, annotation_format=annotation_format, - annotation_file=temp_annotation_file.name + annotation_file=temp_annotation_file.name, + ) + + self.assertIsInstance( + result, dict, "Upload should return a dictionary" + ) + self.assertIn( + "response", result, "Result should contain response" ) - - self.assertIsInstance(result, dict, "Upload should return a dictionary") - self.assertIn('response', result, "Result should contain response") - - + except Exception as api_error: raise api_error - - except LabellerrError as e: self.fail(f"Pre-annotation upload failed with LabellerrError: {e}") except Exception as e: @@ -338,65 +340,67 @@ def test_use_case_2_format_validation(self): format_test_cases = [ { - 'test_name': 'Invalid annotation format', - 'project_id': 'test-project', - 'annotation_format': 'invalid_format', - 'annotation_file': 'test.json', - 'expected_error': 'Invalid annotation_format', - 'create_temp_file': False, - 'temp_suffix': None + "test_name": "Invalid annotation format", + "project_id": "test-project", + "annotation_format": "invalid_format", + "annotation_file": "test.json", + "expected_error": "Invalid annotation_format", + "create_temp_file": False, + "temp_suffix": None, }, { - 'test_name': 'File not found', - 'project_id': 'test-project', - 'annotation_format': 'json', - 'annotation_file': 'non_existent_file.json', - 'expected_error': 'File not found', - 'create_temp_file': False, - 'temp_suffix': None + "test_name": "File not found", + "project_id": "test-project", + "annotation_format": "json", + "annotation_file": "non_existent_file.json", + "expected_error": "File not found", + "create_temp_file": False, + "temp_suffix": None, }, { - 'test_name': 'Wrong file extension for COCO format', - 'project_id': 'test-project', - 'annotation_format': 'coco_json', - 'annotation_file': None, # Will be set to temp file - 'expected_error': 'For coco_json annotation format, the file must have a .json extension', - 'create_temp_file': True, - 'temp_suffix': '.txt' - } + "test_name": "Wrong file extension for COCO format", + "project_id": "test-project", + "annotation_format": "coco_json", + "annotation_file": None, # Will be set to temp file + "expected_error": "For coco_json annotation format, the file must have a .json extension", + "create_temp_file": True, + "temp_suffix": ".txt", + }, ] - + for i, test_case in enumerate(format_test_cases, 1): - with self.subTest(test_name=test_case['test_name']): - + with self.subTest(test_name=test_case["test_name"]): + temp_file = None try: # Create temporary file if needed - if test_case['create_temp_file']: + if test_case["create_temp_file"]: temp_file = tempfile.NamedTemporaryFile( - suffix=test_case['temp_suffix'], - delete=False + suffix=test_case["temp_suffix"], delete=False ) - temp_file.write(b'test content') + temp_file.write(b"test content") temp_file.close() annotation_file = temp_file.name else: - annotation_file = test_case['annotation_file'] - + annotation_file = test_case["annotation_file"] + # Execute test and verify expected error with self.assertRaises(LabellerrError) 
as context: self.client._upload_preannotation_sync( - project_id=test_case['project_id'], + project_id=test_case["project_id"], client_id=self.client_id, - annotation_format=test_case['annotation_format'], - annotation_file=annotation_file + annotation_format=test_case["annotation_format"], + annotation_file=annotation_file, ) - + # Verify error message contains expected substring error_message = str(context.exception) - self.assertIn(test_case['expected_error'], error_message, - f"Expected error '{test_case['expected_error']}' not found in '{error_message}'") - + self.assertIn( + test_case["expected_error"], + error_message, + f"Expected error '{test_case['expected_error']}' not found in '{error_message}'", + ) + finally: # Clean up temporary file if temp_file: @@ -409,68 +413,82 @@ def test_use_case_2_multiple_formats_table_driven(self): preannotation_scenarios = [ { - 'scenario_name': 'COCO JSON Upload', - 'annotation_format': 'coco_json', - 'file_extension': '.json', - 'sample_data': { - "annotations": [{"id": 1, "image_id": 1, "category_id": 1, "bbox": [0, 0, 100, 100]}], - "images": [{"id": 1, "file_name": "test.jpg", "width": 640, "height": 480}], - "categories": [{"id": 1, "name": "test", "supercategory": "object"}] + "scenario_name": "COCO JSON Upload", + "annotation_format": "coco_json", + "file_extension": ".json", + "sample_data": { + "annotations": [ + { + "id": 1, + "image_id": 1, + "category_id": 1, + "bbox": [0, 0, 100, 100], + } + ], + "images": [ + {"id": 1, "file_name": "test.jpg", "width": 640, "height": 480} + ], + "categories": [ + {"id": 1, "name": "test", "supercategory": "object"} + ], }, - 'expected_success': True + "expected_success": True, }, { - 'scenario_name': 'JSON Annotations Upload', - 'annotation_format': 'json', - 'file_extension': '.json', - 'sample_data': { - "labels": [{"image": "test.jpg", "annotations": [{"label": "cat", "confidence": 0.95}]}] + "scenario_name": "JSON Annotations Upload", + "annotation_format": "json", + "file_extension": ".json", + "sample_data": { + "labels": [ + { + "image": "test.jpg", + "annotations": [{"label": "cat", "confidence": 0.95}], + } + ] }, - 'expected_success': True - } + "expected_success": True, + }, ] - + test_scenario = preannotation_scenarios[0] # COCO JSON - - + temp_annotation_file = None try: temp_annotation_file = tempfile.NamedTemporaryFile( - mode='w', - suffix=test_scenario['file_extension'], - delete=False + mode="w", suffix=test_scenario["file_extension"], delete=False ) - json.dump(test_scenario['sample_data'], temp_annotation_file) + json.dump(test_scenario["sample_data"], temp_annotation_file) temp_annotation_file.close() - - + # Use project ID from previous tests if available - test_project_id = getattr(self, 'created_project_id', 'test-project-id-table-driven') - + test_project_id = getattr( + self, "created_project_id", "test-project-id-table-driven" + ) try: - # Only patch the missing method, let everything else be real - with patch.object(self.client, 'preannotation_job_status', create=True) as mock_status: - mock_status.return_value = { - 'response': { - 'status': 'completed', - 'job_id': f'job-{test_scenario["annotation_format"]}-{int(time.time())}' - } + # Only patch the missing method, let everything else be real + with patch.object( + self.client, "preannotation_job_status", create=True + ) as mock_status: + mock_status.return_value = { + "response": { + "status": "completed", + "job_id": f'job-{test_scenario["annotation_format"]}-{int(time.time())}', } - - result = 
self.client._upload_preannotation_sync( - project_id=test_project_id, - client_id=self.client_id, - annotation_format=test_scenario['annotation_format'], - annotation_file=temp_annotation_file.name - ) - - self.assertIsInstance(result, dict) - + } + + result = self.client._upload_preannotation_sync( + project_id=test_project_id, + client_id=self.client_id, + annotation_format=test_scenario["annotation_format"], + annotation_file=temp_annotation_file.name, + ) + + self.assertIsInstance(result, dict) + except Exception as api_error: - raise api_error + raise api_error - finally: # Clean up annotation file if temp_annotation_file: @@ -485,7 +503,7 @@ def tearDown(self): @classmethod def setUpClass(cls): """Set up test suite.""" - + @classmethod def tearDownClass(cls): """Tear down test suite.""" @@ -495,41 +513,35 @@ def run_use_case_tests(): # Create test suite suite = unittest.TestLoader().loadTestsFromTestCase(LabelerUseCaseIntegrationTests) - + # Run tests with verbose output runner = unittest.TextTestRunner(verbosity=2, stream=sys.stdout) result = runner.run(suite) - + # Return success status return result.wasSuccessful() -if __name__ == '__main__': +if __name__ == "__main__": """ Main execution block for running use case integration tests. - + Environment Variables Required: - API_KEY: Your Labellerr API key - API_SECRET: Your Labellerr API secret - CLIENT_ID: Your Labellerr client ID - TEST_EMAIL: Valid email address for testing - + Run with: python use_case_tests.py """ # Check for required environment variables - required_env_vars = [ - 'API_KEY', - 'API_SECRET', - 'CLIENT_ID', - 'TEST_EMAIL' - ] - + required_env_vars = ["API_KEY", "API_SECRET", "CLIENT_ID", "TEST_EMAIL"] + missing_vars = [var for var in required_env_vars if not os.getenv(var)] - # Run the tests success = run_use_case_tests() - + # Exit with appropriate code - sys.exit(0 if success else 1) \ No newline at end of file + sys.exit(0 if success else 1)
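
Below, a minimal sketch (not part of the patch itself) of how a maintainer might exercise the release automation added above, assuming the GitHub CLI (`gh`) is installed and authenticated against this repository. The `check-release` target, the release.yml dispatch inputs, and the .bumpversion.cfg behaviour are taken from this diff; the exact commands are illustrative, not prescribed by the PR.

    # Verify the working tree, branch, and current version before releasing
    make check-release

    # Pushes to main trigger a patch release; pushes to develop trigger a minor
    # pre-release. To choose the bump type explicitly, dispatch the workflow:
    gh workflow run release.yml --ref main \
      -f version_type=patch -f branch=main -f skip_tests=false

    # Internally the release job installs bump2version, rewrites .bumpversion.cfg
    # so the commit message carries the LABIMP ticket, bumps the version in
    # pyproject.toml, pushes the v<new_version> tag, and then creates the GitHub
    # release and uploads the build artifacts.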