diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..b48690fb --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,148 @@ +name: CI + +on: + workflow_dispatch: + push: + branches: + - main + pull_request: + branches: + - main + +concurrency: + group: pr-${{ github.head_ref || github.sha }} + cancel-in-progress: true + +jobs: + lint: + name: Lint (ruff) + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install Poetry + run: pipx install poetry + + - name: Configure Poetry + run: poetry config virtualenvs.in-project true + + - name: Cache virtualenv + uses: actions/cache@v4 + with: + path: .venv + key: venv-lint-${{ runner.os }}-3.10-${{ hashFiles('poetry.lock') }} + + - name: Install dependencies + run: poetry install --with dev + + - name: Run ruff check + run: poetry run ruff check . + + - name: Run ruff format check + run: poetry run ruff format --check . 
+ + typecheck: + name: Type check (pyright) + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install Poetry + run: pipx install poetry + + - name: Configure Poetry + run: poetry config virtualenvs.in-project true + + - name: Cache virtualenv + uses: actions/cache@v4 + with: + path: .venv + key: venv-typecheck-${{ runner.os }}-3.10-${{ hashFiles('poetry.lock') }} + + - name: Install dependencies + run: poetry install --with dev + + - name: Run pyright + run: poetry run pyright + + build: + name: Build package + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install Poetry + run: pipx install poetry + + - name: Configure Poetry + run: poetry config virtualenvs.in-project true + + - name: Cache virtualenv + uses: actions/cache@v4 + with: + path: .venv + key: venv-build-${{ runner.os }}-3.10-${{ hashFiles('poetry.lock') }} + + - name: Install dependencies + run: poetry install --only main + + - name: Build package + run: poetry build + + - name: Check package + run: | + python -m pip install --upgrade twine + twine check dist/* + + tests: + name: Tests (Python ${{ matrix.python-version }}, ${{ matrix.os }}) + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Poetry + run: pipx install poetry + + - name: Configure Poetry + run: poetry config virtualenvs.in-project true + + - name: Cache virtualenv + uses: actions/cache@v4 + with: + path: .venv + key: venv-tests-${{ runner.os }}-${{ matrix.python-version }}-${{ hashFiles('poetry.lock') }} + + - 
name: Install dependencies + run: poetry install + + - name: Run unit tests + run: poetry run pytest tests/ -v diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..e6fdb37e --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,74 @@ +name: Docs + +on: + workflow_dispatch: + pull_request: + # Only run on PRs that touch docs-related files to keep checks fast. + paths: + - "docs/**" + - "README.md" + - "pyproject.toml" + - "deeptab/**" + push: + # No paths filter here: tag pushes must always build docs regardless of + # which files changed in the tagged commit. Paths filters in GitHub Actions + # apply to both branches and tags under the same push: block, so a tag + # like v1.7.0 would be silently skipped if docs files weren't in that commit. + branches: + - main + tags: + - "v*" + +concurrency: + group: docs-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + build-docs: + name: Build docs (Sphinx) + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install system dependencies + run: sudo apt-get update && sudo apt-get install -y pandoc + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install Poetry + run: pipx install poetry + + - name: Configure Poetry + run: poetry config virtualenvs.in-project true + + - name: Cache virtualenv + uses: actions/cache@v4 + with: + path: .venv + key: venv-docs-${{ runner.os }}-3.10-${{ hashFiles('poetry.lock') }} + + - name: Install package and docs dependencies + run: poetry install --with docs + + - name: Build Sphinx docs + run: poetry run sphinx-build -b html docs/ docs/_build/html -W --keep-going + + # ── Triggered on push to main ────────────────────────────────────────── + # RTD listens to its own webhook and publishes "latest" automatically. + # The step below is informational; actual publishing is done by RTD. 
+ - name: Notify latest/dev docs will be published + if: github.event_name == 'push' && github.ref == 'refs/heads/main' + run: echo "Docs build succeeded. RTD will publish the 'latest' version." + + # ── Triggered on a release tag ───────────────────────────────────────── + # RTD listens to tag pushes and publishes a versioned snapshot automatically + # when "Build tags" is enabled in the RTD project settings. + - name: Notify stable/versioned docs will be published + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') + run: | + TAG="${GITHUB_REF_NAME}" + echo "Docs build succeeded for tag ${TAG}. RTD will publish the '${TAG}' versioned docs." diff --git a/.github/workflows/pr-tests.yml b/.github/workflows/pr-tests.yml deleted file mode 100644 index 825ebf18..00000000 --- a/.github/workflows/pr-tests.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: PR Unit Tests - -on: - workflow_dispatch: - pull_request: - branches: - - develop - - master # Add any other branches where you want to enforce tests - -concurrency: - group: pytest-${{ github.head_ref || github.sha }} - cancel-in-progress: true - -jobs: - test: - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - python-version: ["3.10", "3.11", "3.12", "3.13"] - - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - - - name: Install Poetry - uses: abatilo/actions-poetry@v4 - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - cache: 'poetry' - - - name: Install Dependencies - run: poetry install - - - name: Run Unit Tests - run: poetry run pytest tests/ -v - - - name: Verify Tests Passed - if: ${{ success() }} - run: echo "All tests passed! Pull request is allowed." 
- - - name: Fail PR on Test Failure - if: ${{ failure() }} - run: exit 1 # This ensures the PR cannot be merged if tests fail diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml new file mode 100644 index 00000000..8d2602a0 --- /dev/null +++ b/.github/workflows/publish-pypi.yml @@ -0,0 +1,83 @@ +name: Publish to PyPI + +# Triggered when a maintainer pushes a stable release tag (e.g. v1.7.0). +# RC tags (v1.7.0rc1) are handled by publish-testpypi.yml instead. +# +# Requires the "pypi-publish" GitHub Environment with tag-based protection. +# Uses OIDC trusted publishing — no PYPI_TOKEN secret required. + +on: + push: + tags: + - "v[0-9]+.[0-9]+.[0-9]+" + +permissions: + contents: write + id-token: write + +jobs: + publish: + runs-on: ubuntu-latest + environment: pypi-publish + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install Poetry + run: pipx install poetry + + - name: Configure Poetry + run: poetry config virtualenvs.in-project true + + - name: Cache virtualenv + uses: actions/cache@v4 + with: + path: .venv + key: venv-publish-${{ runner.os }}-3.10-${{ hashFiles('poetry.lock') }} + + - name: Install dependencies + run: poetry install --only main + + - name: Check tag matches pyproject version + run: | + VERSION=$(poetry version -s) + TAG="${GITHUB_REF_NAME#v}" + + echo "pyproject version: $VERSION" + echo "git tag version: $TAG" + + if [ "$VERSION" != "$TAG" ]; then + echo "❌ Tag version and pyproject.toml version do not match." 
+ exit 1 + fi + + - name: Build package + run: poetry build + + - name: Check package + run: | + python -m pip install --upgrade twine + twine check dist/* + + - name: Test wheel install + run: | + python -m venv /tmp/deeptab-wheel-test + source /tmp/deeptab-wheel-test/bin/activate + pip install dist/*.whl + python -c "import deeptab; print(deeptab.__version__)" + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + + - name: Create GitHub Release + uses: softprops/action-gh-release@v2 + with: + prerelease: false + generate_release_notes: true + files: dist/* diff --git a/.github/workflows/publish-testpypi.yml b/.github/workflows/publish-testpypi.yml new file mode 100644 index 00000000..53b7e710 --- /dev/null +++ b/.github/workflows/publish-testpypi.yml @@ -0,0 +1,86 @@ +name: Publish to TestPyPI (RC) + +# Triggered when a maintainer pushes an RC tag (e.g. v1.7.0rc1). +# Stable release tags are handled by publish-pypi.yml instead. +# +# Requires the "testpypi-publish" GitHub Environment with tag-based protection. +# Uses OIDC trusted publishing — no token secret required. +# Also creates a GitHub pre-release for visibility. 
+ +on: + push: + tags: + - "v*.*.*rc*" + +permissions: + contents: write + id-token: write + +jobs: + publish-rc: + runs-on: ubuntu-latest + environment: testpypi-publish + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install Poetry + run: pipx install poetry + + - name: Configure Poetry + run: poetry config virtualenvs.in-project true + + - name: Cache virtualenv + uses: actions/cache@v4 + with: + path: .venv + key: venv-publish-rc-${{ runner.os }}-3.10-${{ hashFiles('poetry.lock') }} + + - name: Install dependencies + run: poetry install --only main + + - name: Check tag matches pyproject version + run: | + VERSION=$(poetry version -s) + TAG="${GITHUB_REF_NAME#v}" + + echo "pyproject version: $VERSION" + echo "git tag version: $TAG" + + if [ "$VERSION" != "$TAG" ]; then + echo "❌ Tag version and pyproject.toml version do not match." 
+ exit 1 + fi + + - name: Build package + run: poetry build + + - name: Check package + run: | + python -m pip install --upgrade twine + twine check dist/* + + - name: Test wheel install + run: | + python -m venv /tmp/deeptab-wheel-test + source /tmp/deeptab-wheel-test/bin/activate + pip install dist/*.whl + python -c "import deeptab; print(deeptab.__version__)" + + - name: Publish to TestPyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + + - name: Create GitHub pre-release + uses: softprops/action-gh-release@v2 + with: + prerelease: true + generate_release_notes: true + files: dist/* diff --git a/.github/workflows/semantic-release.yml b/.github/workflows/semantic-release.yml deleted file mode 100644 index 7882d6e5..00000000 --- a/.github/workflows/semantic-release.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: Semantic Release - -on: - push: - branches: - - master - - main - -permissions: - contents: write - issues: write - pull-requests: write - -jobs: - release: - runs-on: ubuntu-latest - concurrency: release - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - token: ${{ secrets.GITHUB_TOKEN }} - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install Poetry - uses: snok/install-poetry@v1 - with: - version: latest - virtualenvs-create: true - virtualenvs-in-project: true - - - name: Cache Poetry dependencies - uses: actions/cache@v4 - with: - path: .venv - key: venv-${{ runner.os }}-${{ hashFiles('**/poetry.lock') }} - - - name: Install dependencies - run: | - poetry install - - - name: Python Semantic Release - id: release - uses: python-semantic-release/python-semantic-release@v9.12.0 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - env: - PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Publish package distributions to GitHub Releases - uses: 
python-semantic-release/upload-to-gh-release@main - if: steps.release.outputs.released == 'true' - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - tag: ${{ steps.release.outputs.tag }} diff --git a/.github/workflows/sync-develop.yml b/.github/workflows/sync-develop.yml deleted file mode 100644 index a764e692..00000000 --- a/.github/workflows/sync-develop.yml +++ /dev/null @@ -1,182 +0,0 @@ -name: Sync Develop with Master - -on: - workflow_run: - workflows: ["Semantic Release"] - types: - - completed - branches: - - master - -permissions: - contents: write - pull-requests: write - -jobs: - sync-develop: - runs-on: ubuntu-latest - # Only run if semantic release succeeded and actually released - if: ${{ github.event.workflow_run.conclusion == 'success' }} - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - token: ${{ secrets.GITHUB_TOKEN }} - - - name: Configure Git - run: | - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - - - name: Fetch all branches - run: | - git fetch origin master:master - git fetch origin develop:develop - - - name: Check if develop is behind master - id: check - run: | - git checkout develop - BEHIND=$(git rev-list --count develop..master) - echo "commits_behind=$BEHIND" >> $GITHUB_OUTPUT - - if [ "$BEHIND" -eq "0" ]; then - echo "Develop is already up to date with master" - echo "needs_sync=false" >> $GITHUB_OUTPUT - else - echo "Develop is $BEHIND commits behind master" - echo "needs_sync=true" >> $GITHUB_OUTPUT - fi - - - name: Attempt automatic merge - id: merge - if: steps.check.outputs.needs_sync == 'true' - run: | - git checkout develop - - # Try to merge master into develop - if git merge master --no-edit; then - echo "Merge successful - no conflicts" - echo "status=success" >> $GITHUB_OUTPUT - echo "has_conflicts=false" >> $GITHUB_OUTPUT - else - echo "Merge has conflicts" - git merge --abort - echo "status=conflict" >> 
$GITHUB_OUTPUT - echo "has_conflicts=true" >> $GITHUB_OUTPUT - fi - - - name: Push changes if no conflicts - if: steps.merge.outputs.status == 'success' - run: | - git push origin develop - echo "✅ Successfully synced develop with master" - - - name: Get latest version tag - id: version - if: steps.merge.outputs.has_conflicts == 'true' - run: | - VERSION=$(git describe --tags --abbrev=0 master) - echo "tag=$VERSION" >> $GITHUB_OUTPUT - - - name: Create PR if conflicts exist - if: steps.merge.outputs.has_conflicts == 'true' - uses: peter-evans/create-pull-request@v6 - with: - token: ${{ secrets.GITHUB_TOKEN }} - branch: sync/master-to-develop-${{ steps.version.outputs.tag }} - base: develop - title: "chore: sync develop with master ${{ steps.version.outputs.tag }}" - body: | - ## 🔄 Automatic Sync: Master → Develop - - This PR syncs `develop` branch with the latest release from `master`. - - **Release Version:** `${{ steps.version.outputs.tag }}` - **Triggered by:** Semantic Release workflow completion - - ### ⚠️ Merge Conflicts Detected - - Automatic merge failed due to conflicts. Please resolve conflicts manually: - - ```bash - git checkout develop - git pull origin develop - git merge master - # Resolve conflicts - git add . - git commit - git push origin develop - ``` - - ### Changes from Master: - - Updated version files (`pyproject.toml`, `__version__.py`) - - Updated `CHANGELOG.md` - - New release tag: `${{ steps.version.outputs.tag }}` - - --- - - 🤖 This PR was created automatically by the sync-develop workflow. - labels: | - chore - sync - automated - assignees: ${{ github.repository_owner }} - - - name: Add comment with instructions - if: steps.merge.outputs.has_conflicts == 'true' - uses: peter-evans/create-or-update-comment@v4 - with: - issue-number: ${{ steps.pr.outputs.pull-request-number }} - body: | - ### 📋 Manual Merge Instructions - - Since automatic merge failed, follow these steps: - - 1. 
**Checkout and update develop:** - ```bash - git checkout develop - git pull origin develop - ``` - - 2. **Merge master:** - ```bash - git merge master - ``` - - 3. **Resolve conflicts** in: - - `pyproject.toml` (keep master version) - - `deeptab/__version__.py` (keep master version) - - `CHANGELOG.md` (merge both) - - Any other conflicting files - - 4. **Complete the merge:** - ```bash - git add . - git commit -m "chore: sync develop with master ${{ steps.version.outputs.tag }}" - git push origin develop - ``` - - 5. **Close this PR** (changes will be in develop) - - 💡 **Tip:** Version files should always use master's values after a release. - - - name: Summary - if: always() - run: | - echo "## Sync Develop Workflow Summary" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - if [ "${{ steps.check.outputs.needs_sync }}" == "false" ]; then - echo "✅ Develop is already up to date with master" >> $GITHUB_STEP_SUMMARY - elif [ "${{ steps.merge.outputs.status }}" == "success" ]; then - echo "✅ Successfully merged master into develop automatically" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**Commits synced:** ${{ steps.check.outputs.commits_behind }}" >> $GITHUB_STEP_SUMMARY - elif [ "${{ steps.merge.outputs.has_conflicts }}" == "true" ]; then - echo "⚠️ Merge conflicts detected - PR created for manual resolution" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**Action required:** Review and merge the auto-created PR" >> $GITHUB_STEP_SUMMARY - fi diff --git a/.gitignore b/.gitignore index f6a5ec46..47221ead 100644 --- a/.gitignore +++ b/.gitignore @@ -173,4 +173,4 @@ docs/_build/doctrees/* docs/_build/html/* dev -dev/* \ No newline at end of file +dev/* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2173ee0a..dfea5213 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ fail_fast: false default_stages: [commit, push] repos: - repo: 
https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0 hooks: - id: check-case-conflict - id: check-merge-conflict @@ -13,16 +13,16 @@ repos: args: [--markdown-linebreak-ext=md] - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.1.14 + rev: 6fec9b7edb08fd9989088709d864a7826dc74e80 # frozen: v0.15.12 hooks: - id: ruff-format types_or: [python, pyi, jupyter] - id: ruff types_or: [python, pyi, jupyter] - args: [ --fix, --exit-non-zero-on-fix ] + args: [--fix, --exit-non-zero-on-fix] - repo: https://github.com/pre-commit/mirrors-prettier - rev: v4.0.0-alpha.8 + rev: f12edd9c7be1c20cfa42420fd0e6df71e42b51ea # frozen: v4.0.0-alpha.8 hooks: - id: prettier types: @@ -31,7 +31,17 @@ repos: - json - repo: https://github.com/commitizen-tools/commitizen - rev: v3.29.1 + rev: 2ca29f9297911f8f5a4e8f97100b7832f045e8d3 # frozen: v4.13.10 hooks: - id: commitizen stages: [commit-msg] + + - repo: local + hooks: + - id: pyright + name: pyright + entry: poetry run pyright + language: system + pass_filenames: false + types_or: [python] + stages: [push] diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..e06ed4d6 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,94 @@ +# CHANGELOG + +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](https://semver.org/) and uses +[Conventional Commits](https://www.conventionalcommits.org/). + +Going forward, this file is updated automatically by `cz bump` on each release. 
+ +--- + +## v1.6.1 (2025-04-26) + +### Changes + +- Renamed package from `mambular` / `deeptabular` to `deeptab` +- Dynamic versioning: version is now sourced from `pyproject.toml` via `importlib.metadata`; removed `__version__.py` +- CI rework: split into lint, typecheck, build, and test jobs; manual tagging with OIDC PyPI publishing; removed semantic-release automation + +--- + +## v1.5.0 (2025-04-14) + +### Changes + +- Moved preprocessing to the `pretab` package; removed bundled preprocessor +- Added TabR model integration +- Fixed LSS bug affecting distributional output +- Updated docstrings for documentation generation compatibility + +--- + +## v1.4.0 (2025-03-24) + +### Features + +- Added ModernNCA model +- Added training candidates support during prediction and validation in the lightning module + +--- + +## v1.3.2 (2025-03-19) + +### Bug Fixes + +- Fixed `num_classes` argument for binary classification +- Fixed version info extraction + +--- + +## v1.3.1 (2025-03-17) + +### Features + +- Added Tangos model (classifier, regressor, and distributional variants) + +--- + +## v1.3.0 (2025-03-13) + +### Features + +- Added AutoInt model +- Added Trompt model +- Added ENode (embedding oblivious trees) model +- Fixed preprocessor bug causing `ValueError: not enough values to unpack` + +--- + +## v1.2.0 (2025-02-17) + +### Features + +- Added `BaseConfig` parent class; restructured all configs to inherit from it +- Added `JohnsonSU` distribution and individual preprocessing per column +- Adapted embedding layer for new preprocessing pipeline +- Added unit tests for PRs +- Fixed column name handling (int → string) in datamodule + +--- + +## v1.1.0 (2025-01-03) + +### Features + +- Added `BaseConfig` class to public init +- Added `JohnsonSU` distribution support +- Removed dependency on rotary embeddings + +--- + +## v1.0.0 (2024-12-04) + +Initial stable release. 
diff --git a/CONVENTIONAL_COMMITS.md b/CONVENTIONAL_COMMITS.md new file mode 100644 index 00000000..f1cde46f --- /dev/null +++ b/CONVENTIONAL_COMMITS.md @@ -0,0 +1,105 @@ +# Conventional Commits Quick Reference + +## Commit Format + +``` +<type>(<scope>): <description> +``` + +## Types + +| Type | Description | Version Bump | | ---------- | ----------------------- | ------------- | | `feat` | New feature | Minor (0.x.0) | | `fix` | Bug fix | Patch (0.0.x) | | `docs` | Documentation only | None | | `style` | Code style/formatting | None | | `refactor` | Code refactoring | None | | `perf` | Performance improvement | Patch | | `test` | Adding/updating tests | None | | `build` | Build system changes | None | | `ci` | CI/CD changes | None | | `chore` | Other changes | None | + +## Examples + +```bash +# Feature (minor bump: 1.6.1 → 1.7.0) +git commit -m "feat(models): add TabNet architecture" + +# Bug fix (patch bump: 1.6.1 → 1.6.2) +git commit -m "fix(datamodule): resolve memory leak in batch loading" + +# Performance (patch bump) +git commit -m "perf(transformer): optimize attention computation" + +# Documentation (no bump) +git commit -m "docs: update API reference for MambaTab" + +# Breaking change (major bump: 1.6.1 → 2.0.0) +git commit -m "feat!: remove Python 3.9 support + +BREAKING CHANGE: Python 3.10+ is now required" +``` + +## Scopes (Optional) + +Common scopes in this project: + +- `models`: Model implementations +- `configs`: Configuration classes +- `data`: Data utilities and dataloaders +- `arch`: Architecture utilities +- `utils`: General utilities +- `ci`: CI/CD related +- `deps`: Dependencies + +## Quick Commands + +```bash +# Interactive commit (recommended) +just commit + +# Version bump +just bump + +# View changelog +cat CHANGELOG.md + +# Dry-run semantic release +just release-dry +``` + +## Breaking Changes + +Use `!` after type and explain in footer: + +``` +feat!: change API signature + +BREAKING CHANGE: The `fit()` method now requires `x_train` and `y_train` 
as separate arguments instead of a tuple. +``` + +## Multi-line Commits + +```bash +# Using editor +git commit + +# In editor: +feat(models): add multi-head attention support + +This commit introduces multi-head attention mechanism +to improve model performance on large datasets. + +Closes #123 +``` + +## Pre-commit Hook + +Commits are validated automatically. If rejected: + +1. Check format: `type(scope): description` +2. Use allowed types only +3. Keep header under 72 characters +4. Don't end subject with period diff --git a/deeptab/__init__.py b/deeptab/__init__.py index 358c7bd5..68ae7e3b 100644 --- a/deeptab/__init__.py +++ b/deeptab/__init__.py @@ -1,5 +1,5 @@ from . import base_models, data_utils, models, utils -from .__version__ import __version__ +from ._version import __version__ __all__ = [ "__version__", diff --git a/deeptab/__version__.py b/deeptab/__version__.py deleted file mode 100644 index 9cc2e428..00000000 --- a/deeptab/__version__.py +++ /dev/null @@ -1,21 +0,0 @@ -# PEP0440 compatible formatted version, see: -# https://www.python.org/dev/peps/pep-0440/ -# -# Generic release markers: -# X.Y.0 # For first release after an increment in Y -# X.Y.Z # For bugfix releases -# -# Admissible pre-release markers: -# X.Y.ZaN # Alpha release -# X.Y.ZbN # Beta release -# X.Y.ZrcN # Release Candidate -# X.Y.Z # Final release -# -# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. 
-# 'X.Y.dev0' is the canonical version of 'X.Y.dev' -# - -# The following line *must* be the last in the module, exactly as formatted: - -__version__ = "1.6.1" - diff --git a/deeptab/_version.py b/deeptab/_version.py new file mode 100644 index 00000000..aabf4a7a --- /dev/null +++ b/deeptab/_version.py @@ -0,0 +1,6 @@ +from importlib.metadata import PackageNotFoundError, version + +try: + __version__ = version("deeptab") +except PackageNotFoundError: + __version__ = "0+unknown" diff --git a/deeptab/arch_utils/enode_utils.py b/deeptab/arch_utils/enode_utils.py index d8116f17..e03529ba 100644 --- a/deeptab/arch_utils/enode_utils.py +++ b/deeptab/arch_utils/enode_utils.py @@ -1,15 +1,17 @@ +from warnings import warn + +import numpy as np import torch import torch.nn as nn import torch.nn.functional as F + from deeptab.arch_utils.layer_utils.sparsemax import sparsemax, sparsemoid + from .data_aware_initialization import ModuleWithInit from .numpy_utils import check_numpy -import numpy as np -from warnings import warn class ODSTE(ModuleWithInit): - def __init__( self, in_features, # J (number of features) @@ -41,22 +43,16 @@ def __init__( ) # Response values for each leaf - self.response = nn.Parameter( - torch.zeros([num_trees, tree_dim, embed_dim, 2**depth]), requires_grad=True - ) + self.response = nn.Parameter(torch.zeros([num_trees, tree_dim, embed_dim, 2**depth]), requires_grad=True) initialize_response_(self.response) # Feature selection logits (choose J) - self.feature_selection_logits = nn.Parameter( - torch.zeros([num_trees, depth, in_features]), requires_grad=True - ) + self.feature_selection_logits = nn.Parameter(torch.zeros([num_trees, depth, in_features]), requires_grad=True) initialize_selection_logits_(self.feature_selection_logits) # Embedding selection logits (choose D within J) - self.embedding_selection_logits = nn.Parameter( - torch.randn([num_trees, depth, in_features, embed_dim]) - ) + self.embedding_selection_logits = 
nn.Parameter(torch.randn([num_trees, depth, in_features, embed_dim])) # Thresholds & temperatures (random initialization) self.feature_thresholds = nn.Parameter(torch.randn([num_trees, depth])) @@ -66,9 +62,7 @@ def __init__( with torch.no_grad(): indices = torch.arange(2**self.depth) offsets = 2 ** torch.arange(self.depth) - bin_codes = (indices.view(1, -1) // offsets.view(-1, 1) % 2).to( - torch.float32 - ) + bin_codes = (indices.view(1, -1) // offsets.view(-1, 1) % 2).to(torch.float32) bin_codes_1hot = torch.stack([bin_codes, 1.0 - bin_codes], dim=-1) self.bin_codes_1hot = nn.Parameter(bin_codes_1hot, requires_grad=False) @@ -86,7 +80,7 @@ def initialize(self, x, eps=1e-6): raise ValueError("Input tensor must have shape (batch_size, J, D)") if x.shape[0] < 1000: - warn( + warn( # noqa: B028 "Data-aware initialization is performed on less than 1000 data points. This may cause instability." "To avoid potential problems, run this model on a data batch with at least 1000 data samples." "You can do so manually before training. Use with torch.no_grad() for memory efficiency." 
@@ -94,23 +88,17 @@ def initialize(self, x, eps=1e-6): with torch.no_grad(): # Select features (J) - feature_selectors = self.choice_function( - self.feature_selection_logits, dim=-1 - ) + feature_selectors = self.choice_function(self.feature_selection_logits, dim=-1) # feature_selectors shape: (num_trees, depth, J) selected_features = torch.einsum("bjd,ntj->bntd", x, feature_selectors) # selected_features shape: (B, num_trees, depth, D) # Select embeddings (D) - embedding_selectors = self.choice_function( - self.embedding_selection_logits, dim=-1 - ) + embedding_selectors = self.choice_function(self.embedding_selection_logits, dim=-1) # embedding_selectors shape: (num_trees, depth, J, D) - selected_embeddings = torch.einsum( - "bntd,ntjd->bntd", selected_features, embedding_selectors - ) + selected_embeddings = torch.einsum("bntd,ntjd->bntd", selected_features, embedding_selectors) # selected_embeddings shape: (B, num_trees, depth, D) # Initialize thresholds using percentiles from the data @@ -120,9 +108,7 @@ def initialize(self, x, eps=1e-6): size=[self.num_trees, self.depth], ) - reshaped_embeddings = selected_embeddings.permute(1, 2, 0, 3).reshape( - self.num_trees * self.depth, -1 - ) + reshaped_embeddings = selected_embeddings.permute(1, 2, 0, 3).reshape(self.num_trees * self.depth, -1) self.feature_thresholds.data[...] 
= torch.as_tensor( list( map( @@ -137,9 +123,7 @@ def initialize(self, x, eps=1e-6): # Initialize temperatures based on the threshold differences temperatures = np.percentile( - check_numpy( - abs(selected_embeddings - self.feature_thresholds.unsqueeze(-1)) - ), + check_numpy(abs(selected_embeddings - self.feature_thresholds.unsqueeze(-1))), q=100 * min(1.0, self.threshold_init_cutoff), axis=0, ) @@ -161,26 +145,20 @@ def forward(self, x): raise ValueError("Input tensor must have shape (batch_size, J, D)") # Select feature (J) and embedding dimension (D) separately - feature_selectors = self.choice_function( - self.feature_selection_logits, dim=-1 - ) # [num_trees, depth, J] + feature_selectors = self.choice_function(self.feature_selection_logits, dim=-1) # [num_trees, depth, J] - embedding_selectors = self.choice_function( - self.embedding_selection_logits, dim=-1 - ) # [num_trees, depth, J, D] + embedding_selectors = self.choice_function(self.embedding_selection_logits, dim=-1) # [num_trees, depth, J, D] # Select features (J) first selected_features = torch.einsum("bjd,ntj->bntd", x, feature_selectors) # Select embeddings (D) within selected features - selected_embeddings = torch.einsum( - "bntd,ntjd->bntd", selected_features, embedding_selectors - ) + selected_embeddings = torch.einsum("bntd,ntjd->bntd", selected_features, embedding_selectors) # Compute threshold logits - threshold_logits = ( - selected_embeddings - self.feature_thresholds.unsqueeze(0).unsqueeze(-1) - ) * torch.exp(-self.log_temperatures.unsqueeze(0).unsqueeze(-1)) + threshold_logits = (selected_embeddings - self.feature_thresholds.unsqueeze(0).unsqueeze(-1)) * torch.exp( + -self.log_temperatures.unsqueeze(0).unsqueeze(-1) + ) threshold_logits = torch.stack([-threshold_logits, threshold_logits], dim=-1) @@ -267,9 +245,7 @@ def __init__( **kwargs, ) ) - input_dim = min( - input_dim + layer_dim * tree_dim, max_features or float("inf") - ) + input_dim = min(input_dim + layer_dim * tree_dim, 
max_features or float("inf")) def forward(self, x): """Forward pass through the DenseBlock. @@ -292,9 +268,7 @@ def forward(self, x): if self.max_features is not None: tail_features = min(self.max_features, x.shape[1]) - initial_features if tail_features > 0: - x = torch.cat( - [x[:, :initial_features, :], x[:, -tail_features:, :]], dim=1 - ) + x = torch.cat([x[:, :initial_features, :], x[:, -tail_features:, :]], dim=1) if self.training and self.input_dropout: x = F.dropout(x, self.input_dropout) diff --git a/deeptab/arch_utils/layer_utils/embedding_layer.py b/deeptab/arch_utils/layer_utils/embedding_layer.py index cb7a6675..9d6c0960 100644 --- a/deeptab/arch_utils/layer_utils/embedding_layer.py +++ b/deeptab/arch_utils/layer_utils/embedding_layer.py @@ -22,12 +22,8 @@ def __init__(self, num_feature_info, cat_feature_info, emb_feature_info, config) super().__init__() self.d_model = getattr(config, "d_model", 128) - self.embedding_activation = getattr( - config, "embedding_activation", nn.Identity() - ) - self.layer_norm_after_embedding = getattr( - config, "layer_norm_after_embedding", False - ) + self.embedding_activation = getattr(config, "embedding_activation", nn.Identity()) + self.layer_norm_after_embedding = getattr(config, "layer_norm_after_embedding", False) self.embedding_projection = getattr(config, "embedding_projection", True) self.use_cls = getattr(config, "use_cls", False) self.cls_position = getattr(config, "cls_position", 0) @@ -76,9 +72,7 @@ def __init__(self, num_feature_info, cat_feature_info, emb_feature_info, config) # for splines and other embeddings # splines followed by linear if n_knots actual knots is less than the defined knots else: - raise ValueError( - "Invalid embedding_type. Choose from 'linear', 'ndt', or 'plr'." - ) + raise ValueError("Invalid embedding_type. 
Choose from 'linear', 'ndt', or 'plr'.") self.cat_embeddings = nn.ModuleList( [ @@ -158,11 +152,7 @@ def forward(self, num_features, cat_features, emb_features): # Process categorical embeddings if self.cat_embeddings and cat_features is not None: cat_embeddings = [ - ( - emb(cat_features[i]) - if emb(cat_features[i]).ndim == 3 - else emb(cat_features[i]).unsqueeze(1) - ) + (emb(cat_features[i]) if emb(cat_features[i]).ndim == 3 else emb(cat_features[i]).unsqueeze(1)) for i, emb in enumerate(self.cat_embeddings) ] @@ -194,19 +184,14 @@ def forward(self, num_features, cat_features, emb_features): if emb_features != []: if self.embedding_projection: - emb_embeddings = [ - emb(emb_features[i]) for i, emb in enumerate(self.emb_embeddings) - ] + emb_embeddings = [emb(emb_features[i]) for i, emb in enumerate(self.emb_embeddings)] emb_embeddings = torch.stack(emb_embeddings, dim=1) else: - emb_embeddings = torch.stack(emb_features, dim=1) if self.layer_norm_after_embedding: emb_embeddings = self.embedding_norm(emb_embeddings) - embeddings = [ - e for e in [cat_embeddings, num_embeddings, emb_embeddings] if e is not None - ] + embeddings = [e for e in [cat_embeddings, num_embeddings, emb_embeddings] if e is not None] if embeddings: x = torch.cat(embeddings, dim=1) if len(embeddings) > 1 else embeddings[0] @@ -221,26 +206,24 @@ def forward(self, num_features, cat_features, emb_features): elif self.cls_position == 1: x = torch.cat([x, cls_tokens], dim=1) # type: ignore else: - raise ValueError( - "Invalid cls_position value. It should be either 0 or 1." - ) + raise ValueError("Invalid cls_position value. 
It should be either 0 or 1.") # Apply dropout to embeddings if specified in config if self.embedding_dropout is not None: x = self.embedding_dropout(x) return x - - def check_plr_embedding_compatibility(self, feature_info:tuple): + + def check_plr_embedding_compatibility(self, feature_info: tuple): # List of incompatible preprocessing terms for PLR embedding - incompatible_terms = ['ple', 'one-hot', 'polynomial', 'splines', 'sigmoid', 'rbf'] - + incompatible_terms = ["ple", "one-hot", "polynomial", "splines", "sigmoid", "rbf"] + # Iterate through each dictionary in the tuple (data) for sub_dict in feature_info: # Iterate through each feature in the current dictionary for feature, properties in sub_dict.items(): - preprocessing = properties.get('preprocessing', '') - + preprocessing = properties.get("preprocessing", "") + # Check for incompatible terms in the preprocessing string for term in incompatible_terms: if term in preprocessing: diff --git a/deeptab/arch_utils/layer_utils/importance.py b/deeptab/arch_utils/layer_utils/importance.py index 01641d3b..b61af197 100644 --- a/deeptab/arch_utils/layer_utils/importance.py +++ b/deeptab/arch_utils/layer_utils/importance.py @@ -1,5 +1,5 @@ -import torch.nn as nn import torch +import torch.nn as nn class ImportanceGetter(nn.Module): # Figure 3 part 1 @@ -16,7 +16,7 @@ def __init__(self, P, C, d): self.laynorm1 = nn.LayerNorm(self.d) self.laynorm2 = nn.LayerNorm(self.d) - def forward(self, O): + def forward(self, O): # noqa: E741 eprompt = self.pemb.unsqueeze(0).repeat(O.shape[0], 1, 1) dense_out = self.dense(torch.cat((self.laynorm1(eprompt), O), dim=-1)) diff --git a/deeptab/arch_utils/layer_utils/normalization_layers.py b/deeptab/arch_utils/layer_utils/normalization_layers.py index 9f09aba0..f635ef45 100644 --- a/deeptab/arch_utils/layer_utils/normalization_layers.py +++ b/deeptab/arch_utils/layer_utils/normalization_layers.py @@ -71,12 +71,12 @@ def forward(self, x): # Use unbiased=False for consistency with 
BatchNorm var = x.var(dim=0, unbiased=False) # Update running stats in-place - self.running_mean.mul_(1 - self.momentum).add_(self.momentum * mean) - self.running_var.mul_(1 - self.momentum).add_(self.momentum * var) + self.running_mean.mul_(1 - self.momentum).add_(self.momentum * mean) # type: ignore[union-attr] + self.running_var.mul_(1 - self.momentum).add_(self.momentum * var) # type: ignore[union-attr] else: mean = self.running_mean var = self.running_var - output = (x - mean) / torch.sqrt(var + self.eps) + output = (x - mean) / torch.sqrt(var + self.eps) # type: ignore[operator] output = output * self.weight + self.bias return output diff --git a/deeptab/arch_utils/layer_utils/rotary_utils.py b/deeptab/arch_utils/layer_utils/rotary_utils.py index adb8a510..c38cc515 100644 --- a/deeptab/arch_utils/layer_utils/rotary_utils.py +++ b/deeptab/arch_utils/layer_utils/rotary_utils.py @@ -3,7 +3,7 @@ import torch import torch.nn as nn from einops import rearrange -from rotary_embedding_torch import RotaryEmbedding +from rotary_embedding_torch import RotaryEmbedding # type: ignore[import-untyped] class RotaryEmbeddingLayer(nn.Module): @@ -105,3 +105,4 @@ def __init__( def forward(self, src, mask=None, src_key_padding_mask=None): # type: ignore return super().forward(src, mask, src_key_padding_mask) + return super().forward(src, mask, src_key_padding_mask) diff --git a/deeptab/arch_utils/lstm_utils.py b/deeptab/arch_utils/lstm_utils.py index 856ebfd6..72514eb9 100644 --- a/deeptab/arch_utils/lstm_utils.py +++ b/deeptab/arch_utils/lstm_utils.py @@ -156,12 +156,12 @@ def forward(self, x): ct_1 = self.ct_1 - ct = f * ct_1 + i * v * k + ct = f * ct_1 + i * v * k # type: ignore[operator] ct = torch.mean(self.ln_c(ct), [0, 1], keepdim=True) self.ct_1 = ct.detach() nt_1 = self.nt_1 - nt = f * nt_1 + i * k + nt = f * nt_1 + i * k # type: ignore[operator] nt = torch.mean(self.ln_n(nt), [0, 1], keepdim=True) self.nt_1 = nt.detach() @@ -321,12 +321,12 @@ def forward(self, x): z = 
torch.tanh(self.ln_z(self.z_gate(x) + self.rz_gate(ht_1))) ct_1 = self.ct_1 - ct = f * ct_1 + i * z + ct = f * ct_1 + i * z # type: ignore[operator] ct = torch.mean(self.ln_c(ct), [0, 1], keepdim=True) self.ct_1 = ct.detach() nt_1 = self.nt_1 - nt = f * nt_1 + i + nt = f * nt_1 + i # type: ignore[operator] nt = torch.mean(self.ln_n(nt), [0, 1], keepdim=True) self.nt_1 = nt.detach() diff --git a/deeptab/arch_utils/mamba_utils/mamba_arch.py b/deeptab/arch_utils/mamba_utils/mamba_arch.py index afe78662..ab9c4b58 100644 --- a/deeptab/arch_utils/mamba_utils/mamba_arch.py +++ b/deeptab/arch_utils/mamba_utils/mamba_arch.py @@ -43,9 +43,7 @@ def __init__( norm=get_normalization_layer(config), # type: ignore activation=getattr(config, "activation", nn.SiLU()), bidirectional=getattr(config, "bidirectional", False), - use_learnable_interaction=getattr( - config, "use_learnable_interaction", False - ), + use_learnable_interaction=getattr(config, "use_learnable_interaction", False), layer_norm_eps=getattr(config, "layer_norm_eps", 1e-5), AD_weight_decay=getattr(config, "AD_weight_decay", True), BC_layer_norm=getattr(config, "BC_layer_norm", False), @@ -325,10 +323,7 @@ def __init__( self.pscan = pscan # Store the imported pscan function except ImportError: self.pscan = None # Set to None if pscan is not available - print( - "The 'mambapy' package is not installed. Please install it by running:\n" - "pip install mambapy" - ) + print("The 'mambapy' package is not installed. 
Please install it by running:\npip install mambapy") else: self.pscan = None @@ -385,18 +380,16 @@ def __init__( else: raise NotImplementedError - dt_fwd = torch.exp( - torch.rand(self.d_inner) * (math.log(dt_max) - math.log(dt_min)) - + math.log(dt_min) - ).clamp(min=dt_init_floor) + dt_fwd = torch.exp(torch.rand(self.d_inner) * (math.log(dt_max) - math.log(dt_min)) + math.log(dt_min)).clamp( + min=dt_init_floor + ) inv_dt_fwd = dt_fwd + torch.log(-torch.expm1(-dt_fwd)) with torch.no_grad(): self.dt_proj_fwd.bias.copy_(inv_dt_fwd) if self.bidirectional: dt_bwd = torch.exp( - torch.rand(self.d_inner) * (math.log(dt_max) - math.log(dt_min)) - + math.log(dt_min) + torch.rand(self.d_inner) * (math.log(dt_max) - math.log(dt_min)) + math.log(dt_min) ).clamp(min=dt_init_floor) inv_dt_bwd = dt_bwd + torch.log(-torch.expm1(-dt_bwd)) with torch.no_grad(): diff --git a/deeptab/arch_utils/mamba_utils/mamba_original.py b/deeptab/arch_utils/mamba_utils/mamba_original.py index 0746b9f1..c44b69c6 100644 --- a/deeptab/arch_utils/mamba_utils/mamba_original.py +++ b/deeptab/arch_utils/mamba_utils/mamba_original.py @@ -187,7 +187,7 @@ def __init__(self, config): def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): return { i: layer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) - for i, layer in enumerate(self.layers) + for i, layer in enumerate(self.layers) # type: ignore[arg-type] } def forward(self, x): diff --git a/deeptab/arch_utils/rnn_utils.py b/deeptab/arch_utils/rnn_utils.py index 6b2ba931..9822b433 100644 --- a/deeptab/arch_utils/rnn_utils.py +++ b/deeptab/arch_utils/rnn_utils.py @@ -38,10 +38,7 @@ def __init__(self, config): if self.residuals: self.residual_matrix = nn.ParameterList( - [ - nn.Parameter(torch.randn(self.hidden_size, self.hidden_size)) - for _ in range(self.num_layers) - ] + [nn.Parameter(torch.randn(self.hidden_size, self.hidden_size)) for _ in range(self.num_layers)] ) # First Conv1d layer uses 
input_size @@ -165,10 +162,7 @@ def __init__( if self.residuals: self.residual_matrix = nn.ParameterList( - [ - nn.Parameter(torch.randn(self.hidden_size, self.hidden_size)) - for _ in range(self.num_layers) - ] + [nn.Parameter(torch.randn(self.hidden_size, self.hidden_size)) for _ in range(self.num_layers)] ) # First Conv1d layer uses input_size diff --git a/deeptab/arch_utils/simple_utils.py b/deeptab/arch_utils/simple_utils.py index ba1067a8..8d6a27be 100644 --- a/deeptab/arch_utils/simple_utils.py +++ b/deeptab/arch_utils/simple_utils.py @@ -1,29 +1,24 @@ import torch import torch.nn as nn + class MLP_Block(nn.Module): def __init__(self, d_in: int, d: int, dropout: float): super().__init__() self.block = nn.Sequential( - nn.BatchNorm1d(d_in), - nn.Linear(d_in, d), - nn.ReLU(inplace=True), - nn.Dropout(dropout), - nn.Linear(d, d_in) + nn.BatchNorm1d(d_in), nn.Linear(d_in, d), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d, d_in) ) + def forward(self, x: torch.Tensor) -> torch.Tensor: return self.block(x) - -import torch -def make_random_batches( - train_size: int, batch_size: int, device = None -) : +import torch # noqa: E402 + + +def make_random_batches(train_size: int, batch_size: int, device=None): permutation = torch.randperm(train_size, device=device) batches = permutation.split(batch_size) - assert torch.equal( - torch.arange(train_size, device=device), permutation.sort().values - ) - return batches \ No newline at end of file + assert torch.equal(torch.arange(train_size, device=device), permutation.sort().values) # noqa: S101 + return batches diff --git a/deeptab/arch_utils/trompt_utils.py b/deeptab/arch_utils/trompt_utils.py index 40fc871f..634ed3f3 100644 --- a/deeptab/arch_utils/trompt_utils.py +++ b/deeptab/arch_utils/trompt_utils.py @@ -1,8 +1,9 @@ -import torch.nn as nn +import numpy as np import torch +import torch.nn as nn + from .layer_utils.embedding_layer import EmbeddingLayer from .layer_utils.importance import ImportanceGetter 
-import numpy as np class Expander(nn.Module): # Figure 3 part 3 @@ -29,7 +30,7 @@ def __init__(self, feature_information, config): self.fe = ImportanceGetter(config.P, C, config.d_model) self.ex = Expander(config.P) - def forward(self, *data, O=None): + def forward(self, *data, O=None): # noqa: E741 x_res = self.ex(self.enc(*data)) M = self.fe(O) diff --git a/deeptab/base_models/__init__.py b/deeptab/base_models/__init__.py index 3411d9be..91685e92 100644 --- a/deeptab/base_models/__init__.py +++ b/deeptab/base_models/__init__.py @@ -1,8 +1,11 @@ +from .autoint import AutoInt +from .enode import ENODE from .ft_transformer import FTTransformer from .mambatab import MambaTab from .mambattn import MambAttention from .mambular import Mambular from .mlp import MLP +from .modern_nca import ModernNCA from .ndtf import NDTF from .node import NODE from .resnet import ResNet @@ -10,28 +13,25 @@ from .tabm import TabM from .tabtransformer import TabTransformer from .tabularnn import TabulaRNN -from .autoint import AutoInt -from .trompt import Trompt -from .enode import ENODE from .tangos import Tangos -from .modern_nca import ModernNCA +from .trompt import Trompt __all__ = [ - "ModernNCA", - "Tangos", "ENODE", - "Trompt", - "AutoInt", "MLP", "NDTF", "NODE", "SAINT", + "AutoInt", "FTTransformer", "MambAttention", "MambaTab", "Mambular", + "ModernNCA", "ResNet", "TabM", "TabTransformer", "TabulaRNN", + "Tangos", + "Trompt", ] diff --git a/deeptab/base_models/autoint.py b/deeptab/base_models/autoint.py index f393f9c0..a8a6b970 100644 --- a/deeptab/base_models/autoint.py +++ b/deeptab/base_models/autoint.py @@ -1,9 +1,10 @@ +import numpy as np import torch.nn as nn -from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer -from .utils.basemodel import BaseModel import torch.nn.init as nn_init -import numpy as np + +from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..configs.autoint_config import DefaultAutoIntConfig +from .utils.basemodel import 
BaseModel class AutoInt(BaseModel): @@ -80,9 +81,7 @@ def make_kv_compression(): return compression self.shared_kv_compression = ( - make_kv_compression() - if self.kv_compression and self.kv_compression_sharing == "layerwise" - else None + make_kv_compression() if self.kv_compression and self.kv_compression_sharing == "layerwise" else None ) # Transformer-based Interaction Layers @@ -106,14 +105,12 @@ def make_kv_compression(): if self.kv_compression_sharing == "headwise": layer["value_compression"] = make_kv_compression() else: - assert self.kv_compression_sharing == "key-value" + assert self.kv_compression_sharing == "key-value" # noqa: S101 self.layers.append(layer) # Final Normalization & Output Head - self.last_norm = ( - nn.LayerNorm(config.d_model) if getattr(config, "prenorm", False) else None - ) + self.last_norm = nn.LayerNorm(config.d_model) if getattr(config, "prenorm", False) else None self.head = nn.Linear(config.d_model * n_inputs, num_classes) @@ -138,9 +135,7 @@ def _get_kv_compressions(self, layer): (layer["key_compression"], layer["value_compression"]) if "key_compression" in layer and "value_compression" in layer else ( - (layer["key_compression"], layer["key_compression"]) - if "key_compression" in layer - else (None, None) + (layer["key_compression"], layer["key_compression"]) if "key_compression" in layer else (None, None) ) ) ) @@ -165,19 +160,19 @@ def forward(self, *data): x_residual = x # Store original input for residual connection # Apply normalization before attention if prenormalization is enabled - x_residual = layer["norm0"](x_residual) + x_residual = layer["norm0"](x_residual) # type: ignore[index] # Retrieve key-value compression layers key_compression, value_compression = self._get_kv_compressions(layer) # Multihead Attention - x_residual, _ = layer["attention"](x_residual, x_residual, x_residual) + x_residual, _ = layer["attention"](x_residual, x_residual, x_residual) # type: ignore[index] # Apply residual connection x = x + 
x_residual # Apply the linear transformation - x_residual = layer["linear"](x) + x_residual = layer["linear"](x) # type: ignore[index] x = x + x_residual # Second residual connection if self.last_norm: diff --git a/deeptab/base_models/enode.py b/deeptab/base_models/enode.py index 7a1eb352..3b4ff780 100644 --- a/deeptab/base_models/enode.py +++ b/deeptab/base_models/enode.py @@ -1,13 +1,13 @@ +import numpy as np import torch +import torch.nn as nn +from ..arch_utils.enode_utils import DenseBlock from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..arch_utils.mlp_utils import MLPhead -from ..arch_utils.enode_utils import DenseBlock from ..configs.enode_config import DefaultENODEConfig from ..utils.get_feature_dimensions import get_feature_dimensions from .utils.basemodel import BaseModel -import numpy as np -import torch.nn as nn class ENODE(BaseModel): diff --git a/deeptab/base_models/ft_transformer.py b/deeptab/base_models/ft_transformer.py index 9ec108ed..d9571628 100644 --- a/deeptab/base_models/ft_transformer.py +++ b/deeptab/base_models/ft_transformer.py @@ -1,3 +1,4 @@ +import numpy as np import torch.nn as nn from ..arch_utils.get_norm_fn import get_normalization_layer @@ -6,7 +7,6 @@ from ..arch_utils.transformer_utils import CustomTransformerEncoderLayer from ..configs.fttransformer_config import DefaultFTTransformerConfig from .utils.basemodel import BaseModel -import numpy as np class FTTransformer(BaseModel): @@ -100,7 +100,7 @@ def forward(self, *data): Tensor The output predictions of the model. 
""" - + x = self.embedding_layer(*data) x = self.encoder(x) diff --git a/deeptab/base_models/mambatab.py b/deeptab/base_models/mambatab.py index ce22d660..872851f7 100644 --- a/deeptab/base_models/mambatab.py +++ b/deeptab/base_models/mambatab.py @@ -5,8 +5,8 @@ from ..arch_utils.mamba_utils.mamba_arch import Mamba from ..arch_utils.mamba_utils.mamba_original import MambaOriginal from ..arch_utils.mlp_utils import MLPhead -from ..utils.get_feature_dimensions import get_feature_dimensions from ..configs.mambatab_config import DefaultMambaTabConfig +from ..utils.get_feature_dimensions import get_feature_dimensions from .utils.basemodel import BaseModel diff --git a/deeptab/base_models/mambattn.py b/deeptab/base_models/mambattn.py index e0f36d85..56ea8768 100644 --- a/deeptab/base_models/mambattn.py +++ b/deeptab/base_models/mambattn.py @@ -1,5 +1,6 @@ -import torch import numpy as np +import torch + from ..arch_utils.get_norm_fn import get_normalization_layer from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..arch_utils.mamba_utils.mambattn_arch import MambAttn diff --git a/deeptab/base_models/mambular.py b/deeptab/base_models/mambular.py index c5984b96..990a4001 100644 --- a/deeptab/base_models/mambular.py +++ b/deeptab/base_models/mambular.py @@ -1,3 +1,4 @@ +import numpy as np import torch from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer @@ -6,7 +7,6 @@ from ..arch_utils.mlp_utils import MLPhead from ..configs.mambular_config import DefaultMambularConfig from .utils.basemodel import BaseModel -import numpy as np class Mambular(BaseModel): diff --git a/deeptab/base_models/mlp.py b/deeptab/base_models/mlp.py index 5d4e46ea..6d08eeef 100644 --- a/deeptab/base_models/mlp.py +++ b/deeptab/base_models/mlp.py @@ -1,6 +1,7 @@ +import numpy as np import torch import torch.nn as nn -import numpy as np + from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..configs.mlp_config import DefaultMLPConfig from 
..utils.get_feature_dimensions import get_feature_dimensions @@ -75,9 +76,7 @@ def __init__( *feature_information, config=config, ) - input_dim = np.sum( - [len(info) * self.hparams.d_model for info in feature_information] - ) + input_dim = np.sum([len(info) * self.hparams.d_model for info in feature_information]) else: input_dim = get_feature_dimensions(*feature_information) @@ -95,9 +94,7 @@ def __init__( # Hidden layers for i in range(1, len(self.hparams.layer_sizes)): - self.layers.append( - nn.Linear(self.hparams.layer_sizes[i - 1], self.hparams.layer_sizes[i]) - ) + self.layers.append(nn.Linear(self.hparams.layer_sizes[i - 1], self.hparams.layer_sizes[i])) if self.hparams.batch_norm: self.layers.append(nn.BatchNorm1d(self.hparams.layer_sizes[i])) if self.hparams.layer_norm: diff --git a/deeptab/base_models/modern_nca.py b/deeptab/base_models/modern_nca.py index d6790b3d..c1d24456 100644 --- a/deeptab/base_models/modern_nca.py +++ b/deeptab/base_models/modern_nca.py @@ -1,12 +1,13 @@ +import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import numpy as np -from ..utils.get_feature_dimensions import get_feature_dimensions + from ..arch_utils.get_norm_fn import get_normalization_layer from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..arch_utils.mlp_utils import MLPhead from ..configs.modernnca_config import DefaultModernNCAConfig +from ..utils.get_feature_dimensions import get_feature_dimensions from .utils.basemodel import BaseModel @@ -31,10 +32,8 @@ def __init__( *feature_information, config=config, ) - - input_dim = np.sum( - [len(info) * self.hparams.d_model for info in feature_information] - ) + + input_dim = np.sum([len(info) * self.hparams.d_model for info in feature_information]) else: input_dim = get_feature_dimensions(*feature_information) @@ -86,11 +85,8 @@ def train_with_candidates(self, *data, targets, candidate_x, candidate_y): B, S, D = candidate_x.shape candidate_x = 
candidate_x.reshape(B, S * D) else: - x = torch.cat([t for tensors in data for t in tensors], dim=1) - candidate_x = torch.cat( - [t for tensors in candidate_x for t in tensors], dim=1 - ) + candidate_x = torch.cat([t for tensors in candidate_x for t in tensors], dim=1) # Encode input x = self.encoder(x) @@ -113,9 +109,7 @@ def train_with_candidates(self, *data, targets, candidate_x, candidate_y): # One-hot encode if classification if self.hparams.num_classes > 1: - candidate_y = F.one_hot( - candidate_y, num_classes=self.hparams.num_classes - ).to(x.dtype) + candidate_y = F.one_hot(candidate_y, num_classes=self.hparams.num_classes).to(x.dtype) elif len(candidate_y.shape) == 1: candidate_y = candidate_y.unsqueeze(-1) @@ -142,9 +136,7 @@ def validate_with_candidates(self, *data, candidate_x, candidate_y): candidate_x = candidate_x.reshape(B, S * D) else: x = torch.cat([t for tensors in data for t in tensors], dim=1) - candidate_x = torch.cat( - [t for tensors in candidate_x for t in tensors], dim=1 - ) + candidate_x = torch.cat([t for tensors in candidate_x for t in tensors], dim=1) # Encode input x = self.encoder(x) @@ -156,9 +148,7 @@ def validate_with_candidates(self, *data, candidate_x, candidate_y): # One-hot encode if classification if self.hparams.num_classes > 1: - candidate_y = F.one_hot( - candidate_y, num_classes=self.hparams.num_classes - ).to(x.dtype) + candidate_y = F.one_hot(candidate_y, num_classes=self.hparams.num_classes).to(x.dtype) elif len(candidate_y.shape) == 1: candidate_y = candidate_y.unsqueeze(-1) @@ -185,9 +175,7 @@ def predict_with_candidates(self, *data, candidate_x, candidate_y): candidate_x = candidate_x.reshape(B, S * D) else: x = torch.cat([t for tensors in data for t in tensors], dim=1) - candidate_x = torch.cat( - [t for tensors in candidate_x for t in tensors], dim=1 - ) + candidate_x = torch.cat([t for tensors in candidate_x for t in tensors], dim=1) # Encode input x = self.encoder(x) @@ -199,9 +187,7 @@ def 
predict_with_candidates(self, *data, candidate_x, candidate_y): # One-hot encode if classification if self.hparams.num_classes > 1: - candidate_y = F.one_hot( - candidate_y, num_classes=self.hparams.num_classes - ).to(x.dtype) + candidate_y = F.one_hot(candidate_y, num_classes=self.hparams.num_classes).to(x.dtype) elif len(candidate_y.shape) == 1: candidate_y = candidate_y.unsqueeze(-1) diff --git a/deeptab/base_models/ndtf.py b/deeptab/base_models/ndtf.py index 7707e976..483b19e3 100644 --- a/deeptab/base_models/ndtf.py +++ b/deeptab/base_models/ndtf.py @@ -75,13 +75,10 @@ def __init__( [ NeuralDecisionTree( input_dim=self.input_dimensions[idx], - depth=np.random.randint( - self.hparams.min_depth, self.hparams.max_depth - ), + depth=np.random.randint(self.hparams.min_depth, self.hparams.max_depth), output_dim=num_classes, lamda=self.hparams.lamda, - temperature=self.hparams.temperature - + np.abs(np.random.normal(0, 0.1)), + temperature=self.hparams.temperature + np.abs(np.random.normal(0, 0.1)), node_sampling=self.hparams.node_sampling, ) for idx in range(self.hparams.n_ensembles) diff --git a/deeptab/base_models/node.py b/deeptab/base_models/node.py index d6bd5544..2b114254 100644 --- a/deeptab/base_models/node.py +++ b/deeptab/base_models/node.py @@ -1,3 +1,4 @@ +import numpy as np import torch from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer @@ -6,7 +7,6 @@ from ..configs.node_config import DefaultNODEConfig from ..utils.get_feature_dimensions import get_feature_dimensions from .utils.basemodel import BaseModel -import numpy as np class NODE(BaseModel): @@ -68,9 +68,7 @@ def __init__( *feature_information, config=config, ) - input_dim = np.sum( - [len(info) * self.hparams.d_model for info in feature_information] - ) + input_dim = np.sum([len(info) * self.hparams.d_model for info in feature_information]) else: input_dim = get_feature_dimensions(*feature_information) diff --git a/deeptab/base_models/resnet.py b/deeptab/base_models/resnet.py 
index 7a1c72e2..a80fd94e 100644 --- a/deeptab/base_models/resnet.py +++ b/deeptab/base_models/resnet.py @@ -1,6 +1,7 @@ +import numpy as np import torch import torch.nn as nn -import numpy as np + from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..arch_utils.resnet_utils import ResidualBlock from ..configs.resnet_config import DefaultResNetConfig @@ -71,9 +72,7 @@ def __init__( *feature_information, config=config, ) - input_dim = np.sum( - [len(info) * self.hparams.d_model for info in feature_information] - ) + input_dim = np.sum([len(info) * self.hparams.d_model for info in feature_information]) else: input_dim = get_feature_dimensions(*feature_information) diff --git a/deeptab/base_models/saint.py b/deeptab/base_models/saint.py index b2818755..875c3829 100644 --- a/deeptab/base_models/saint.py +++ b/deeptab/base_models/saint.py @@ -1,10 +1,11 @@ +import numpy as np + from ..arch_utils.get_norm_fn import get_normalization_layer from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..arch_utils.mlp_utils import MLPhead from ..arch_utils.transformer_utils import RowColTransformer from ..configs.saint_config import DefaultSAINTConfig from .utils.basemodel import BaseModel -import numpy as np class SAINT(BaseModel): diff --git a/deeptab/base_models/tabm.py b/deeptab/base_models/tabm.py index 3d05ae2b..aa42c58a 100644 --- a/deeptab/base_models/tabm.py +++ b/deeptab/base_models/tabm.py @@ -1,6 +1,7 @@ +import numpy as np import torch import torch.nn as nn -import numpy as np + from ..arch_utils.get_norm_fn import get_normalization_layer from ..arch_utils.layer_utils.batch_ensemble_layer import LinearBatchEnsembleLayer from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer @@ -11,7 +12,6 @@ class TabM(BaseModel): - def __init__( self, feature_information: tuple, # Expecting (num_feature_info, cat_feature_info, embedding_feature_info) @@ -42,9 +42,7 @@ def __init__( if self.hparams.average_embeddings: input_dim = 
self.hparams.d_model else: - input_dim = np.sum( - [len(info) * self.hparams.d_model for info in feature_information] - ) + input_dim = np.sum([len(info) * self.hparams.d_model for info in feature_information]) else: input_dim = get_feature_dimensions(*feature_information) @@ -72,11 +70,7 @@ def __init__( if self.hparams.use_glu: self.layers.append(nn.GLU()) else: - self.layers.append( - self.hparams.activation - if hasattr(self.hparams, "activation") - else nn.SELU() - ) + self.layers.append(self.hparams.activation if hasattr(self.hparams, "activation") else nn.SELU()) if self.hparams.dropout > 0.0: self.layers.append(nn.Dropout(self.hparams.dropout)) @@ -110,11 +104,7 @@ def __init__( if self.hparams.use_glu: self.layers.append(nn.GLU()) else: - self.layers.append( - self.hparams.activation - if hasattr(self.hparams, "activation") - else nn.SELU() - ) + self.layers.append(self.hparams.activation if hasattr(self.hparams, "activation") else nn.SELU()) if self.hparams.dropout > 0.0: self.layers.append(nn.Dropout(self.hparams.dropout)) @@ -159,11 +149,7 @@ def forward(self, *data) -> torch.Tensor: if isinstance(self.layers[i], LinearBatchEnsembleLayer): out = self.layers[i](x) # `out` shape is expected to be (batch_size, ensemble_size, out_features) - if ( - hasattr(self, "skip_connections") - and self.skip_connections - and x.shape == out.shape - ): + if hasattr(self, "skip_connections") and self.skip_connections and x.shape == out.shape: x = x + out else: x = out diff --git a/deeptab/base_models/tabr.py b/deeptab/base_models/tabr.py index b603cab5..4f327cab 100644 --- a/deeptab/base_models/tabr.py +++ b/deeptab/base_models/tabr.py @@ -1,13 +1,16 @@ +import math + +import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import numpy as np -from ..utils.get_feature_dimensions import get_feature_dimensions +from torch import Tensor + from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..configs.tabr_config import 
DefaultTabRConfig +from ..utils.get_feature_dimensions import get_feature_dimensions from .utils.basemodel import BaseModel -from torch import Tensor -import math + class TabR(BaseModel): delu = None @@ -24,8 +27,8 @@ def __init__( super().__init__(config=config, **kwargs) self.save_hyperparameters(ignore=["feature_information"]) - # lazy import - if TabR.delu or TabR.faiss or TabR.faiss_torch_utils is None: + # lazy import + if TabR.delu or TabR.faiss or TabR.faiss_torch_utils is None: self._lazy_import_dependencies() self.returns_ensemble = False @@ -37,9 +40,7 @@ def __init__( config=config, ) print(self.embedding_layer) - input_dim = np.sum( - [len(info) * self.hparams.d_model for info in feature_information] - ) + input_dim = np.sum([len(info) * self.hparams.d_model for info in feature_information]) else: input_dim = get_feature_dimensions(*feature_information) @@ -59,12 +60,12 @@ def __init__( context_dropout = self.hparams.context_dropout if memory_efficient: - assert self.candidate_encoding_batch_size !=0 + assert self.candidate_encoding_batch_size != 0 # noqa: S101 - if mixer_normalization == 'auto': + if mixer_normalization == "auto": mixer_normalization = encoder_n_blocks > 0 if encoder_n_blocks == 0: - assert not mixer_normalization + assert not mixer_normalization # noqa: S101 # Encoder Module: E d_in = input_dim @@ -73,7 +74,6 @@ def __init__( self.linear = nn.Linear(d_in, d_main) self.context_size = self.hparams.context_size - def make_block(prenorm: bool) -> nn.Sequential: return nn.Sequential( *([Normalization(d_main)] if prenorm else []), @@ -83,12 +83,10 @@ def make_block(prenorm: bool) -> nn.Sequential: nn.Linear(d_block, d_main), nn.Dropout(dropout1), ) - + # here in the TabR paper, for first block of Encoder(E), # LayerNorm is omitted. In code, we omitted Normalization. 
- self.blocks0 = nn.ModuleList( - [make_block(i > 0) for i in range(encoder_n_blocks)] - ) + self.blocks0 = nn.ModuleList([make_block(i > 0) for i in range(encoder_n_blocks)]) # Retrieval Module: R self.normalization = Normalization(d_main) if mixer_normalization else None @@ -98,24 +96,24 @@ def make_block(prenorm: bool) -> nn.Sequential: nn.Linear(1, d_main) if num_classes == 1 else nn.Sequential( - nn.Embedding(num_classes, d_main), - # gives depreciation warning - delu.nn.Lambda(lambda x: x.squeeze(-2)) # Removes the unnecessary extra dimension added by the embedding layer + nn.Embedding(num_classes, d_main), + # gives depreciation warning + delu.nn.Lambda( # type: ignore[union-attr] + lambda x: x.squeeze(-2) + ), # Removes the unnecessary extra dimension added by the embedding layer ) ) - self.K = nn.Linear(d_main, d_main) # W_k in paper + self.K = nn.Linear(d_main, d_main) # W_k in paper self.T = nn.Sequential( nn.Linear(d_main, d_block), activation, nn.Dropout(dropout0), nn.Linear(d_block, d_main, bias=False), - ) # T for T(k-k_i) form the TabR paper. + ) # T for T(k-k_i) form the TabR paper. 
self.dropout = nn.Dropout(context_dropout) # Predictor Module : P - self.blocks1 = nn.ModuleList( - [make_block(True) for _ in range(predictor_n_blocks)] - ) + self.blocks1 = nn.ModuleList([make_block(True) for _ in range(predictor_n_blocks)]) self.head = nn.Sequential( Normalization(d_main), activation, @@ -126,45 +124,47 @@ def make_block(prenorm: bool) -> nn.Sequential: self.search_index = None self.memory_efficient = memory_efficient self.reset_parameters() - + def reset_parameters(self): - if isinstance(self.label_encoder, nn.Linear): # if num_classes==1 - bound = 1 / math.sqrt(2.0) # He initialization (common for layers with ReLU activation) - nn.init.uniform_(self.label_encoder.weight, -bound, bound) # type: ignore[code] # noqa: E501 - nn.init.uniform_(self.label_encoder.bias, -bound, bound) # type: ignore[code] # noqa: E501 - else: - assert isinstance(self.label_encoder[0], nn.Embedding) - nn.init.uniform_(self.label_encoder[0].weight, -1.0, 1.0) # type: ignore[code] # noqa: E501 + if isinstance(self.label_encoder, nn.Linear): # if num_classes==1 + bound = 1 / math.sqrt(2.0) # He initialization (common for layers with ReLU activation) + nn.init.uniform_(self.label_encoder.weight, -bound, bound) # type: ignore[code] + nn.init.uniform_(self.label_encoder.bias, -bound, bound) # type: ignore[code] + else: + assert isinstance(self.label_encoder[0], nn.Embedding) # noqa: S101 + nn.init.uniform_(self.label_encoder[0].weight, -1.0, 1.0) # type: ignore[code] def _lazy_import_dependencies(self): """Lazily import external dependencies and store them as class attributes.""" - if TabR.delu is None: + if TabR.delu is None: try: - import delu + import delu # type: ignore[import-untyped] + TabR.delu = delu print("Successfully lazy imported delu dependency.") except ImportError: - raise ImportError("Failed to import delu module for TabR. 
Ensure all dependencies are installed\n" - "You can install delu running 'pip install delu'.") from None - - if TabR.faiss is None: - try: - import faiss - import faiss.contrib.torch_utils - + raise ImportError( + "Failed to import delu module for TabR. Ensure all dependencies are installed\n" + "You can install delu running 'pip install delu'." + ) from None + + if TabR.faiss is None: + try: + import faiss # type: ignore[import-untyped] + import faiss.contrib.torch_utils # type: ignore[import-untyped] + TabR.faiss = faiss TabR.faiss_torch_utils = faiss.contrib.torch_utils print("Successfully lazy imported faiss dependency") except ImportError as e: - raise ImportError("Failed to import faiss module for TabR. Ensure all dependencies are installed\n" - "You can install faiss running 'pip install faiss-cpu' for CPU and 'pip install faiss-gpu' for GPU.") from None + raise ImportError( + "Failed to import faiss module for TabR. Ensure all dependencies are installed\n" + "You can install faiss running 'pip install faiss-cpu' for CPU and 'pip install faiss-gpu' for GPU." + ) from None - def _encode( - self, - a - ): + def _encode(self, a): # x = x.double() # issue x = a.float() # x=a.clone().detach().requires_grad_(True) @@ -173,13 +173,10 @@ def _encode( for block in self.blocks0: x = x + block(x) k = self.K(x if self.normalization is None else self.normalization(x)) - + return x, k - def forward( - self, - *data - ): + def forward(self, *data): """ Standard forward pass without candidate selection (for baseline compatibility). 
""" @@ -189,8 +186,8 @@ def forward( x = x.reshape(B, S * D) else: x = torch.cat([t for tensors in data for t in tensors], dim=1) - x,k = self._encode(x) - context_k = k.unsqueeze(1).expand(-1, self.context_size, -1) # using the batch itself as context + x, k = self._encode(x) + context_k = k.unsqueeze(1).expand(-1, self.context_size, -1) # using the batch itself as context similarities = ( -k.square().sum(-1, keepdim=True) + (2 * (k[..., None, :] @ context_k.transpose(-1, -2))).squeeze(-2) @@ -203,15 +200,9 @@ def forward( x = x + block(x + t) return self.head(x) - def train_with_candidates( - self, - *data, - targets, - candidate_x, - candidate_y - ): + def train_with_candidates(self, *data, targets, candidate_x, candidate_y): """TabR-style training forward pass selecting candidates.""" - assert targets is not None + assert targets is not None # noqa: S101 if self.hparams.use_embeddings: x = self.embedding_layer(*data) @@ -221,27 +212,18 @@ def train_with_candidates( B, S, D = candidate_x.shape candidate_x = candidate_x.reshape(B, S * D) else: - x = torch.cat([t for tensors in data for t in tensors], dim=1) - candidate_x = torch.cat( - [t for tensors in candidate_x for t in tensors], dim=1 - ) - - with torch.set_grad_enabled( - torch.is_grad_enabled() and not self.memory_efficient - ): - + candidate_x = torch.cat([t for tensors in candidate_x for t in tensors], dim=1) + + with torch.set_grad_enabled(torch.is_grad_enabled() and not self.memory_efficient): candidate_k = ( - self._encode(candidate_x)[1] # normalized candidate_x - if self.candidate_encoding_batch_size ==0 + self._encode(candidate_x)[1] # normalized candidate_x + if self.candidate_encoding_batch_size == 0 else torch.cat( [ - self._encode(x)[1] # normalized x + self._encode(x)[1] # normalized x # for x in delu.iter_batches( - for x in TabR.delu.iter_batches( - candidate_x, - self.candidate_encoding_batch_size - ) + for x in TabR.delu.iter_batches(candidate_x, self.candidate_encoding_batch_size) # type: 
ignore[union-attr] ] ) ) @@ -257,12 +239,9 @@ def train_with_candidates( # initializing the search index if self.search_index is None: self.search_index = ( - TabR.faiss.GpuIndexFlatL2( - TabR.faiss.StandardGpuResources(), - d_main - ) - if device.type == 'cuda' - else TabR.faiss.IndexFlatL2(d_main) + TabR.faiss.GpuIndexFlatL2(TabR.faiss.StandardGpuResources(), d_main) # type: ignore[union-attr] + if device.type == "cuda" + else TabR.faiss.IndexFlatL2(d_main) # type: ignore[union-attr] ) # Updating the index is much faster than creating a new one. self.search_index.reset() @@ -274,18 +253,16 @@ def train_with_candidates( ) # NOTE: to avoid leakage, the index i must be removed from the i-th row, # (because of how candidate_k is constructed). - distances[ - context_idx == torch.arange(batch_size, device=device)[:, None] - ] = torch.inf + distances[context_idx == torch.arange(batch_size, device=device)[:, None]] = torch.inf # Not the most elegant solution to remove the argmax, but anyway. context_idx = context_idx.gather(-1, distances.argsort()[:, :-1]) if self.memory_efficient and torch.is_grad_enabled(): # Repeating the same computation, # but now only for the context objects and with autograd on. 
- context_k = self._encode( - torch.cat([x,candidate_x])[context_idx].flatten(0,1) - )[1].reshape(batch_size, context_size, -1) + context_k = self._encode(torch.cat([x, candidate_x])[context_idx].flatten(0, 1))[1].reshape( + batch_size, context_size, -1 + ) else: context_k = candidate_k[context_idx] @@ -300,31 +277,26 @@ def train_with_candidates( ) probs = F.softmax(similarities, dim=-1) probs = self.dropout(probs) - - if self.hparams.num_classes > 1: # for classification + + if self.hparams.num_classes > 1: # for classification context_y_emb = self.label_encoder(candidate_y[context_idx][..., None].long()) - else: # for regression + else: # for regression context_y_emb = self.label_encoder(candidate_y[context_idx][..., None]) if len(context_y_emb.shape) == 4: - context_y_emb = context_y_emb[:,:,0,:] + context_y_emb = context_y_emb[:, :, 0, :] # Combine keys and labels with a transformation T. values = context_y_emb + self.T(k[:, None] - context_k) context_x = (probs[:, None] @ values).squeeze(1) x = x + context_x - # Predictor has LayerNorm, ReLU and Linear after the N_P number of blocks. + # Predictor has LayerNorm, ReLU and Linear after the N_P number of blocks. 
for block in self.blocks1: x = x + block(x) x = self.head(x) return x - def validate_with_candidates( - self, - *data, - candidate_x, - candidate_y - ): + def validate_with_candidates(self, *data, candidate_x, candidate_y): """Validation forward pass with TabR-style candidate selection.""" if self.hparams.use_embeddings: x = self.embedding_layer(*data) @@ -335,39 +307,34 @@ def validate_with_candidates( candidate_x = candidate_x.reshape(B, S * D) else: x = torch.cat([t for tensors in data for t in tensors], dim=1) - candidate_x = torch.cat( - [t for tensors in candidate_x for t in tensors], dim=1 - ) + candidate_x = torch.cat([t for tensors in candidate_x for t in tensors], dim=1) if not self.memory_efficient: candidate_k = ( - self._encode(candidate_x)[1] # normalized candidate_x + self._encode(candidate_x)[1] # normalized candidate_x if self.candidate_encoding_batch_size == 0 else torch.cat( [ - self._encode(x)[1] # normalized x - for x in TabR.delu.iter_batches( - candidate_x, - self.candidate_encoding_batch_size - ) + self._encode(x)[1] # normalized x + for x in TabR.delu.iter_batches(candidate_x, self.candidate_encoding_batch_size) # type: ignore[union-attr] ] ) ) - else: - candidate_x, candidate_k = self._encode(candidate_x) + else: + candidate_x, candidate_k = self._encode(candidate_x) - x, k = self._encode(x) # encoded x and k + x, k = self._encode(x) # encoded x and k batch_size, d_main = k.shape device = k.device context_size = self.context_size if self.search_index is None: self.search_index = ( - TabR.faiss.GpuIndexFlatL2(TabR.faiss.StandardGpuResources(), d_main) - if device.type == 'cuda' - else TabR.faiss.IndexFlatL2(d_main) + TabR.faiss.GpuIndexFlatL2(TabR.faiss.StandardGpuResources(), d_main) # type: ignore[union-attr] + if device.type == "cuda" + else TabR.faiss.IndexFlatL2(d_main) # type: ignore[union-attr] ) - + # Updating the index is much faster than creating a new one. 
self.search_index.reset() self.search_index.add(candidate_k.to(torch.float32)) # type: ignore[code] @@ -382,16 +349,16 @@ def validate_with_candidates( -k.square().sum(-1, keepdim=True) + (2 * (k[..., None, :] @ context_k.transpose(-1, -2))).squeeze(-2) - context_k.square().sum(-1) - ) + ) probs = F.softmax(similarities, dim=-1) probs = self.dropout(probs) - - if self.hparams.num_classes > 1: # for classification + + if self.hparams.num_classes > 1: # for classification context_y_emb = self.label_encoder(candidate_y[context_idx][..., None].long()) - else: # for regression + else: # for regression context_y_emb = self.label_encoder(candidate_y[context_idx][..., None]) if len(context_y_emb.shape) == 4: - context_y_emb = context_y_emb[:,:,0,:] + context_y_emb = context_y_emb[:, :, 0, :] values = context_y_emb + self.T(k[:, None] - context_k) context_x = (probs[:, None] @ values).squeeze(1) @@ -403,13 +370,7 @@ def validate_with_candidates( x = self.head(x) return x - - def predict_with_candidates( - self, - *data, - candidate_x, - candidate_y - ): + def predict_with_candidates(self, *data, candidate_x, candidate_y): """Prediction forward pass with TabR-style candidate selection.""" if self.hparams.use_embeddings: x = self.embedding_layer(*data) @@ -420,40 +381,34 @@ def predict_with_candidates( candidate_x = candidate_x.reshape(B, S * D) else: x = torch.cat([t for tensors in data for t in tensors], dim=1) - candidate_x = torch.cat( - [t for tensors in candidate_x for t in tensors], dim=1 - ) + candidate_x = torch.cat([t for tensors in candidate_x for t in tensors], dim=1) if not self.memory_efficient: candidate_k = ( - self._encode(candidate_x)[1] # normalized candidate_x + self._encode(candidate_x)[1] # normalized candidate_x if self.candidate_encoding_batch_size == 0 else torch.cat( [ - self._encode(x)[1] # normalized x - for x in TabR.delu.iter_batches( - candidate_x, - self.candidate_encoding_batch_size - ) + self._encode(x)[1] # normalized x + for x in 
TabR.delu.iter_batches(candidate_x, self.candidate_encoding_batch_size) # type: ignore[union-attr] ] ) ) - else: - candidate_x, candidate_k = self._encode(candidate_x) + else: + candidate_x, candidate_k = self._encode(candidate_x) - x, k = self._encode(x) # encoded x and k + x, k = self._encode(x) # encoded x and k batch_size, d_main = k.shape device = k.device context_size = self.context_size if self.search_index is None: self.search_index = ( - TabR.faiss.GpuIndexFlatL2(TabR.faiss.StandardGpuResources(), d_main) - if device.type == 'cuda' - else TabR.faiss.IndexFlatL2(d_main) + TabR.faiss.GpuIndexFlatL2(TabR.faiss.StandardGpuResources(), d_main) # type: ignore[union-attr] + if device.type == "cuda" + else TabR.faiss.IndexFlatL2(d_main) # type: ignore[union-attr] ) - # Updating the index is much faster than creating a new one. self.search_index.reset() self.search_index.add(candidate_k.to(torch.float32)) # type: ignore[code] @@ -468,16 +423,16 @@ def predict_with_candidates( -k.square().sum(-1, keepdim=True) + (2 * (k[..., None, :] @ context_k.transpose(-1, -2))).squeeze(-2) - context_k.square().sum(-1) - ) + ) probs = F.softmax(similarities, dim=-1) probs = self.dropout(probs) - - if self.hparams.num_classes > 1: # for classification + + if self.hparams.num_classes > 1: # for classification context_y_emb = self.label_encoder(candidate_y[context_idx][..., None].long()) - else: # for regression + else: # for regression context_y_emb = self.label_encoder(candidate_y[context_idx][..., None]) if len(context_y_emb.shape) == 4: - context_y_emb = context_y_emb[:,:,0,:] + context_y_emb = context_y_emb[:, :, 0, :] values = context_y_emb + self.T(k[:, None] - context_k) context_x = (probs[:, None] @ values).squeeze(1) diff --git a/deeptab/base_models/tabtransformer.py b/deeptab/base_models/tabtransformer.py index c7120e68..53e7ac2b 100644 --- a/deeptab/base_models/tabtransformer.py +++ b/deeptab/base_models/tabtransformer.py @@ -1,6 +1,7 @@ +import numpy as np import torch 
import torch.nn as nn -import numpy as np + from ..arch_utils.get_norm_fn import get_normalization_layer from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..arch_utils.mlp_utils import MLPhead diff --git a/deeptab/base_models/tabularnn.py b/deeptab/base_models/tabularnn.py index 893acbab..e151866e 100644 --- a/deeptab/base_models/tabularnn.py +++ b/deeptab/base_models/tabularnn.py @@ -1,4 +1,5 @@ from dataclasses import replace + import torch import torch.nn as nn @@ -11,7 +12,6 @@ class TabulaRNN(BaseModel): - def __init__( self, feature_information: tuple, # Expecting (num_feature_info, cat_feature_info, embedding_feature_info) diff --git a/deeptab/base_models/tangos.py b/deeptab/base_models/tangos.py index d99c41c8..57e4c011 100644 --- a/deeptab/base_models/tangos.py +++ b/deeptab/base_models/tangos.py @@ -1,6 +1,7 @@ +import numpy as np import torch import torch.nn as nn -import numpy as np + from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer from ..configs.tangos_config import DefaultTangosConfig from ..utils.get_feature_dimensions import get_feature_dimensions @@ -9,7 +10,7 @@ class Tangos(BaseModel): """ - A Multi-Layer Perceptron (MLP) model with optional GLU activation, batch normalization, layer normalization, and dropout. + A Multi-Layer Perceptron (MLP) model with optional GLU activation, batch normalization, layer normalization, and dropout. # noqa: W505 It includes a penalty term for specialization and orthogonality. Parameters @@ -40,12 +41,13 @@ class Tangos(BaseModel): head : nn.Linear The final output layer. 
""" + def __init__( self, feature_information: tuple, num_classes=1, - config: DefaultTangosConfig = DefaultTangosConfig(), - **kwargs + config: DefaultTangosConfig = DefaultTangosConfig(), # noqa: B008 + **kwargs, ): super().__init__(config=config, **kwargs) self.save_hyperparameters(ignore=["feature_information"]) @@ -74,9 +76,7 @@ def __init__( # Hidden layers for i in range(1, len(self.hparams.layer_sizes)): - self.layers.append( - nn.Linear(self.hparams.layer_sizes[i - 1], self.hparams.layer_sizes[i]) - ) + self.layers.append(nn.Linear(self.hparams.layer_sizes[i - 1], self.hparams.layer_sizes[i])) if self.hparams.batch_norm: self.layers.append(nn.BatchNorm1d(self.hparams.layer_sizes[i])) if self.hparams.layer_norm: @@ -95,7 +95,7 @@ def repr_forward(self, x) -> torch.Tensor: """ Computes the forward pass for feature representations. - This method processes the input through the MLP layers, optionally using + This method processes the input through the MLP layers, optionally using skip connections. 
Parameters @@ -180,10 +180,10 @@ def penalty_forward(self, *data): x = torch.cat([t for tensors in data for t in tensors], dim=1) batch_size = x.shape[0] - subsample = np.int32(self.subsample*batch_size) + subsample = np.int32(self.subsample * batch_size) # Flatten before passing to jacrev - flat_data = torch.cat([t for tensors in data for t in tensors], dim=1) + flat_data = torch.cat([t for tensors in data for t in tensors], dim=1) # Compute Jacobian jacobian = torch.func.vmap(torch.func.jacrev(self.repr_forward), randomness="different")(flat_data) @@ -201,23 +201,16 @@ def penalty_forward(self, *data): orth_loss = torch.tensor(0.0, requires_grad=True).to(x.device) # apply subsampling routine for orthogonalization loss if self.subsample > 0 and self.subsample < h_dim * (h_dim - 1) / 2: - tensor_pairs = [ - list(np.random.choice(h_dim, size=(2), replace=False)) - for i in range(subsample) - ] + tensor_pairs = [list(np.random.choice(h_dim, size=(2), replace=False)) for i in range(subsample)] for tensor_pair in tensor_pairs: - pairwise_corr = cos( - neuron_attr[tensor_pair[0], :, :], neuron_attr[tensor_pair[1], :, :] - ).norm(p=1) + pairwise_corr = cos(neuron_attr[tensor_pair[0], :, :], neuron_attr[tensor_pair[1], :, :]).norm(p=1) orth_loss = orth_loss + pairwise_corr orth_loss = orth_loss / (batch_size * self.subsample) else: for neuron_i in range(1, h_dim): for neuron_j in range(0, neuron_i): - pairwise_corr = cos( - neuron_attr[neuron_i, :, :], neuron_attr[neuron_j, :, :] - ).norm(p=1) + pairwise_corr = cos(neuron_attr[neuron_i, :, :], neuron_attr[neuron_j, :, :]).norm(p=1) orth_loss = orth_loss + pairwise_corr num_pairs = h_dim * (h_dim - 1) / 2 orth_loss = orth_loss / (batch_size * num_pairs) diff --git a/deeptab/base_models/trompt.py b/deeptab/base_models/trompt.py index 66ad4c4a..689b672d 100644 --- a/deeptab/base_models/trompt.py +++ b/deeptab/base_models/trompt.py @@ -1,15 +1,15 @@ -import torch.nn as nn +import numpy as np import torch +import torch.nn as 
nn + from ..arch_utils.get_norm_fn import get_normalization_layer from ..arch_utils.layer_utils.embedding_layer import EmbeddingLayer +from ..arch_utils.trompt_utils import TromptCell, TromptDecoder from ..configs.trompt_config import DefaultTromptConfig from .utils.basemodel import BaseModel -from ..arch_utils.trompt_utils import TromptCell, TromptDecoder -import numpy as np class Trompt(BaseModel): - def __init__( self, feature_information: tuple, # Expecting (num_feature_info, cat_feature_info, embedding_feature_info) @@ -22,9 +22,7 @@ def __init__( self.returns_ensemble = True # embedding layer - self.cells = nn.ModuleList( - TromptCell(feature_information, config) for _ in range(config.n_cycles) - ) + self.cells = nn.ModuleList(TromptCell(feature_information, config) for _ in range(config.n_cycles)) self.decoder = TromptDecoder(config.d_model, num_classes) self.init_rec = nn.Parameter(torch.empty(config.P, config.d_model)) self.n_cycles = config.n_cycles @@ -42,11 +40,11 @@ def forward(self, *data): Tensor The output predictions of the model. """ - O = self.init_rec.unsqueeze(0).repeat(data[0][0].shape[0], 1, 1) + O = self.init_rec.unsqueeze(0).repeat(data[0][0].shape[0], 1, 1) # noqa: E741 outputs = [] for i in range(self.n_cycles): - O = self.cells[i](*data, O=O) + O = self.cells[i](*data, O=O) # noqa: E741 # print(O.shape) # print(self.tdown(O).shape) outputs.append(self.decoder(O)) diff --git a/deeptab/base_models/utils/basemodel.py b/deeptab/base_models/utils/basemodel.py index 53824fcc..6837b73f 100644 --- a/deeptab/base_models/utils/basemodel.py +++ b/deeptab/base_models/utils/basemodel.py @@ -33,11 +33,7 @@ def save_hyperparameters(self, ignore=[]): List of keys to ignore while saving hyperparameters, by default []. 
""" # Filter the config and extra hparams for ignored keys - config_hparams = ( - {k: v for k, v in vars(self.config).items() if k not in ignore} - if self.config - else {} - ) + config_hparams = {k: v for k, v in vars(self.config).items() if k not in ignore} if self.config else {} extra_hparams = {k: v for k, v in self.extra_hparams.items() if k not in ignore} config_hparams.update(extra_hparams) @@ -152,9 +148,7 @@ def initialize_pooling_layers(self, config, n_inputs): """Initializes the layers needed for learnable pooling methods based on self.hparams.pooling_method.""" if self.hparams.pooling_method == "learned_flatten": # Flattening + Linear layer - self.learned_flatten_pooling = nn.Linear( - n_inputs * config.dim_feedforward, config.dim_feedforward - ) + self.learned_flatten_pooling = nn.Linear(n_inputs * config.dim_feedforward, config.dim_feedforward) elif self.hparams.pooling_method == "attention": # Attention-based pooling with learnable attention weights @@ -229,9 +223,7 @@ def encode(self, data, grad=False): # Check if at least one of the contextualized embedding methods exists valid_layers = ["mamba", "rnn", "lstm", "encoder"] - available_layer = next( - (attr for attr in valid_layers if hasattr(self, attr)), None - ) + available_layer = next((attr for attr in valid_layers if hasattr(self, attr)), None) if not available_layer: raise ValueError("The model does not generate contextualized embeddings") @@ -239,33 +231,31 @@ def encode(self, data, grad=False): # Get the actual layer and call it if not grad: with torch.no_grad(): - # Get the actual layer and call it - x = self.embedding_layer(*data) - + x = self.embedding_layer(*data) # type: ignore[reportCallIssue] if getattr(self.hparams, "shuffle_embeddings", False): x = x[:, self.perm, :] layer = getattr(self, available_layer) if available_layer == "rnn": - embeddings, _ = layer(x) + embeddings, _ = layer(x) # type: ignore[reportCallIssue] else: - embeddings = self.encoder(x) - embeddings = layer(x) + 
embeddings = self.encoder(x) # type: ignore[reportCallIssue] + embeddings = layer(x) # type: ignore[reportCallIssue] else: - x = self.embedding_layer(*data) + x = self.embedding_layer(*data) # type: ignore[reportCallIssue] if getattr(self.hparams, "shuffle_embeddings", False): x = x[:, self.perm, :] layer = getattr(self, available_layer) if available_layer == "rnn": - embeddings, _ = layer(x) + embeddings, _ = layer(x) # type: ignore[reportCallIssue] else: - embeddings = layer(x) + embeddings = layer(x) # type: ignore[reportCallIssue] return embeddings - + def embedding_parameters(self): """Returns only embedding parameters for pretraining.""" return (p for name, p in self.named_parameters() if "embedding" in name) @@ -273,12 +263,11 @@ def embedding_parameters(self): def encode_features(self, num_features, cat_features, embeddings): """Encodes features using embeddings, returning their representations.""" return self.forward(num_features, cat_features, embeddings, output_embeddings=True) - + def get_embedding_state_dict(self): """Returns only the state dict of the embeddings.""" return {k: v for k, v in self.state_dict().items() if "embedding" in k} - + def load_embedding_state_dict(self, state_dict): """Loads pretrained embeddings into the model.""" self.load_state_dict(state_dict, strict=False) - diff --git a/deeptab/base_models/utils/lightning_wrapper.py b/deeptab/base_models/utils/lightning_wrapper.py index f7eb2868..ae6125dc 100644 --- a/deeptab/base_models/utils/lightning_wrapper.py +++ b/deeptab/base_models/utils/lightning_wrapper.py @@ -103,9 +103,9 @@ def setup(self, stage=None): all_train_embeddings = [] all_train_targets = [] - device = self.device if hasattr(self, "device") else self.trainer.device + device = self.device if hasattr(self, "device") else self.trainer.device # type: ignore[attr-defined] - for batch in self.trainer.datamodule.train_dataloader(): + for batch in self.trainer.datamodule.train_dataloader(): # type: ignore[attr-defined] 
(num_features, cat_features, embeddings), labels = batch all_train_num.append([f.to(device) for f in num_features]) # Keep lists @@ -116,13 +116,10 @@ def setup(self, stage=None): # Maintain structure: each feature type remains a list of tensors self.train_features = ( - [torch.cat(features, dim=0) for features in zip(*all_train_num)], - [torch.cat(features, dim=0) for features in zip(*all_train_cat)], + [torch.cat(features, dim=0) for features in zip(*all_train_num, strict=False)], + [torch.cat(features, dim=0) for features in zip(*all_train_cat, strict=False)], ( - [ - torch.cat(features, dim=0) - for features in zip(*all_train_embeddings) - ] + [torch.cat(features, dim=0) for features in zip(*all_train_embeddings, strict=False)] if all_train_embeddings else None ), @@ -177,10 +174,7 @@ def compute_loss(self, predictions, y_true): ) if getattr(self.estimator, "returns_ensemble", False): # Ensemble case - if ( - self.loss_fct.__class__.__name__ == "CrossEntropyLoss" - and predictions.dim() == 3 - ): + if self.loss_fct.__class__.__name__ == "CrossEntropyLoss" and predictions.dim() == 3: # Classification case with ensemble: predictions (N, E, k), y_true (N,) N, E, k = predictions.shape loss = 0.0 @@ -225,10 +219,10 @@ def training_step(self, batch, batch_idx): # type: ignore # Check if the model has a `penalty_forward` method if hasattr(self.estimator, "penalty_forward"): - preds, penalty = self.estimator.penalty_forward(*data) + preds, penalty = self.estimator.penalty_forward(*data) # type: ignore[reportCallIssue] loss = self.compute_loss(preds, labels) + penalty elif hasattr(self.estimator, "train_with_candidates"): - preds = self.estimator.train_with_candidates( + preds = self.estimator.train_with_candidates( # type: ignore[reportCallIssue] *data, targets=labels, candidate_x=self.train_features, @@ -240,9 +234,7 @@ def training_step(self, batch, batch_idx): # type: ignore loss = self.compute_loss(preds, labels) # Log the training loss - self.log( - "train_loss", 
loss, on_step=True, on_epoch=True, prog_bar=True, logger=True - ) + self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True) # Log custom training metrics for metric_name, metric_fn in self.train_metrics.items(): @@ -276,9 +268,8 @@ def validation_step(self, batch, batch_idx): # type: ignore data, labels = batch if hasattr(self.estimator, "validate_with_candidates") and self.train_features is not None: - preds = self.estimator.validate_with_candidates( - *data, - candidate_x=self.train_features, candidate_y=self.train_targets + preds = self.estimator.validate_with_candidates( # type: ignore[reportCallIssue] + *data, candidate_x=self.train_features, candidate_y=self.train_targets ) else: preds = self(*data) @@ -323,10 +314,10 @@ def test_step(self, batch, batch_idx): # type: ignore Test loss. """ data, labels = batch - if hasattr(self.estimator, 'predict_with_candidates') and self.train_features is not None: - preds = self.estimator.predict_with_candidates( + if hasattr(self.estimator, "predict_with_candidates") and self.train_features is not None: + preds = self.estimator.predict_with_candidates( # type: ignore[reportCallIssue] *data, candidates_x=self.train_features, candidates_y=self.train_targets - ) + ) else: preds = self(*data) test_loss = self.compute_loss(preds, labels) @@ -358,7 +349,7 @@ def predict_step(self, batch, batch_idx): Predictions. 
""" if hasattr(self.estimator, "predict_with_candidates") and self.train_features is not None: - preds = self.estimator.predict_with_candidates( + preds = self.estimator.predict_with_candidates( # type: ignore[reportCallIssue] *batch, candidate_x=self.train_features, candidate_y=self.train_targets, @@ -407,13 +398,8 @@ def on_validation_epoch_end(self): # Apply pruning logic if needed if self.current_epoch >= self.pruning_epoch: - if ( - self.early_pruning_threshold is not None - and val_loss_value > self.early_pruning_threshold - ): - print( - f"Pruned at epoch {self.current_epoch}, val_loss {val_loss_value}" - ) + if self.early_pruning_threshold is not None and val_loss_value > self.early_pruning_threshold: + print(f"Pruned at epoch {self.current_epoch}, val_loss {val_loss_value}") self.trainer.should_stop = True # Stop training early def epoch_val_loss_at(self, epoch): @@ -503,7 +489,7 @@ def pretrain_embeddings( print("🚀 Pretraining embeddings...") self.estimator.train() - optimizer = torch.optim.Adam(self.estimator.embedding_parameters(), lr=lr) + optimizer = torch.optim.Adam(self.estimator.embedding_parameters(), lr=lr) # type: ignore[reportCallIssue] # 🔥 Single tqdm progress bar across all epochs and batches total_batches = pretrain_epochs * len(train_dataloader) @@ -517,7 +503,7 @@ def pretrain_embeddings( optimizer.zero_grad() # Forward pass through embeddings only - embeddings = self.estimator.encode(data, grad=True) + embeddings = self.estimator.encode(data, grad=True) # type: ignore[reportCallIssue] # Compute nearest neighbors based on task type knn_indices = self.get_knn(labels, k_neighbors, regression) @@ -539,7 +525,7 @@ def pretrain_embeddings( progress_bar.close() # Save pretrained embeddings - torch.save(self.estimator.get_embedding_state_dict(), save_path) + torch.save(self.estimator.get_embedding_state_dict(), save_path) # type: ignore[reportCallIssue] print(f"✅ Embeddings saved to {save_path}") def get_knn(self, labels, k_neighbors=5, 
regression=True, device=""): @@ -564,22 +550,16 @@ def get_knn(self, labels, k_neighbors=5, regression=True, device=""): # Ensure k_neighbors doesn't exceed available samples k_neighbors = min(k_neighbors, batch_size - 1) - knn_indices = torch.zeros( - batch_size, k_neighbors, dtype=torch.long, device=labels.device - ) + knn_indices = torch.zeros(batch_size, k_neighbors, dtype=torch.long, device=labels.device) if not regression: # Classification: Find samples with the same class label for i in range(batch_size): same_class_indices = (labels == labels[i]).nonzero(as_tuple=True)[0] - same_class_indices = same_class_indices[ - same_class_indices != i - ] # Remove self-index + same_class_indices = same_class_indices[same_class_indices != i] # Remove self-index if len(same_class_indices) >= k_neighbors: - knn_indices[i] = same_class_indices[ - torch.randperm(len(same_class_indices))[:k_neighbors] - ] + knn_indices[i] = same_class_indices[torch.randperm(len(same_class_indices))[:k_neighbors]] else: knn_indices[i, : len(same_class_indices)] = same_class_indices knn_indices[i, len(same_class_indices) :] = same_class_indices[ @@ -592,13 +572,9 @@ def get_knn(self, labels, k_neighbors=5, regression=True, device=""): else: # Regression: Find nearest neighbors using Euclidean distance with torch.no_grad(): - target_distances = torch.cdist( - labels.float(), labels.float(), p=2 - ).squeeze(-1) + target_distances = torch.cdist(labels.float(), labels.float(), p=2).squeeze(-1) - knn_indices = target_distances.topk(k_neighbors + 1, largest=False).indices[ - :, 1: - ] # Exclude self + knn_indices = target_distances.topk(k_neighbors + 1, largest=False).indices[:, 1:] # Exclude self return knn_indices @@ -629,9 +605,7 @@ def contrastive_loss(self, embeddings, knn_indices, temperature=0.1): loss_fn = torch.nn.CosineEmbeddingLoss(margin=0.0, reduction="mean") for s in range(S): # Loop over sequence length - embeddings_s = embeddings[ - :, s, : - ] # Shape: (N, D) -> Single token per 
sample + embeddings_s = embeddings[:, s, :] # Shape: (N, D) -> Single token per sample # Gather nearest neighbor embeddings for this time step positive_pairs = torch.gather( @@ -641,19 +615,15 @@ def contrastive_loss(self, embeddings, knn_indices, temperature=0.1): ) # Shape: (N, k_neighbors, D) # Flatten batch and neighbors into a single batch dimension - embeddings_s = embeddings_s.repeat_interleave( - k_neighbors, dim=0 - ) # (N * k_neighbors, D) + embeddings_s = embeddings_s.repeat_interleave(k_neighbors, dim=0) # (N * k_neighbors, D) positive_pairs = positive_pairs.view(-1, D) # (N * k_neighbors, D) # Labels: +1 for positive similarity - labels = torch.ones( - embeddings_s.shape[0], device=embeddings.device - ) # Shape: (N * k_neighbors) + labels = torch.ones(embeddings_s.shape[0], device=embeddings.device) # Shape: (N * k_neighbors) # Compute cosine embedding loss loss += -1.0 * loss_fn(embeddings_s, positive_pairs, labels) # Average loss across all sequence steps loss /= S - return loss \ No newline at end of file + return loss diff --git a/deeptab/base_models/utils/pretraining.py b/deeptab/base_models/utils/pretraining.py index 76c2347a..98dfa9bd 100644 --- a/deeptab/base_models/utils/pretraining.py +++ b/deeptab/base_models/utils/pretraining.py @@ -1,9 +1,10 @@ +from itertools import chain + +import lightning as pl import torch import torch.nn as nn import torch.nn.functional as F -import lightning as pl from lightning.pytorch.callbacks import ModelSummary -from itertools import chain class ContrastivePretrainer(pl.LightningModule): @@ -48,27 +49,17 @@ def get_knn(self, labels): if not self.regression: for i in range(batch_size): same_class_indices = (labels == labels[i]).nonzero(as_tuple=True)[0] - different_class_indices = (labels != labels[i]).nonzero(as_tuple=True)[ - 0 - ] + different_class_indices = (labels != labels[i]).nonzero(as_tuple=True)[0] same_class_indices = same_class_indices[same_class_indices != i] - knn_indices[i] = 
self._sample_indices(same_class_indices, k_neighbors) - neg_indices[i] = self._sample_indices( - different_class_indices, k_neighbors - ) + knn_indices[i] = self._sample_indices(same_class_indices, k_neighbors) # type: ignore[reportCallIssue] + neg_indices[i] = self._sample_indices(different_class_indices, k_neighbors) # type: ignore[reportCallIssue] else: with torch.no_grad(): - target_distances = torch.cdist( - labels.float(), labels.float(), p=2 - ).squeeze(-1) + target_distances = torch.cdist(labels.float(), labels.float(), p=2).squeeze(-1) - knn_indices = target_distances.topk(k_neighbors + 1, largest=False).indices[ - :, 1: - ] - neg_indices = target_distances.topk(k_neighbors, largest=True).indices[ - :, :k_neighbors - ] + knn_indices = target_distances.topk(k_neighbors + 1, largest=False).indices[:, 1:] + neg_indices = target_distances.topk(k_neighbors, largest=True).indices[:, :k_neighbors] return knn_indices.to(self.device), neg_indices.to(self.device) @@ -88,16 +79,14 @@ def contrastive_loss(self, embeddings, knn_indices, neg_indices): labels = [] if self.use_positive: - pairs.append(positive_pairs.view(-1, D)) + pairs.append(positive_pairs.view(-1, D)) # type: ignore[union-attr] labels.append(torch.ones(N * k_neighbors, device=self.device)) if self.use_negative: - pairs.append(negative_pairs.view(-1, D)) + pairs.append(negative_pairs.view(-1, D)) # type: ignore[union-attr] labels.append(-torch.ones(N * k_neighbors, device=self.device)) if not pairs: - raise ValueError( - "At least one of use_positive or use_negative must be True." 
- ) + raise ValueError("At least one of use_positive or use_negative must be True.") all_pairs = torch.cat(pairs, dim=0) all_labels = torch.cat(labels, dim=0) @@ -120,16 +109,14 @@ def contrastive_loss(self, embeddings, knn_indices, neg_indices): labels = [] if self.use_positive: - pairs.append(positive_pairs.view(-1, D)) + pairs.append(positive_pairs.view(-1, D)) # type: ignore[union-attr] labels.append(torch.ones(N * k_neighbors, device=self.device)) if self.use_negative: - pairs.append(negative_pairs.view(-1, D)) + pairs.append(negative_pairs.view(-1, D)) # type: ignore[union-attr] labels.append(-torch.ones(N * k_neighbors, device=self.device)) if not pairs: - raise ValueError( - "At least one of use_positive or use_negative must be True." - ) + raise ValueError("At least one of use_positive or use_negative must be True.") all_pairs = torch.cat(pairs, dim=0) all_labels = torch.cat(labels, dim=0) @@ -139,7 +126,6 @@ def contrastive_loss(self, embeddings, knn_indices, neg_indices): return loss def training_step(self, batch, batch_idx): - self.estimator.embedding_layer.train() data, labels = batch @@ -147,9 +133,7 @@ def training_step(self, batch, batch_idx): knn_indices, neg_indices = self.get_knn(labels) loss = self.contrastive_loss(embeddings, knn_indices, neg_indices) - self.log( - "train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True - ) + self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True) return loss def test_step(self, batch, batch_idx): @@ -157,9 +141,7 @@ def test_step(self, batch, batch_idx): embeddings = self(data) knn_indices, neg_indices = self.get_knn(labels) loss = self.contrastive_loss(embeddings, knn_indices, neg_indices) - self.log( - "test_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True - ) + self.log("test_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, batch, batch_idx): @@ -167,9 +149,7 @@ def 
validation_step(self, batch, batch_idx): embeddings = self(data) knn_indices, neg_indices = self.get_knn(labels) loss = self.contrastive_loss(embeddings, knn_indices, neg_indices) - self.log( - "val_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True - ) + self.log("val_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True) return loss def configure_optimizers(self): diff --git a/deeptab/configs/__init__.py b/deeptab/configs/__init__.py index 4f397c1f..63cf3761 100644 --- a/deeptab/configs/__init__.py +++ b/deeptab/configs/__init__.py @@ -1,8 +1,12 @@ +from .autoint_config import DefaultAutoIntConfig +from .base_config import BaseConfig +from .enode_config import DefaultENODEConfig from .fttransformer_config import DefaultFTTransformerConfig from .mambatab_config import DefaultMambaTabConfig from .mambattention_config import DefaultMambAttentionConfig from .mambular_config import DefaultMambularConfig from .mlp_config import DefaultMLPConfig +from .modernnca_config import DefaultModernNCAConfig from .ndtf_config import DefaultNDTFConfig from .node_config import DefaultNODEConfig from .resnet_config import DefaultResNetConfig @@ -10,24 +14,19 @@ from .tabm_config import DefaultTabMConfig from .tabtransformer_config import DefaultTabTransformerConfig from .tabularnn_config import DefaultTabulaRNNConfig -from .autoint_config import DefaultAutoIntConfig -from .trompt_config import DefaultTromptConfig -from .base_config import BaseConfig -from .enode_config import DefaultENODEConfig from .tangos_config import DefaultTangosConfig -from .modernnca_config import DefaultModernNCAConfig +from .trompt_config import DefaultTromptConfig __all__ = [ - "DefaultModernNCAConfig", - "DefaultTangosConfig", - "DefaultENODEConfig", - "DefaultTromptConfig", + "BaseConfig", "DefaultAutoIntConfig", + "DefaultENODEConfig", "DefaultFTTransformerConfig", "DefaultMLPConfig", "DefaultMambAttentionConfig", "DefaultMambaTabConfig", "DefaultMambularConfig", + 
"DefaultModernNCAConfig", "DefaultNDTFConfig", "DefaultNODEConfig", "DefaultResNetConfig", @@ -35,5 +34,6 @@ "DefaultTabMConfig", "DefaultTabTransformerConfig", "DefaultTabulaRNNConfig", - "BaseConfig", + "DefaultTangosConfig", + "DefaultTromptConfig", ] diff --git a/deeptab/configs/autoint_config.py b/deeptab/configs/autoint_config.py index e33b1513..80f18f53 100644 --- a/deeptab/configs/autoint_config.py +++ b/deeptab/configs/autoint_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from ..arch_utils.transformer_utils import ReGLU from .base_config import BaseConfig diff --git a/deeptab/configs/base_config.py b/deeptab/configs/base_config.py index 0e5a6396..d0874892 100644 --- a/deeptab/configs/base_config.py +++ b/deeptab/configs/base_config.py @@ -1,5 +1,6 @@ -from dataclasses import dataclass, field from collections.abc import Callable +from dataclasses import dataclass, field + import torch.nn as nn diff --git a/deeptab/configs/enode_config.py b/deeptab/configs/enode_config.py index ce54af53..f210e9cc 100644 --- a/deeptab/configs/enode_config.py +++ b/deeptab/configs/enode_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/fttransformer_config.py b/deeptab/configs/fttransformer_config.py index 37bdcf4b..ab111130 100644 --- a/deeptab/configs/fttransformer_config.py +++ b/deeptab/configs/fttransformer_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from ..arch_utils.transformer_utils import ReGLU from .base_config import BaseConfig diff --git a/deeptab/configs/mambatab_config.py b/deeptab/configs/mambatab_config.py index ccfe459b..a4c79fd6 100644 --- a/deeptab/configs/mambatab_config.py +++ b/deeptab/configs/mambatab_config.py @@ -1,6 +1,8 @@ from 
collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/mambattention_config.py b/deeptab/configs/mambattention_config.py index 22dd319f..6044cdbf 100644 --- a/deeptab/configs/mambattention_config.py +++ b/deeptab/configs/mambattention_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/mambular_config.py b/deeptab/configs/mambular_config.py index 1dfd44e4..8ef2f276 100644 --- a/deeptab/configs/mambular_config.py +++ b/deeptab/configs/mambular_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/mlp_config.py b/deeptab/configs/mlp_config.py index 1dda45fa..bc4880cb 100644 --- a/deeptab/configs/mlp_config.py +++ b/deeptab/configs/mlp_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/modernnca_config.py b/deeptab/configs/modernnca_config.py index e6d01b2f..30cd3493 100644 --- a/deeptab/configs/modernnca_config.py +++ b/deeptab/configs/modernnca_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/ndtf_config.py b/deeptab/configs/ndtf_config.py index 1fa1eec8..bea45fda 100644 --- a/deeptab/configs/ndtf_config.py +++ b/deeptab/configs/ndtf_config.py @@ -1,4 +1,5 @@ from dataclasses import dataclass + from .base_config import BaseConfig diff --git a/deeptab/configs/node_config.py b/deeptab/configs/node_config.py index b60ccd8c..529a05bf 100644 --- a/deeptab/configs/node_config.py +++ 
b/deeptab/configs/node_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/resnet_config.py b/deeptab/configs/resnet_config.py index 7a458d59..9e092c07 100644 --- a/deeptab/configs/resnet_config.py +++ b/deeptab/configs/resnet_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/saint_config.py b/deeptab/configs/saint_config.py index 3e903692..4e026970 100644 --- a/deeptab/configs/saint_config.py +++ b/deeptab/configs/saint_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/tabm_config.py b/deeptab/configs/tabm_config.py index 6d9c20ca..1dc93e11 100644 --- a/deeptab/configs/tabm_config.py +++ b/deeptab/configs/tabm_config.py @@ -1,7 +1,9 @@ from collections.abc import Callable from dataclasses import dataclass, field from typing import Literal + import torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/tabr_config.py b/deeptab/configs/tabr_config.py index d1ec9799..8bf30e1a 100644 --- a/deeptab/configs/tabr_config.py +++ b/deeptab/configs/tabr_config.py @@ -1,8 +1,11 @@ from collections.abc import Callable from dataclasses import dataclass, field -from .base_config import BaseConfig + import torch.nn as nn +from .base_config import BaseConfig + + @dataclass class DefaultTabRConfig(BaseConfig): """Configuration class for the default TabR model with predefined hyperparameters. 
@@ -11,28 +14,28 @@ class DefaultTabRConfig(BaseConfig): """ # Optimizer Parameters - lr: float = 0.0003121273641315169 + lr: float = 0.0003121273641315169 weight_decay: float = 1.2260352006404615e-06 - lr_patience =10 + lr_patience = 10 lr_factor: float = 0.1 # Factor for LR scheduler # Architecture Parameters d_main: int = 256 - context_dropout: float =0.38920071545944357 - d_multiplier : int = 2 - encoder_n_blocks : int=0 - predictor_n_blocks: int=1 - mixer_normalization: str ="auto" - dropout0: float =0.38852797479169876 - dropout1: float=0.0 + context_dropout: float = 0.38920071545944357 + d_multiplier: int = 2 + encoder_n_blocks: int = 0 + predictor_n_blocks: int = 1 + mixer_normalization: str = "auto" + dropout0: float = 0.38852797479169876 + dropout1: float = 0.0 normalization: str = "LayerNorm" - activation:Callable = nn.ReLU() + activation: Callable = nn.ReLU() # noqa: RUF009 memory_efficient: bool = False - candidate_encoding_batch_size:int = 0 - context_size:int=96 + candidate_encoding_batch_size: int = 0 + context_size: int = 96 # Embedding Parameters embedding_type: str = "plr" plr_lite: bool = True n_frequencies: int = 75 - frequencies_init_scale: float = 0.045 \ No newline at end of file + frequencies_init_scale: float = 0.045 diff --git a/deeptab/configs/tabtransformer_config.py b/deeptab/configs/tabtransformer_config.py index 84f16c92..1b0f9f3b 100644 --- a/deeptab/configs/tabtransformer_config.py +++ b/deeptab/configs/tabtransformer_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from ..arch_utils.transformer_utils import ReGLU from .base_config import BaseConfig diff --git a/deeptab/configs/tabularnn_config.py b/deeptab/configs/tabularnn_config.py index 99a74e07..f271505f 100644 --- a/deeptab/configs/tabularnn_config.py +++ b/deeptab/configs/tabularnn_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import 
torch.nn as nn + from .base_config import BaseConfig diff --git a/deeptab/configs/tangos_config.py b/deeptab/configs/tangos_config.py index 0cb58e86..1501b8ff 100644 --- a/deeptab/configs/tangos_config.py +++ b/deeptab/configs/tangos_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from .base_config import BaseConfig @@ -34,4 +36,3 @@ class DefaultTangosConfig(BaseConfig): lamda1: float = 0.5 lamda2: float = 0.1 subsample: float = 0.5 - diff --git a/deeptab/configs/trompt_config.py b/deeptab/configs/trompt_config.py index 127e60bc..16ebf94c 100644 --- a/deeptab/configs/trompt_config.py +++ b/deeptab/configs/trompt_config.py @@ -1,6 +1,8 @@ from collections.abc import Callable from dataclasses import dataclass, field + import torch.nn as nn + from ..arch_utils.transformer_utils import ReGLU from .base_config import BaseConfig diff --git a/deeptab/data_utils/datamodule.py b/deeptab/data_utils/datamodule.py index bc10ebcf..7c0d3fcf 100644 --- a/deeptab/data_utils/datamodule.py +++ b/deeptab/data_utils/datamodule.py @@ -159,7 +159,7 @@ def preprocess_data( self.embeddings_val = None # Fit the preprocessor on the combined training and validation data - combined_X = pd.concat([self.X_train, self.X_val], axis=0).reset_index(drop=True) + combined_X = pd.concat([self.X_train, self.X_val], axis=0).reset_index(drop=True) # type: ignore[arg-type] combined_y = np.concatenate((self.y_train, self.y_val), axis=0) if self.embeddings_train is not None and self.embeddings_val is not None: diff --git a/deeptab/data_utils/dataset.py b/deeptab/data_utils/dataset.py index e447f9a2..1410607c 100644 --- a/deeptab/data_utils/dataset.py +++ b/deeptab/data_utils/dataset.py @@ -24,8 +24,8 @@ def __init__( labels=None, regression=True, ): - assert cat_features_list or num_features_list - + assert cat_features_list or num_features_list # noqa: S101 + self.cat_features_list = cat_features_list # Categorical features 
tensors self.num_features_list = num_features_list # Numerical features tensors self.embeddings_list = embeddings_list # Embeddings tensors (optional) @@ -61,9 +61,7 @@ def __getitem__(self, idx): tuple: A tuple containing lists of tensors for numerical features, categorical features, embeddings (if available), and a label (if available). """ - cat_features = [ - feature_tensor[idx] for feature_tensor in self.cat_features_list - ] + cat_features = [feature_tensor[idx] for feature_tensor in self.cat_features_list] num_features = [ torch.as_tensor(feature_tensor[idx]).clone().detach().to(torch.float32) for feature_tensor in self.num_features_list diff --git a/deeptab/models/__init__.py b/deeptab/models/__init__.py index 4db9342f..ebffddc7 100644 --- a/deeptab/models/__init__.py +++ b/deeptab/models/__init__.py @@ -1,3 +1,5 @@ +from .autoint import AutoIntClassifier, AutoIntLSS, AutoIntRegressor +from .enode import ENODELSS, ENODEClassifier, ENODERegressor from .fttransformer import ( FTTransformerClassifier, FTTransformerLSS, @@ -11,13 +13,11 @@ ) from .mambular import MambularClassifier, MambularLSS, MambularRegressor from .mlp import MLPLSS, MLPClassifier, MLPRegressor +from .modern_nca import ModernNCAClassifier, ModernNCALSS, ModernNCARegressor from .ndtf import NDTFLSS, NDTFClassifier, NDTFRegressor from .node import NODELSS, NODEClassifier, NODERegressor from .resnet import ResNetClassifier, ResNetLSS, ResNetRegressor from .saint import SAINTLSS, SAINTClassifier, SAINTRegressor -from .utils.sklearn_base_classifier import SklearnBaseClassifier -from .utils.sklearn_base_lss import SklearnBaseLSS -from .utils.sklearn_base_regressor import SklearnBaseRegressor from .tabm import TabMClassifier, TabMLSS, TabMRegressor from .tabtransformer import ( TabTransformerClassifier, @@ -25,32 +25,23 @@ TabTransformerRegressor, ) from .tabularnn import TabulaRNNClassifier, TabulaRNNLSS, TabulaRNNRegressor -from .autoint import AutoIntClassifier, AutoIntLSS, AutoIntRegressor 
-from .trompt import TromptClassifier, TromptLSS, TromptRegressor -from .enode import ENODEClassifier, ENODELSS, ENODERegressor from .tangos import TangosClassifier, TangosLSS, TangosRegressor -from .modern_nca import ModernNCARegressor, ModernNCAClassifier, ModernNCALSS +from .trompt import TromptClassifier, TromptLSS, TromptRegressor +from .utils.sklearn_base_classifier import SklearnBaseClassifier +from .utils.sklearn_base_lss import SklearnBaseLSS +from .utils.sklearn_base_regressor import SklearnBaseRegressor __all__ = [ - "ModernNCARegressor", - "ModernNCAClassifier", - "ModernNCALSS", - "TangosClassifier", - "TangosLSS", - "TangosRegressor", - "ENODEClassifier", "ENODELSS", - "ENODERegressor", - "TromptClassifier", - "TromptLSS", - "TromptRegressor", - "AutoIntClassifier", - "AutoIntLSS", - "AutoIntRegressor", "MLPLSS", "NDTFLSS", "NODELSS", "SAINTLSS", + "AutoIntClassifier", + "AutoIntLSS", + "AutoIntRegressor", + "ENODEClassifier", + "ENODERegressor", "FTTransformerClassifier", "FTTransformerLSS", "FTTransformerRegressor", @@ -65,6 +56,9 @@ "MambularClassifier", "MambularLSS", "MambularRegressor", + "ModernNCAClassifier", + "ModernNCALSS", + "ModernNCARegressor", "NDTFClassifier", "NDTFRegressor", "NODEClassifier", @@ -86,4 +80,10 @@ "TabulaRNNClassifier", "TabulaRNNLSS", "TabulaRNNRegressor", + "TangosClassifier", + "TangosLSS", + "TangosRegressor", + "TromptClassifier", + "TromptLSS", + "TromptRegressor", ] diff --git a/deeptab/models/fttransformer.py b/deeptab/models/fttransformer.py index a8c4eb90..b56bfdf7 100644 --- a/deeptab/models/fttransformer.py +++ b/deeptab/models/fttransformer.py @@ -24,9 +24,7 @@ class and uses the FTTransformer model with the default FTTransformer ) def __init__(self, **kwargs): - super().__init__( - model=FTTransformer, config=DefaultFTTransformerConfig, **kwargs - ) + super().__init__(model=FTTransformer, config=DefaultFTTransformerConfig, **kwargs) class FTTransformerClassifier(SklearnBaseClassifier): @@ -44,9 +42,7 @@ 
class FTTransformerClassifier(SklearnBaseClassifier): ) def __init__(self, **kwargs): - super().__init__( - model=FTTransformer, config=DefaultFTTransformerConfig, **kwargs - ) + super().__init__(model=FTTransformer, config=DefaultFTTransformerConfig, **kwargs) class FTTransformerLSS(SklearnBaseLSS): @@ -65,6 +61,4 @@ class FTTransformerLSS(SklearnBaseLSS): ) def __init__(self, **kwargs): - super().__init__( - model=FTTransformer, config=DefaultFTTransformerConfig, **kwargs - ) + super().__init__(model=FTTransformer, config=DefaultFTTransformerConfig, **kwargs) diff --git a/deeptab/models/mambattention.py b/deeptab/models/mambattention.py index 0ff9c06e..691b5791 100644 --- a/deeptab/models/mambattention.py +++ b/deeptab/models/mambattention.py @@ -23,9 +23,7 @@ class MambAttentionRegressor(SklearnBaseRegressor): ) def __init__(self, **kwargs): - super().__init__( - model=MambAttention, config=DefaultMambAttentionConfig, **kwargs - ) + super().__init__(model=MambAttention, config=DefaultMambAttentionConfig, **kwargs) class MambAttentionClassifier(SklearnBaseClassifier): @@ -45,9 +43,7 @@ class MambAttentionClassifier(SklearnBaseClassifier): ) def __init__(self, **kwargs): - super().__init__( - model=MambAttention, config=DefaultMambAttentionConfig, **kwargs - ) + super().__init__(model=MambAttention, config=DefaultMambAttentionConfig, **kwargs) class MambAttentionLSS(SklearnBaseLSS): @@ -67,6 +63,4 @@ class MambAttentionLSS(SklearnBaseLSS): ) def __init__(self, **kwargs): - super().__init__( - model=MambAttention, config=DefaultMambAttentionConfig, **kwargs - ) + super().__init__(model=MambAttention, config=DefaultMambAttentionConfig, **kwargs) diff --git a/deeptab/models/tabr.py b/deeptab/models/tabr.py index 76c4fba0..48f6c30b 100644 --- a/deeptab/models/tabr.py +++ b/deeptab/models/tabr.py @@ -21,6 +21,7 @@ class TabRRegressor(SklearnBaseRegressor): >>> model.evaluate(X_test, y_test) """, ) + def __init__(self, **kwargs): super().__init__(model=TabR, 
config=DefaultTabRConfig, **kwargs) @@ -40,6 +41,7 @@ class TabRClassifier(SklearnBaseClassifier): >>> model.evaluate(X_test, y_test) """, ) + def __init__(self, **kwargs): super().__init__(model=TabR, config=DefaultTabRConfig, **kwargs) @@ -59,5 +61,6 @@ class TabRLSS(SklearnBaseLSS): >>> model.evaluate(X_test, y_test) """, ) + def __init__(self, **kwargs): super().__init__(model=TabR, config=DefaultTabRConfig, **kwargs) diff --git a/deeptab/models/tabtransformer.py b/deeptab/models/tabtransformer.py index 9c01dcf8..50638d68 100644 --- a/deeptab/models/tabtransformer.py +++ b/deeptab/models/tabtransformer.py @@ -23,9 +23,7 @@ class TabTransformerRegressor(SklearnBaseRegressor): ) def __init__(self, **kwargs): - super().__init__( - model=TabTransformer, config=DefaultTabTransformerConfig, **kwargs - ) + super().__init__(model=TabTransformer, config=DefaultTabTransformerConfig, **kwargs) class TabTransformerClassifier(SklearnBaseClassifier): @@ -45,9 +43,7 @@ class TabTransformerClassifier(SklearnBaseClassifier): ) def __init__(self, **kwargs): - super().__init__( - model=TabTransformer, config=DefaultTabTransformerConfig, **kwargs - ) + super().__init__(model=TabTransformer, config=DefaultTabTransformerConfig, **kwargs) class TabTransformerLSS(SklearnBaseLSS): @@ -67,6 +63,4 @@ class TabTransformerLSS(SklearnBaseLSS): ) def __init__(self, **kwargs): - super().__init__( - model=TabTransformer, config=DefaultTabTransformerConfig, **kwargs - ) + super().__init__(model=TabTransformer, config=DefaultTabTransformerConfig, **kwargs) diff --git a/deeptab/models/utils/sklearn_base_classifier.py b/deeptab/models/utils/sklearn_base_classifier.py index a92d906d..82d065d7 100644 --- a/deeptab/models/utils/sklearn_base_classifier.py +++ b/deeptab/models/utils/sklearn_base_classifier.py @@ -1,19 +1,19 @@ import warnings from collections.abc import Callable + +import numpy as np import pandas as pd import torch from sklearn.metrics import accuracy_score, log_loss + from 
.sklearn_parent import SklearnBase -import numpy as np class SklearnBaseClassifier(SklearnBase): def __init__(self, model, config, **kwargs): super().__init__(model, config, **kwargs) # Raise a warning if task is set to 'classification' - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k in self.preprocessor_arg_names - } + preprocessor_kwargs = {k: v for k, v in kwargs.items() if k in self.preprocessor_arg_names} if preprocessor_kwargs.get("task") == "regression": warnings.warn( @@ -299,7 +299,7 @@ def predict_proba(self, X, embeddings=None, device=None): logits_list = self.trainer.predict(self.task_model, self.data_module) # Concatenate predictions from all batches - logits = torch.cat(logits_list, dim=0) + logits = torch.cat(logits_list, dim=0) # type: ignore[arg-type] # Check if ensemble is used if getattr(self.estimator, "returns_ensemble", False): # If using ensemble @@ -450,17 +450,15 @@ def pretrain( """ if not self.built: - raise ValueError( - "The model has not been built yet. Call model.build_model(**args) first." - ) + raise ValueError("The model has not been built yet. 
Call model.build_model(**args) first.") - if not hasattr(self.task_model.estimator, "embedding_layer"): + if not hasattr(self.task_model.estimator, "embedding_layer"): # type: ignore[union-attr] raise ValueError("The model does not have an embedding layer") self.data_module.setup("fit") super()._pretrain( - self.task_model.estimator, + self.task_model.estimator, # type: ignore[union-attr] self.data_module, pretrain_epochs=pretrain_epochs, k_neighbors=k_neighbors, diff --git a/deeptab/models/utils/sklearn_base_lss.py b/deeptab/models/utils/sklearn_base_lss.py index 60c0af2d..de6e8e9d 100644 --- a/deeptab/models/utils/sklearn_base_lss.py +++ b/deeptab/models/utils/sklearn_base_lss.py @@ -7,6 +7,7 @@ import properscoring as ps import torch from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint, ModelSummary +from pretab.preprocessor import Preprocessor from sklearn.base import BaseEstimator from sklearn.metrics import accuracy_score, mean_squared_error from torch.utils.data import DataLoader @@ -14,8 +15,6 @@ from ...base_models.utils.lightning_wrapper import TaskModel from ...data_utils.datamodule import MambularDataModule -from pretab.preprocessor import Preprocessor - from ...utils.distributional_metrics import ( beta_brier_score, dirichlet_error, @@ -31,12 +30,12 @@ DirichletDistribution, GammaDistribution, InverseGammaDistribution, + JohnsonSuDistribution, NegativeBinomialDistribution, NormalDistribution, PoissonDistribution, Quantile, StudentTDistribution, - JohnsonSuDistribution, ) @@ -61,15 +60,11 @@ def __init__(self, model, config, **kwargs): ] self.config_kwargs = { - k: v - for k, v in kwargs.items() - if k not in self.preprocessor_arg_names and not k.startswith("optimizer") + k: v for k, v in kwargs.items() if k not in self.preprocessor_arg_names and not k.startswith("optimizer") } self.config = config(**self.config_kwargs) - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k in self.preprocessor_arg_names - } + 
preprocessor_kwargs = {k: v for k, v in kwargs.items() if k in self.preprocessor_arg_names} self.preprocessor = Preprocessor(**preprocessor_kwargs) self.task_model = None @@ -90,8 +85,7 @@ def __init__(self, model, config, **kwargs): self.optimizer_kwargs = { k: v for k, v in kwargs.items() - if k - not in ["lr", "weight_decay", "patience", "lr_patience", "optimizer_type"] + if k not in ["lr", "weight_decay", "patience", "lr_patience", "optimizer_type"] and k.startswith("optimizer_") } @@ -112,10 +106,7 @@ def get_params(self, deep=True): params.update(self.config_kwargs) if deep: - preprocessor_params = { - "prepro__" + key: value - for key, value in self.preprocessor.get_params().items() - } + preprocessor_params = {"prepro__" + key: value for key, value in self.preprocessor.get_params().items()} # type: ignore[attr-defined] params.update(preprocessor_params) return params @@ -133,14 +124,8 @@ def set_params(self, **parameters): self : object Estimator instance. """ - config_params = { - k: v for k, v in parameters.items() if not k.startswith("prepro__") - } - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("prepro__") - } + config_params = {k: v for k, v in parameters.items() if not k.startswith("prepro__")} + preprocessor_params = {k.split("__")[1]: v for k, v in parameters.items() if k.startswith("prepro__")} if config_params: self.config_kwargs.update(config_params) @@ -151,7 +136,7 @@ def set_params(self, **parameters): self.config = self.config_class(**self.config_kwargs) # type: ignore if preprocessor_params: - self.preprocessor.set_params(**preprocessor_params) + self.preprocessor.set_params(**preprocessor_params) # type: ignore[attr-defined] return self @@ -236,9 +221,7 @@ def build_model( **dataloader_kwargs, ) - self.data_module.preprocess_data( - X, y, X_val, y_val, val_size=val_size, random_state=random_state - ) + self.data_module.preprocess_data(X, y, X_val, y_val, val_size=val_size, 
random_state=random_state) self.task_model = TaskModel( model_class=self.estimator, # type: ignore @@ -251,13 +234,9 @@ def build_model( self.data_module.embedding_feature_info, ), lr=lr if lr is not None else self.config.lr, - lr_patience=( - lr_patience if lr_patience is not None else self.config.lr_patience - ), + lr_patience=(lr_patience if lr_patience is not None else self.config.lr_patience), lr_factor=lr_factor if lr_factor is not None else self.config.lr_factor, - weight_decay=( - weight_decay if weight_decay is not None else self.config.weight_decay - ), + weight_decay=(weight_decay if weight_decay is not None else self.config.weight_decay), lss=True, train_metrics=train_metrics, val_metrics=val_metrics, @@ -290,9 +269,7 @@ def get_number_of_params(self, requires_grad=True): If the model has not been built prior to calling this method. """ if not self.built: - raise ValueError( - "The model must be built before the number of parameters can be estimated" - ) + raise ValueError("The model must be built before the number of parameters can be estimated") else: if requires_grad: return sum(p.numel() for p in self.task_model.parameters() if p.requires_grad) # type: ignore @@ -494,7 +471,7 @@ def predict(self, X, raw=False, device=None): predictions_list = self.trainer.predict(self.task_model, self.data_module) # Concatenate predictions from all batches - predictions = torch.cat(predictions_list, dim=0) + predictions = torch.cat(predictions_list, dim=0) # type: ignore[arg-type] # Check if ensemble is used if getattr(self.estimator, "returns_ensemble", False): # If using ensemble @@ -535,9 +512,7 @@ def evaluate(self, X, y_true, metrics=None, distribution_family=None): """ # Infer distribution family from model settings if not provided if distribution_family is None: - distribution_family = getattr( - self.task_model, "distribution_family", "normal" - ) + distribution_family = getattr(self.task_model, "distribution_family", "normal") # Setup default metrics if 
none are provided if metrics is None: @@ -573,10 +548,7 @@ def get_default_metrics(self, distribution_family): "normal": { "MSE": lambda y, pred: mean_squared_error(y, pred[:, 0]), "CRPS": lambda y, pred: np.mean( - [ - ps.crps_gaussian(y[i], mu=pred[i, 0], sig=np.sqrt(pred[i, 1])) - for i in range(len(y)) - ] + [ps.crps_gaussian(y[i], mu=pred[i, 0], sig=np.sqrt(pred[i, 1])) for i in range(len(y))] ), }, "poisson": {"Poisson Deviance": poisson_deviance}, @@ -642,9 +614,7 @@ def encode(self, X, batch_size=64): # Process data in batches encoded_outputs = [] for num_features, cat_features in tqdm(data_loader): - embeddings = self.task_model.estimator.encode( - num_features, cat_features - ) # Call your encode function + embeddings = self.task_model.estimator.encode(num_features, cat_features) # type: ignore[union-attr] # Call your encode function encoded_outputs.append(embeddings) # Concatenate all encoded outputs @@ -700,7 +670,7 @@ def optimize_hparams( Best hyperparameters found during optimization. 
""" - return super().optimize_hparams( + return super().optimize_hparams( # type: ignore[attr-defined] X, y, regression=False, diff --git a/deeptab/models/utils/sklearn_base_regressor.py b/deeptab/models/utils/sklearn_base_regressor.py index 426ff5fa..22cbccb8 100644 --- a/deeptab/models/utils/sklearn_base_regressor.py +++ b/deeptab/models/utils/sklearn_base_regressor.py @@ -1,7 +1,9 @@ import warnings from collections.abc import Callable + import torch from sklearn.metrics import mean_squared_error + from .sklearn_parent import SklearnBase @@ -9,9 +11,7 @@ class SklearnBaseRegressor(SklearnBase): def __init__(self, model, config, **kwargs): super().__init__(model, config, **kwargs) # Raise a warning if task is set to 'classification' - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k in self.preprocessor_arg_names - } + preprocessor_kwargs = {k: v for k, v in kwargs.items() if k in self.preprocessor_arg_names} if preprocessor_kwargs.get("task") == "classification": warnings.warn( @@ -250,9 +250,7 @@ def predict(self, X, embeddings=None, device=None): predictions = torch.cat(predictions_list, dim=0) # type: ignore # Check if ensemble is used - if getattr( - self.task_model.estimator, "returns_ensemble", False - ): # If using ensemble + if getattr(self.task_model.estimator, "returns_ensemble", False): # If using ensemble predictions = predictions.mean(dim=1) # Average over ensemble dimension # Convert predictions to NumPy array and return @@ -367,17 +365,15 @@ def pretrain( """ if not self.built: - raise ValueError( - "The model has not been built yet. Call model.build_model(**args) first." - ) + raise ValueError("The model has not been built yet. 
Call model.build_model(**args) first.") - if not hasattr(self.task_model.estimator, "embedding_layer"): + if not hasattr(self.task_model.estimator, "embedding_layer"): # type: ignore[union-attr] raise ValueError("The model does not have an embedding layer") self.data_module.setup("fit") super()._pretrain( - self.task_model.estimator, + self.task_model.estimator, # type: ignore[union-attr] self.data_module, pretrain_epochs=pretrain_epochs, k_neighbors=k_neighbors, diff --git a/deeptab/models/utils/sklearn_parent.py b/deeptab/models/utils/sklearn_parent.py index 509e6bc7..709c7231 100644 --- a/deeptab/models/utils/sklearn_parent.py +++ b/deeptab/models/utils/sklearn_parent.py @@ -5,21 +5,16 @@ import pandas as pd import torch from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint, ModelSummary +from pretab.preprocessor import Preprocessor from sklearn.base import BaseEstimator from skopt import gp_minimize from torch.utils.data import DataLoader from tqdm import tqdm from ...base_models.utils.lightning_wrapper import TaskModel -from ...data_utils.datamodule import MambularDataModule -from pretab.preprocessor import Preprocessor -from ...utils.config_mapper import ( - activation_mapper, - get_search_space, - round_to_nearest_16, -) - from ...base_models.utils.pretraining import pretrain_embeddings +from ...data_utils.datamodule import MambularDataModule +from ...utils.config_mapper import activation_mapper, get_search_space, round_to_nearest_16 class SklearnBase(BaseEstimator): @@ -43,15 +38,11 @@ def __init__(self, model, config, **kwargs): ] self.config_kwargs = { - k: v - for k, v in kwargs.items() - if k not in self.preprocessor_arg_names and not k.startswith("optimizer") + k: v for k, v in kwargs.items() if k not in self.preprocessor_arg_names and not k.startswith("optimizer") } self.config = config(**self.config_kwargs) - self.preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k in self.preprocessor_arg_names - } + 
self.preprocessor_kwargs = {k: v for k, v in kwargs.items() if k in self.preprocessor_arg_names} self.preprocessor = Preprocessor(**self.preprocessor_kwargs) self.estimator = model @@ -63,8 +54,7 @@ def __init__(self, model, config, **kwargs): self.optimizer_kwargs = { k: v for k, v in kwargs.items() - if k - not in ["lr", "weight_decay", "patience", "lr_patience", "optimizer_type"] + if k not in ["lr", "weight_decay", "patience", "lr_patience", "optimizer_type"] and k.startswith("optimizer_") } @@ -76,7 +66,7 @@ def get_params(self, deep=True): if deep: preprocessor_params = { key: value - for key, value in self.preprocessor.get_params().items() + for key, value in self.preprocessor.get_params().items() # type: ignore[attr-defined] if key in self.preprocessor_arg_names } params.update(preprocessor_params) @@ -84,12 +74,8 @@ def get_params(self, deep=True): def set_params(self, **parameters): """Set the parameters of this estimator.""" - config_params = { - k: v for k, v in parameters.items() if k not in self.preprocessor_arg_names - } - preprocessor_params = { - k: v for k, v in parameters.items() if k in self.preprocessor_arg_names - } + config_params = {k: v for k, v in parameters.items() if k not in self.preprocessor_arg_names} + preprocessor_params = {k: v for k, v in parameters.items() if k in self.preprocessor_arg_names} # Update config and preprocessor parameters correctly if config_params: @@ -97,7 +83,7 @@ def set_params(self, **parameters): if preprocessor_params: self.preprocessor_kwargs.update(preprocessor_params) - self.preprocessor.set_params(**self.preprocessor_kwargs) + self.preprocessor.set_params(**self.preprocessor_kwargs) # type: ignore[attr-defined] return self @@ -120,7 +106,7 @@ def _build_model( y_val=None, embeddings=None, embeddings_val=None, - num_classes: int = None, + num_classes: int | None = None, random_state: int = 101, batch_size: int = 128, shuffle: bool = True, @@ -217,14 +203,10 @@ def _build_model( 
self.data_module.embedding_feature_info, ), lr=lr if lr is not None else self.config.lr, - lr_patience=( - lr_patience if lr_patience is not None else self.config.lr_patience - ), + lr_patience=(lr_patience if lr_patience is not None else self.config.lr_patience), lr_factor=lr_factor if lr_factor is not None else self.config.lr_factor, - weight_decay=( - weight_decay if weight_decay is not None else self.config.weight_decay - ), - num_classes=num_classes, + weight_decay=(weight_decay if weight_decay is not None else self.config.weight_decay), + num_classes=num_classes, # type: ignore[arg-type] train_metrics=train_metrics, val_metrics=val_metrics, optimizer_type=self.optimizer_type, @@ -256,9 +238,7 @@ def get_number_of_params(self, requires_grad=True): If the model has not been built prior to calling this method. """ if not self.built: - raise ValueError( - "The model must be built before the number of parameters can be estimated" - ) + raise ValueError("The model must be built before the number of parameters can be estimated") else: if requires_grad: return sum(p.numel() for p in self.task_model.parameters() if p.requires_grad) # type: ignore @@ -275,7 +255,7 @@ def fit( y_val=None, embeddings=None, embeddings_val=None, - num_classes: int = None, + num_classes: int | None = None, max_epochs: int = 100, random_state: int = 101, batch_size: int = 128, @@ -402,8 +382,8 @@ def fit( ], **trainer_kwargs, ) - self.task_model.train() - self.task_model.estimator.train() + self.task_model.train() # type: ignore[union-attr] + self.task_model.estimator.train() # type: ignore[union-attr] self.trainer.fit(self.task_model, self.data_module) # type: ignore self.best_model_path = checkpoint_callback.best_model_path @@ -427,9 +407,7 @@ def _score(self, X, y, embeddings, metric): return metric(y, predictions) def predict(self, X, embeddings=None, device=None): - raise NotImplementedError( - "The 'predict' method is not implemented in the Parent class." 
- ) + raise NotImplementedError("The 'predict' method is not implemented in the Parent class.") def encode(self, X, embeddings=None, batch_size=64): """ @@ -464,7 +442,7 @@ def encode(self, X, embeddings=None, batch_size=64): for batch in tqdm(data_loader): embeddings = self.task_model.estimator.encode( batch - ) # Call your encode function + ) # Call your encode function # type: ignore[union-attr] encoded_outputs.append(embeddings) # Concatenate all encoded outputs @@ -486,7 +464,6 @@ def _pretrain( use_negative=True, pool_sequence=True, ): - pretrain_embeddings( base_model=base_model, train_dataloader=train_dataloader, @@ -572,17 +549,13 @@ def optimize_hparams( ) best_val_loss = float("inf") - if hasattr(self, "score") and callable(self.score): + if hasattr(self, "score") and callable(self.score): # type: ignore[attr-defined] if X_val is not None and y_val is not None: - val_loss = self.score(X_val, y_val) + val_loss = self.score(X_val, y_val) # type: ignore[attr-defined] else: - val_loss = self.trainer.validate(self.task_model, self.data_module)[0][ - "val_loss" - ] + val_loss = self.trainer.validate(self.task_model, self.data_module)[0]["val_loss"] else: - raise NotImplementedError( - "The 'score' method is not implemented in the child class." 
- ) + raise NotImplementedError("The 'score' method is not implemented in the child class.") best_val_loss = val_loss best_epoch_val_loss = self.task_model.epoch_val_loss_at( # type: ignore @@ -608,9 +581,7 @@ def _objective(hyperparams): if param_value in activation_mapper: setattr(self.config, key, activation_mapper[param_value]) else: - raise ValueError( - f"Unknown activation function: {param_value}" - ) + raise ValueError(f"Unknown activation function: {param_value}") else: setattr(self.config, key, param_value) @@ -633,12 +604,10 @@ def _objective(hyperparams): # Dynamically set the early pruning threshold if prune_by_epoch: - early_pruning_threshold = ( - best_epoch_val_loss * 1.5 - ) # Prune based on specific epoch loss + early_pruning_threshold = best_epoch_val_loss * 1.5 # Prune based on specific epoch loss else: # Prune based on the best overall validation loss - early_pruning_threshold = best_val_loss * 1.5 + early_pruning_threshold = best_val_loss * 1.5 # type: ignore[operator] # Initialize the model with pruning self.task_model.early_pruning_threshold = early_pruning_threshold # type: ignore @@ -659,15 +628,11 @@ def _objective(hyperparams): # Evaluate validation loss if hasattr(self, "score") and callable(self._score): if X_val is not None and y_val is not None: - val_loss = self._score(X_val, y_val) + val_loss = self._score(X_val, y_val) # type: ignore[call-arg] else: - val_loss = self.trainer.validate( - self.task_model, self.data_module - )[0]["val_loss"] + val_loss = self.trainer.validate(self.task_model, self.data_module)[0]["val_loss"] else: - raise NotImplementedError( - "The 'score' method is not implemented in the child class." 
- ) + raise NotImplementedError("The 'score' method is not implemented in the child class.") # Pruning based on validation loss at specific epoch epoch_val_loss = self.task_model.epoch_val_loss_at( # type: ignore @@ -677,28 +642,22 @@ def _objective(hyperparams): if prune_by_epoch and epoch_val_loss < best_epoch_val_loss: best_epoch_val_loss = epoch_val_loss - if val_loss < best_val_loss: + if val_loss < best_val_loss: # type: ignore[operator] best_val_loss = val_loss return val_loss except Exception as e: # Penalize the hyperparameter configuration with a large value - print( - f"Error encountered during fit with hyperparameters {hyperparams}: {e}" - ) - return ( - best_val_loss * 100 - ) # Large value to discourage this configuration + print(f"Error encountered during fit with hyperparameters {hyperparams}: {e}") + return best_val_loss * 100 # Large value to discourage this configuration # type: ignore[operator] # Perform Bayesian optimization using scikit-optimize result = gp_minimize(_objective, param_space, n_calls=time, random_state=42) # Update the model with the best-found hyperparameters best_hparams = result.x # type: ignore - head_layer_sizes = ( - [] if "head_layer_sizes" in self.config.__dataclass_fields__ else None - ) + head_layer_sizes = [] if "head_layer_sizes" in self.config.__dataclass_fields__ else None layer_sizes = [] if "layer_sizes" in self.config.__dataclass_fields__ else None # Iterate over the best hyperparameters found by optimization diff --git a/deeptab/utils/config_mapper.py b/deeptab/utils/config_mapper.py index 42df2426..1378f47c 100644 --- a/deeptab/utils/config_mapper.py +++ b/deeptab/utils/config_mapper.py @@ -122,7 +122,7 @@ def get_search_space( # Optimize individual layer sizes layer_size_min, layer_size_max = 16, 512 for i in range(max_head_layers): - layer_key = f"head_layer_size_{i+1}" + layer_key = f"head_layer_size_{i + 1}" param_names.append(layer_key) param_space.append(Integer(layer_size_min, layer_size_max)) diff 
--git a/deeptab/utils/distributions.py b/deeptab/utils/distributions.py index e79291b6..7420bb98 100644 --- a/deeptab/utils/distributions.py +++ b/deeptab/utils/distributions.py @@ -116,9 +116,7 @@ def forward(self, predictions): """ transformed_params = [] for idx, param_name in enumerate(self.param_names): - transform_func = self.get_transform( - getattr(self, f"{param_name}_transform", "none") - ) + transform_func = self.get_transform(getattr(self, f"{param_name}_transform", "none")) transformed_params.append( transform_func(predictions[:, idx]).unsqueeze( # type: ignore 1 @@ -155,9 +153,7 @@ def __init__(self, name="Normal", mean_transform="none", var_transform="positive def compute_loss(self, predictions, y_true): mean = self.mean_transform(predictions[:, self.param_names.index("mean")]) - variance = self.variance_transform( - predictions[:, self.param_names.index("variance")] - ) + variance = self.variance_transform(predictions[:, self.param_names.index("variance")]) normal_dist = dist.Normal(mean, variance) @@ -171,14 +167,10 @@ def evaluate_nll(self, y_true, y_pred): y_true_tensor = torch.tensor(y_true, dtype=torch.float32) y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32) - mse_loss = torch.nn.functional.mse_loss( - y_true_tensor, y_pred_tensor[:, self.param_names.index("mean")] - ) + mse_loss = torch.nn.functional.mse_loss(y_true_tensor, y_pred_tensor[:, self.param_names.index("mean")]) rmse = np.sqrt(mse_loss.detach().numpy()) mae = ( - torch.nn.functional.l1_loss( - y_true_tensor, y_pred_tensor[:, self.param_names.index("mean")] - ) + torch.nn.functional.l1_loss(y_true_tensor, y_pred_tensor[:, self.param_names.index("mean")]) .detach() .numpy() ) @@ -236,9 +228,7 @@ def evaluate_nll(self, y_true, y_pred): .detach() .numpy() # type: ignore ) # type: ignore - poisson_deviance = 2 * torch.sum( - y_true_tensor * torch.log(y_true_tensor / rate) - (y_true_tensor - rate) - ) + poisson_deviance = 2 * torch.sum(y_true_tensor * torch.log(y_true_tensor / 
rate) - (y_true_tensor - rate)) # type: ignore[operator] metrics["mse"] = mse_loss.detach().numpy() metrics["mae"] = mae @@ -377,9 +367,7 @@ class GammaDistribution(BaseDistribution): rate_transform (str or callable): Transformation for the rate parameter to ensure it remains positive. """ - def __init__( - self, name="Gamma", shape_transform="positive", rate_transform="positive" - ): + def __init__(self, name="Gamma", shape_transform="positive", rate_transform="positive"): param_names = ["shape", "rate"] super().__init__(name, param_names) @@ -446,16 +434,10 @@ def evaluate_nll(self, y_true, y_pred): y_true_tensor = torch.tensor(y_true, dtype=torch.float32) y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32) - mse_loss = torch.nn.functional.mse_loss( - y_true_tensor, y_pred_tensor[:, self.param_names.index("loc")] - ) + mse_loss = torch.nn.functional.mse_loss(y_true_tensor, y_pred_tensor[:, self.param_names.index("loc")]) rmse = np.sqrt(mse_loss.detach().numpy()) mae = ( - torch.nn.functional.l1_loss( - y_true_tensor, y_pred_tensor[:, self.param_names.index("loc")] - ) - .detach() - .numpy() + torch.nn.functional.l1_loss(y_true_tensor, y_pred_tensor[:, self.param_names.index("loc")]).detach().numpy() ) metrics["mse"] = mse_loss.detach().numpy() @@ -496,14 +478,12 @@ def __init__( def compute_loss(self, predictions, y_true): # Apply transformations to ensure mean and dispersion parameters are positive mean = self.mean_transform(predictions[:, self.param_names.index("mean")]) - dispersion = self.dispersion_transform( - predictions[:, self.param_names.index("dispersion")] - ) + dispersion = self.dispersion_transform(predictions[:, self.param_names.index("dispersion")]) # Calculate the probability (p) and number of successes (r) from mean and dispersion # These calculations follow from the mean and variance of the negative binomial distribution # where variance = mean + mean^2 / dispersion - r = torch.tensor(1.0) / dispersion + r = torch.tensor(1.0) / dispersion 
# type: ignore[operator] p = r / (r + mean) # Define the Negative Binomial distribution with the transformed parameters @@ -631,9 +611,7 @@ def log_prob(self, x, skew, shape, loc, scale): """ z = skew + shape * torch.asinh((x - loc) / scale) log_pdf = ( - torch.log(shape / (scale * np.sqrt(2 * np.pi))) - - 0.5 * z**2 - - 0.5 * torch.log(1 + ((x - loc) / scale) ** 2) + torch.log(shape / (scale * np.sqrt(2 * np.pi))) - 0.5 * z**2 - 0.5 * torch.log(1 + ((x - loc) / scale) ** 2) ) return log_pdf @@ -653,14 +631,10 @@ def evaluate_nll(self, y_true, y_pred): y_true_tensor = torch.tensor(y_true, dtype=torch.float32) y_pred_tensor = torch.tensor(y_pred, dtype=torch.float32) - mse_loss = torch.nn.functional.mse_loss( - y_true_tensor, y_pred_tensor[:, self.param_names.index("location")] - ) + mse_loss = torch.nn.functional.mse_loss(y_true_tensor, y_pred_tensor[:, self.param_names.index("location")]) rmse = np.sqrt(mse_loss.detach().numpy()) mae = ( - torch.nn.functional.l1_loss( - y_true_tensor, y_pred_tensor[:, self.param_names.index("location")] - ) + torch.nn.functional.l1_loss(y_true_tensor, y_pred_tensor[:, self.param_names.index("location")]) .detach() .numpy() ) diff --git a/deeptab/utils/docstring_generator.py b/deeptab/utils/docstring_generator.py index 7c4e1e3b..f570ee2c 100644 --- a/deeptab/utils/docstring_generator.py +++ b/deeptab/utils/docstring_generator.py @@ -1,3 +1,6 @@ +import inspect +import textwrap + from pretab.preprocessor import Preprocessor @@ -6,27 +9,34 @@ def generate_docstring(config, model_description, examples): The `Parameters` tag is stripped from the Preprocessor docstring to avoid duplication. """ - config_doc = config.__doc__ or "No documentation for DefaultFTTransformerConfig." - preprocessor_doc = Preprocessor.__doc__ or "No documentation for Preprocessor." 
- - # Remove "Parameters" section header from the Preprocessor docstring - preprocessor_doc_cleaned = preprocessor_doc.split("Parameters\n ----------\n", 1)[-1].strip() - + # inspect.cleandoc is the correct tool for Python docstrings: it strips + # leading blank lines, then removes the common indentation from lines 2+ + # (the class-body indent). textwrap.dedent cannot do this because Python + # stores line 1 without any leading whitespace, making the common indent 0. + config_doc = inspect.cleandoc(config.__doc__ or "No documentation.") + preprocessor_doc = inspect.cleandoc(Preprocessor.__doc__ or "No documentation.") + + # After cleandoc the section header is at column 0: "Parameters\n----------\n" + preprocessor_doc_cleaned = preprocessor_doc.split("Parameters\n----------\n", 1)[-1].strip() preprocessor_doc_cleaned = preprocessor_doc_cleaned.split("Attributes")[0].strip() - config_doc += preprocessor_doc_cleaned + # Combine config doc + preprocessor params, then re-indent uniformly at 4 spaces. + config_doc_indented = textwrap.indent(config_doc + "\n\n" + preprocessor_doc_cleaned, " ") + + description_indented = textwrap.indent(textwrap.dedent(model_description).strip(), " ") + examples_indented = textwrap.indent(textwrap.dedent(examples).strip(), " ") return f""" - {model_description.strip()} +{description_indented} Notes ----- The parameters for this class include the attributes from the config dataclass as well as preprocessing arguments handled by the base class. - {config_doc} +{config_doc_indented} Examples -------- - {examples.strip()} +{examples_indented} """ diff --git a/docs/api/base_models/BaseModels.rst b/docs/api/base_models/BaseModels.rst index ccd41d80..d9b7176b 100644 --- a/docs/api/base_models/BaseModels.rst +++ b/docs/api/base_models/BaseModels.rst @@ -1,29 +1,25 @@ deeptab.base_models ======================= -.. autoclass:: deeptab.base_models.BaseModel - :members: - :no-inherited-members: - -.. 
autoclass:: deeptab.base_models.TaskModel - :members: - :no-inherited-members: - .. autoclass:: deeptab.base_models.Mambular :members: :no-inherited-members: + :exclude-members: forward .. autoclass:: deeptab.base_models.MLP :members: :no-inherited-members: + :exclude-members: forward .. autoclass:: deeptab.base_models.ResNet :members: :no-inherited-members: + :exclude-members: forward .. autoclass:: deeptab.base_models.FTTransformer :members: :no-inherited-members: + :exclude-members: forward .. autoclass:: deeptab.base_models.TabTransformer :members: @@ -33,22 +29,52 @@ deeptab.base_models :members: :no-inherited-members: -.. autoclass:: mambular.base_models.MambAttention +.. autoclass:: deeptab.base_models.MambAttention + :members: + :no-inherited-members: + :exclude-members: forward + +.. autoclass:: deeptab.base_models.MambaTab + :members: + :no-inherited-members: + :exclude-members: forward + +.. autoclass:: deeptab.base_models.TabM + :members: + :no-inherited-members: + +.. autoclass:: deeptab.base_models.NODE + :members: + :no-inherited-members: + :exclude-members: forward + +.. autoclass:: deeptab.base_models.NDTF + :members: + :no-inherited-members: + :exclude-members: forward, penalty_forward + +.. autoclass:: deeptab.base_models.SAINT + :members: + :no-inherited-members: + :exclude-members: forward + +.. autoclass:: deeptab.base_models.AutoInt :members: :no-inherited-members: -.. autoclass:: mambular.base_models.TabM +.. autoclass:: deeptab.base_models.ENODE :members: :no-inherited-members: + :exclude-members: forward -.. autoclass:: mambular.base_models.NODE +.. autoclass:: deeptab.base_models.ModernNCA :members: :no-inherited-members: -.. autoclass:: mambular.base_models.NDTF +.. autoclass:: deeptab.base_models.Tangos :members: :no-inherited-members: -.. autoclass:: mambular.base_models.SAINT +.. 
autoclass:: deeptab.base_models.Trompt :members: :no-inherited-members: diff --git a/docs/api/base_models/index.rst b/docs/api/base_models/index.rst index 8d80e362..ddf97a4c 100644 --- a/docs/api/base_models/index.rst +++ b/docs/api/base_models/index.rst @@ -1,17 +1,15 @@ .. -*- mode: rst -*- -.. currentmodule:: mambular.base_models +.. currentmodule:: deeptab.base_models BaseModels ========== -This module provides foundational classes and architectures for Mambular models, including various neural network architectures tailored for tabular data. +This module provides foundational classes and architectures for deeptab models, including various neural network architectures tailored for tabular data. ========================================= ======================================================================================================= Modules Description ========================================= ======================================================================================================= -:class:`BaseModel` Abstract base class defining the core structure and initialization logic for Mambular models. -:class:`TaskModel` PyTorch Lightning module for managing model training, validation, and testing workflows. :class:`Mambular` Flexible neural network model leveraging the Mamba architecture with configurable normalization techniques for tabular data. :class:`MLP` Multi-layer perceptron (MLP) model designed for tabular tasks, initialized with a custom configuration. :class:`ResNet` Deep residual network (ResNet) model optimized for structured/tabular datasets. @@ -22,7 +20,13 @@ Modules Description :class:`NDTF` Neural Decision Tree Forest (NDTF) model for tabular tasks, blending decision tree concepts with neural networks. :class:`TabulaRNN` Recurrent neural network (RNN) model, including LSTM and GRU architectures, tailored for sequential or time-series tabular data. 
:class:`MambAttention` Attention-based architecture for tabular tasks, combining feature importance weighting with advanced normalization techniques. -:class:`SAINT` SAINT model. Transformer based model using row and column attetion. +:class:`SAINT` SAINT model. Transformer based model using row and column attention. +:class:`MambaTab` Tabular model using a Mamba-Block on a joint input representation. +:class:`AutoInt` Automatic Feature Interaction model for tabular data. +:class:`ENODE` Embedding Neural Oblivious Decision Ensembles for tabular tasks. +:class:`ModernNCA` Modern Nearest Centroid Approach for tabular deep learning. +:class:`Tangos` Tangos model for tabular data. +:class:`Trompt` Trompt model for tabular data. ========================================= ======================================================================================================= diff --git a/docs/api/configs/Configurations.rst b/docs/api/configs/Configurations.rst index 4069103f..801b119d 100644 --- a/docs/api/configs/Configurations.rst +++ b/docs/api/configs/Configurations.rst @@ -1,50 +1,70 @@ Configurations =============== -.. autoclass:: mambular.configs.DefaultMambularConfig +.. autoclass:: deeptab.configs.DefaultMambularConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultFTTransformerConfig +.. autoclass:: deeptab.configs.DefaultFTTransformerConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultResNetConfig +.. autoclass:: deeptab.configs.DefaultResNetConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultMLPConfig +.. autoclass:: deeptab.configs.DefaultMLPConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultTabTransformerConfig +.. autoclass:: deeptab.configs.DefaultTabTransformerConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultMambaTabConfig +.. autoclass:: deeptab.configs.DefaultMambaTabConfig :members: :undoc-members: -.. 
autoclass:: mambular.configs.DefaultTabulaRNNConfig +.. autoclass:: deeptab.configs.DefaultTabulaRNNConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultMambAttentionConfig +.. autoclass:: deeptab.configs.DefaultMambAttentionConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultNDTFConfig +.. autoclass:: deeptab.configs.DefaultNDTFConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultNODEConfig +.. autoclass:: deeptab.configs.DefaultNODEConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultTabMConfig +.. autoclass:: deeptab.configs.DefaultTabMConfig :members: :undoc-members: -.. autoclass:: mambular.configs.DefaultSAINTConfig +.. autoclass:: deeptab.configs.DefaultSAINTConfig + :members: + :undoc-members: + +.. autoclass:: deeptab.configs.DefaultAutoIntConfig + :members: + :undoc-members: + +.. autoclass:: deeptab.configs.DefaultENODEConfig + :members: + :undoc-members: + +.. autoclass:: deeptab.configs.DefaultModernNCAConfig + :members: + :undoc-members: + +.. autoclass:: deeptab.configs.DefaultTangosConfig + :members: + :undoc-members: + +.. autoclass:: deeptab.configs.DefaultTromptConfig :members: :undoc-members: diff --git a/docs/api/models/Models.rst b/docs/api/models/Models.rst index 56c46fce..fd02d9fe 100644 --- a/docs/api/models/Models.rst +++ b/docs/api/models/Models.rst @@ -1,158 +1,218 @@ -mambular.models -=============== +deeptab.models +============== -.. autoclass:: mambular.models.MambularClassifier +.. autoclass:: deeptab.models.MambularClassifier :members: :inherited-members: -.. autoclass:: mambular.models.MambularRegressor +.. autoclass:: deeptab.models.MambularRegressor :members: :inherited-members: -.. autoclass:: mambular.models.MambularLSS +.. autoclass:: deeptab.models.MambularLSS :members: :undoc-members: -.. autoclass:: mambular.models.FTTransformerClassifier +.. autoclass:: deeptab.models.FTTransformerClassifier :members: :undoc-members: -.. 
autoclass:: mambular.models.FTTransformerRegressor +.. autoclass:: deeptab.models.FTTransformerRegressor :members: :undoc-members: -.. autoclass:: mambular.models.FTTransformerLSS +.. autoclass:: deeptab.models.FTTransformerLSS :members: :undoc-members: -.. autoclass:: mambular.models.MLPClassifier +.. autoclass:: deeptab.models.MLPClassifier :members: :undoc-members: -.. autoclass:: mambular.models.MLPRegressor +.. autoclass:: deeptab.models.MLPRegressor :members: :undoc-members: -.. autoclass:: mambular.models.MLPLSS +.. autoclass:: deeptab.models.MLPLSS :members: :undoc-members: -.. autoclass:: mambular.models.TabTransformerClassifier +.. autoclass:: deeptab.models.TabTransformerClassifier :members: :undoc-members: -.. autoclass:: mambular.models.TabTransformerRegressor +.. autoclass:: deeptab.models.TabTransformerRegressor :members: :undoc-members: -.. autoclass:: mambular.models.TabTransformerLSS +.. autoclass:: deeptab.models.TabTransformerLSS :members: :undoc-members: -.. autoclass:: mambular.models.ResNetClassifier +.. autoclass:: deeptab.models.ResNetClassifier :members: :undoc-members: -.. autoclass:: mambular.models.ResNetRegressor +.. autoclass:: deeptab.models.ResNetRegressor :members: :undoc-members: -.. autoclass:: mambular.models.ResNetLSS +.. autoclass:: deeptab.models.ResNetLSS :members: :undoc-members: -.. autoclass:: mambular.models.MambaTabClassifier +.. autoclass:: deeptab.models.MambaTabClassifier :members: :undoc-members: -.. autoclass:: mambular.models.MambaTabRegressor +.. autoclass:: deeptab.models.MambaTabRegressor :members: :undoc-members: -.. autoclass:: mambular.models.MambaTabLSS +.. autoclass:: deeptab.models.MambaTabLSS :members: :undoc-members: -.. autoclass:: mambular.models.MambAttentionClassifier +.. autoclass:: deeptab.models.MambAttentionClassifier :members: :undoc-members: -.. autoclass:: mambular.models.MambAttentionRegressor +.. autoclass:: deeptab.models.MambAttentionRegressor :members: :undoc-members: -.. 
autoclass:: mambular.models.MambAttentionLSS +.. autoclass:: deeptab.models.MambAttentionLSS :members: :undoc-members: -.. autoclass:: mambular.models.TabulaRNNClassifier +.. autoclass:: deeptab.models.TabulaRNNClassifier :members: :undoc-members: -.. autoclass:: mambular.models.TabulaRNNRegressor +.. autoclass:: deeptab.models.TabulaRNNRegressor :members: :undoc-members: -.. autoclass:: mambular.models.TabulaRNNLSS +.. autoclass:: deeptab.models.TabulaRNNLSS :members: :undoc-members: -.. autoclass:: mambular.models.TabMClassifier +.. autoclass:: deeptab.models.TabMClassifier :members: :inherited-members: -.. autoclass:: mambular.models.TabMRegressor +.. autoclass:: deeptab.models.TabMRegressor :members: :inherited-members: -.. autoclass:: mambular.models.TabMLSS +.. autoclass:: deeptab.models.TabMLSS :members: :undoc-members: -.. autoclass:: mambular.models.NODEClassifier +.. autoclass:: deeptab.models.NODEClassifier :members: :inherited-members: -.. autoclass:: mambular.models.NODERegressor +.. autoclass:: deeptab.models.NODERegressor :members: :inherited-members: -.. autoclass:: mambular.models.NODELSS +.. autoclass:: deeptab.models.NODELSS :members: :undoc-members: -.. autoclass:: mambular.models.NDTFClassifier +.. autoclass:: deeptab.models.NDTFClassifier :members: :inherited-members: -.. autoclass:: mambular.models.NDTFRegressor +.. autoclass:: deeptab.models.NDTFRegressor :members: :inherited-members: -.. autoclass:: mambular.models.NDTFLSS +.. autoclass:: deeptab.models.NDTFLSS :members: :undoc-members: -.. autoclass:: mambular.models.SAINTClassifier +.. autoclass:: deeptab.models.SAINTClassifier :members: :inherited-members: -.. autoclass:: mambular.models.SAINTRegressor +.. autoclass:: deeptab.models.SAINTRegressor :members: :inherited-members: -.. autoclass:: mambular.models.SAINTLSS +.. autoclass:: deeptab.models.SAINTLSS :members: :undoc-members: -.. autoclass:: mambular.models.SklearnBaseClassifier +.. 
autoclass:: deeptab.models.AutoIntClassifier :members: :undoc-members: -.. autoclass:: mambular.models.SklearnBaseLSS +.. autoclass:: deeptab.models.AutoIntRegressor :members: :undoc-members: -.. autoclass:: mambular.models.SklearnBaseRegressor +.. autoclass:: deeptab.models.AutoIntLSS + :members: + :undoc-members: + +.. autoclass:: deeptab.models.ENODEClassifier + :members: + :undoc-members: + +.. autoclass:: deeptab.models.ENODERegressor + :members: + :undoc-members: + +.. autoclass:: deeptab.models.ENODELSS + :members: + :undoc-members: + +.. autoclass:: deeptab.models.ModernNCAClassifier + :members: + :undoc-members: + +.. autoclass:: deeptab.models.ModernNCARegressor + :members: + :undoc-members: + +.. autoclass:: deeptab.models.ModernNCALSS + :members: + :undoc-members: + +.. autoclass:: deeptab.models.TangosClassifier + :members: + :undoc-members: + +.. autoclass:: deeptab.models.TangosRegressor + :members: + :undoc-members: + +.. autoclass:: deeptab.models.TangosLSS + :members: + :undoc-members: + +.. autoclass:: deeptab.models.TromptClassifier + :members: + :undoc-members: + +.. autoclass:: deeptab.models.TromptRegressor + :members: + :undoc-members: + +.. autoclass:: deeptab.models.TromptLSS + :members: + :undoc-members: + +.. autoclass:: deeptab.models.SklearnBaseClassifier + :members: + :undoc-members: + +.. autoclass:: deeptab.models.SklearnBaseLSS + :members: + :undoc-members: + +.. autoclass:: deeptab.models.SklearnBaseRegressor :members: :undoc-members: diff --git a/docs/api/models/index.rst b/docs/api/models/index.rst index 6a20c1aa..20fe8460 100644 --- a/docs/api/models/index.rst +++ b/docs/api/models/index.rst @@ -1,6 +1,6 @@ .. -*- mode: rst -*- -.. currentmodule:: mambular.models +.. 
currentmodule:: deeptab.models Models ====== diff --git a/docs/conf.py b/docs/conf.py index 8a21f912..5235d5bd 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -4,10 +4,12 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html import os +import sys +from importlib.metadata import PackageNotFoundError +from importlib.metadata import version as _version # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -import sys sys.path.insert(0, os.path.abspath(".")) sys.path.insert(0, os.path.abspath("../")) @@ -17,13 +19,11 @@ project_copyright = "2024, BASF SE" author = "Anton Frederik Thielmann, Manish Kumar, Christoph Weisser, Benjamin Saefken, Soheila Samiee" -VERSION_PATH = "../deeptab/__version__.py" -with open(VERSION_PATH) as f: - lines = f.readlines() - for line in lines: - if line.startswith("__version__"): - version = line.split("=")[-1].strip().strip('"') - release = version +try: + version = _version("deeptab") +except PackageNotFoundError: + version = "0+unknown" +release = version # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration @@ -87,7 +87,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["_build", "_templates"] +exclude_patterns = ["_build", "_templates", "homepage.md"] # The reST default role (single back ticks `dict`) cross links to any code # object (including Python, but others as well). 
diff --git a/docs/contributing.md b/docs/contributing.md index 0018f35c..d74490f4 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -1,12 +1,12 @@ -## Contribution Guidelines +# Contribution Guidelines Thank you for considering contributing to our Python package! We appreciate your time and effort in helping us improve our project. Please take a moment to review the following guidelines to ensure a smooth and efficient contribution process. -### Code of Conduct +## Code of Conduct We kindly request all contributors to adhere to our Code of Conduct when participating in this project. It outlines our expectations for respectful and inclusive behavior within the community. -### Setting Up Development Environment +## Setting Up Development Environment Before you start contributing to the project, you need to set up your development environment. This will allow you to make changes to the codebase, run tests, and build the documentation locally. The project uses `poetry` for dependency management and packaging. Along with that, `ruff` is used for source code formatting and linting. @@ -48,182 +48,254 @@ cd DeepTab poetry install -poetry run pre-commit install +poetry run pre-commit install --hook-type commit-msg --hook-type pre-commit --hook-type pre-push ``` -If you need to update the documentation, please install the dependencies requried for documentation: +If you need to update the documentation, please install the documentation dependencies: -``` +```bash +# Recommended: install via the docs dependency group +poetry install --with docs + +# Alternative: install directly pip install -r docs/requirements_docs.txt ``` **Note:** You can also set up a virtual environment to isolate your development environment. -### How to Contribute +## How to Contribute -1. Create a new branch from the `develop` branch for your contributions. Please use descriptive and concise branch names. +1. Create a new branch from `main` for your contributions. 
Please use descriptive and concise branch names. 2. Make your desired changes or additions to the codebase. 3. Ensure that your code adheres to [PEP8](https://peps.python.org/pep-0008/) coding style guidelines. -4. Write appropriate tests for your changes, ensuring that they pass. - - `make test` +4. Write appropriate tests for your changes and verify they pass: + ```bash + just test + ``` 5. Update the documentation and examples, if necessary. -6. Build the html documentation and verify if it works as expected. We have used Sphinx for documentation, you could build the documents as follows: - - `cd src/docs` - - `make clean` - - `make html` -7. Verify the html documents created under `docs/_build/html` directory. `index.html` file is the main file which contains link to all other files and doctree. - -8. Commit your changes following the Conventional Commits specification (see below). -9. Submit a pull request from your branch to the development branch of the original repository. +6. Build the HTML documentation and verify it works as expected: + ```bash + just docs + ``` + Verify the output under `docs/_build/html/`. `index.html` is the entry point. +7. Run the full local check suite before pushing (lint, format, type-check, and all pre-commit hooks): + ```bash + just check + ``` + If `ruff-format` modifies any files, commit those changes before pushing: + ```bash + git add -u + git commit -m "style: apply ruff formatting" + ``` +8. Commit your changes following the Conventional Commits specification (see below): + ```bash + just commit + ``` +9. Submit a pull request from your branch to `main` in the original repository. 10. Wait for the maintainers to review your pull request. Address any feedback or comments if required. 11. Once approved, your changes will be merged into the main codebase. -### Release Workflow +## Pre-commit Hooks -This project uses automated semantic versioning and releases. 
Here's how releases work: +This project uses [pre-commit](https://pre-commit.com/) to enforce code quality automatically. The hooks run on two stages: -#### Automated Release Process +- **commit** — `ruff` format and lint checks, plus general file hygiene hooks +- **push** — `pyright` type checking (slow, so deferred to push) -``` -1. Make Changes → 2. Conventional Commit → 3. Merge to Master → 4. Automated Release +`just install` registers all three hook types (`commit-msg`, `pre-commit`, `pre-push`) so everything fires at the right time automatically. + +> **Important:** Run `just check` before opening a PR. It executes all hooks against every file in the repo (both commit and push stages), giving you the same signal CI will see. + +```bash +# Run commit-stage hooks on all files (ruff format, ruff lint, file hygiene) +just lint + +# Run ruff formatter +just format + +# Run pyright type checker +just typecheck + +# Run ALL hooks across ALL files (commit + push stages) — equivalent to what CI checks +just check ``` -**Step-by-Step:** +If pre-commit reports files that _would be reformatted_, run `just format`, stage the changes, and commit before pushing. Formatting-only changes should be committed separately with `style: apply ruff formatting`. -1. **Development Phase** +### Type checking (pyright) - - Create feature branch from `develop` - - Make your changes - - Commit using conventional commits (e.g., `feat:`, `fix:`) +Type checking with `pyright` runs automatically on `git push` via the pre-push hook (registered by `just install`). It also runs in CI as the `typecheck` job in `.github/workflows/ci.yml`. -2. **Merge to Develop** +To run it manually at any time: - - Create PR to `develop` branch - - After review, merge to `develop` - - ReadTheDocs dev documentation updates automatically +```bash +just typecheck +``` -3. **Merge to Master** (Triggers Release) +Fix any reported errors before opening a PR. 
- - Merge `develop` to `master` - - GitHub Actions semantic-release workflow runs automatically +## Docs Workflow -4. **Automated Release (on Master)** - - ✅ Analyzes conventional commits since last release - - ✅ Determines version bump (major/minor/patch) - - ✅ Updates version in `pyproject.toml` and `__version__.py` - - ✅ Generates/updates `CHANGELOG.md` - - ✅ Creates git tag (e.g., `v1.7.0`) - - ✅ Builds package (`poetry build`) - - ✅ Publishes to PyPI - - ✅ Creates GitHub Release with notes +Documentation is built with [Sphinx](https://www.sphinx-doc.org/) and hosted on [Read the Docs](https://readthedocs.org/). -#### What Triggers a Release? +The docs CI is defined in `.github/workflows/docs.yml` and is separate from the main CI workflow. -| Commit Type | Version Bump | PyPI Release | -| -------------------------------------------------------- | ------------- | ------------ | -| `feat:` | Minor (1.x.0) | ✅ Yes | -| `fix:` | Patch (1.6.x) | ✅ Yes | -| `perf:` | Patch (1.6.x) | ✅ Yes | -| `feat!:` or `BREAKING CHANGE:` | Major (x.0.0) | ✅ Yes | -| `docs:`, `style:`, `refactor:`, `test:`, `chore:`, `ci:` | None | ❌ No | +### How docs are published -#### Example Scenarios +| Trigger | CI (`docs.yml`) | Read the Docs | +| --------------------------------------------------------------------- | ----------------------------------------------------- | ------------------------------------------------------------- | +| PR touching `docs/**`, `deeptab/**`, `README.md`, or `pyproject.toml` | Sphinx build check — PR is blocked if docs are broken | No publish | +| Merge to `main` | Sphinx build check | Publishes **latest** (dev) version | +| Stable tag pushed (`vX.Y.Z`) | Sphinx build check from that exact tagged commit | Publishes **versioned** snapshot and updates **stable** alias | +| RC tag pushed (`vX.Y.Zrc1`) | Sphinx build check from that exact tagged commit | Publishes versioned pre-release snapshot | -**Scenario 1: Documentation Update (No Release)** +> **Note:** 
The docs CI `push` trigger has **no `paths:` filter** — tag pushes always run the full docs build regardless of which files changed in the tagged commit. The `paths:` filter only applies to PRs to keep checks fast. -```bash -git commit -m "docs: update API reference" -# Merge to master → No version bump, no PyPI release -``` +> **Note:** Versioned and stable docs require **"Build tags"** to be enabled in the Read the Docs project settings under _Admin → Advanced settings_. RTD automatically sets the `stable` alias to the highest non-pre-release tag. -**Scenario 2: Bug Fix (Patch Release)** +### Tag → versioned docs flow -```bash -git commit -m "fix: resolve memory leak in dataloader" -# Merge to master → Version 1.6.1 → 1.6.2 → PyPI release +``` +git tag -a v1.7.0 -m "Release v1.7.0" +git push origin v1.7.0 + ↓ +docs.yml triggers on that exact tagged commit + ↓ +Sphinx build succeeds (or blocks if broken) + ↓ +RTD webhook fires → builds docs from v1.7.0 source + ↓ +RTD publishes /en/v1.7.0/ (versioned) + ↓ +RTD updates /en/stable/ → points to v1.7.0 ``` -**Scenario 3: New Feature (Minor Release)** +RC tags (`vX.Y.Zrc1`) follow the same flow but RTD does **not** update the `stable` alias for pre-release tags. 
+ +### Building docs locally ```bash -git commit -m "feat(models): add TabNet architecture" -# Merge to master → Version 1.6.1 → 1.7.0 → PyPI release -``` +# Install system dependency (macOS) +brew install pandoc +# or on Ubuntu +sudo apt-get install pandoc -**Scenario 4: Breaking Change (Major Release)** +# Install doc dependencies (if not already done) +poetry install --with docs -```bash -git commit -m "feat!: remove Python 3.9 support +# Build HTML docs (warnings treated as errors) +sphinx-build -b html docs/ docs/_build/html -W --keep-going -BREAKING CHANGE: Python 3.10 is now the minimum required version" -# Merge to master → Version 1.6.1 → 2.0.0 → PyPI release +# Open in browser +open docs/_build/html/index.html ``` -#### Important Notes +### Version resolution -- **Only master branch** triggers releases -- **Semantic-release is fully automated** - no manual version bumping needed -- **Never manually edit version numbers** in `pyproject.toml` or `deeptab/__version__.py` - they are automatically updated by semantic-release -- **PyPI token** is configured in GitHub repository secrets -- **Review commits carefully** before merging to master (they determine the version!) +The docs version is read at build time from the installed package metadata via `importlib.metadata.version("deeptab")`, which reflects the version in `pyproject.toml`. No separate version file is maintained. -#### Working with Updated Versions +## Release Workflow -**Q: What happens to `develop` branch after a release?** +This project uses conventional commits and intentional, maintainer-controlled releases. -After semantic-release completes on `master`, the version files are automatically updated. The `develop` branch syncs automatically: +### Release Process Overview -**Automatic Sync Flow:** +``` +1. Make Changes → 2. Conventional Commit → 3. Merge to Main → 4. CI runs + ↓ + (no automatic release — main is not a release trigger) +5. 
Maintainer opens Release PR → version bump + CHANGELOG update → merge to main +6. Maintainer creates Git tag → PyPI publish triggered automatically ``` -┌─────────────────────────────────────────────────────────────┐ -│ Release & Sync Process │ -└─────────────────────────────────────────────────────────────┘ -1. Merge develop → master - │ - ▼ -2. Semantic Release runs on master - │ - ├─→ Version: 1.6.1 → 1.7.0 - ├─→ Update pyproject.toml - ├─→ Update __version__.py - ├─→ Update CHANGELOG.md - └─→ Create tag v1.7.0 - │ - ▼ -3. Auto-Sync Workflow triggers - │ - ├─→ [No Conflicts] ✅ - │ │ - │ ├─→ Merge master → develop automatically - │ └─→ Develop updated within 60 seconds - │ - └─→ [Conflicts Detected] ⚠️ - │ - ├─→ Create PR: "chore: sync develop with master" - ├─→ Notify maintainers - └─→ Manual merge required (rare) +**Step-by-Step:** + +1. **Development Phase** + - Create feature branch from `main` + - Make your changes + - Commit using conventional commits (e.g., `feat:`, `fix:`) + +2. **Merge to Main** (CI only — no release) + - Create PR to `main` + - After review, merge to `main` + - GitHub Actions runs tests + - **No version bump, no tag, no PyPI publish happens automatically** + +3. **Maintainer Release PR** (periodic, intentional) + - Maintainer creates a `release/vX.Y.Z` branch + - Runs `cz bump` to update `pyproject.toml` and `CHANGELOG.md` + - Opens PR to `main`, merges after review + +4. **Maintainer Creates Git Tag** + - After the release PR is merged: + ```bash + git checkout main && git pull + git tag -a vX.Y.Z -m "Release vX.Y.Z" + git push origin vX.Y.Z + ``` + - This tag push triggers `publish-pypi.yml` → builds and publishes to PyPI + creates GitHub Release + - For RC tags (`vX.Y.Zrc1`), push triggers `publish-testpypi.yml` → publishes to TestPyPI instead + +### What Triggers a Release? 
+ +| Event | Result | +| ----------------------------- | ------------------------------------- | +| Push to `main` | CI tests only | +| Maintainer pushes `v*` tag | PyPI publish + GitHub Release | +| Maintainer pushes `v*rc*` tag | PyPI pre-release + GitHub pre-release | + +### Commit Types and Their Effect on Version + +Commit messages determine the version bump chosen by the maintainer when running `cz bump`: + +| Commit Type | Version Bump | +| -------------------------------------------------------- | ----------------- | +| `feat:` | Minor (1.x.0) | +| `fix:`, `perf:` | Patch (1.6.x) | +| `feat!:` / `BREAKING CHANGE:` | Major (x.0.0) | +| `docs:`, `style:`, `refactor:`, `test:`, `chore:`, `ci:` | No release needed | + +### Example Scenarios + +**Scenario 1: Documentation Update (No Release)** + +```bash +git commit -m "docs: update API reference" +# Merge to main → CI only, no release ``` -**For Contributors:** +**Scenario 2: Bug Fix (Patch Release)** + +```bash +git commit -m "fix: resolve memory leak in dataloader" +# Merged to main → later, maintainer runs cz bump → creates v1.6.2 tag → PyPI release +``` -Before starting new work, always pull the latest `develop`: +**Scenario 3: New Feature (Minor Release)** ```bash -# Pull latest develop (already synced automatically) -git checkout develop -git pull origin develop +git commit -m "feat(models): add TabNet architecture" +# Merged to main → later, maintainer runs cz bump → creates v1.7.0 tag → PyPI release +``` + +**Scenario 4: RC for risky feature** -# Create your feature branch -git checkout -b feature/my-new-feature +```bash +# Maintainer tags manually: +git tag -a v1.7.0rc1 -m "Release candidate v1.7.0rc1" +git push origin v1.7.0rc1 +# → PyPI pre-release, GitHub pre-release ``` -**Note:** 95% of the time, `develop` syncs automatically. If you see a PR titled "sync develop with master", it means manual conflict resolution is needed (maintainers handle this). 
+### Important Notes -Don't worry if the version seems "old" in your branch - semantic-release calculates the correct new version based on commits when merging to master. +- **Merging to `main` never triggers a PyPI release** +- **Only a manually pushed `v*` tag triggers publishing** +- **Never manually edit the version number** in `pyproject.toml` — use `cz bump` on a release branch +- **PyPI publishing** uses OIDC Trusted Publishing — no API tokens are stored in GitHub secrets -### Submitting Contributions +## Submitting Contributions When submitting your contributions, please ensure the following: @@ -234,11 +306,11 @@ When submitting your contributions, please ensure the following: - Update the documentation if necessary to reflect the changes made. - Ensure that your pull request has a single, logical focus. -### Issue Tracker +## Issue Tracker If you encounter any bugs, have feature requests, or need assistance, please visit our [Issue Tracker](https://github.com/OpenTabular/DeepTab/issues). Make sure to search for existing issues before creating a new one. -### License +## License By contributing to this project, you agree that your contributions will be licensed under the LICENSE of the project. Please note that the above guidelines are subject to change, and the project maintainers hold the right to reject or request modifications to any contributions. Thank you for your understanding and support in making this project better! diff --git a/docs/homepage.md b/docs/homepage.md index 43201091..f68d632c 100644 --- a/docs/homepage.md +++ b/docs/homepage.md @@ -1,9 +1,9 @@ # deeptab: Tabular Deep Learning Made Simple -deeptab is a Python library for tabular deep learning. It includes models that leverage the Mamba (State Space Model) architecture, as well as other popular models like TabTransformer, FTTransformer, TabM and tabular ResNets. 
Check out our paper `Mambular: A Sequential Model for Tabular Deep Learning`, available [here](https://arxiv.org/abs/2408.06291). Also check out our paper introducing [TabulaRNN](https://arxiv.org/pdf/2411.17207) and analyzing the efficiency of NLP inspired tabular models. - +deeptab is a Python library for tabular deep learning. It includes models that leverage the Mamba (State Space Model) architecture, as well as other popular models like TabTransformer, FTTransformer, TabM and tabular ResNets. Check out our paper `Mambular: A Sequential Model for Tabular Deep Learning`, available on [arXiv](https://arxiv.org/abs/2408.06291). Also check out our paper introducing [TabulaRNN](https://arxiv.org/pdf/2411.17207) and analyzing the efficiency of NLP inspired tabular models. # 🏃 Quickstart + Similar to any sklearn model, deeptab models can be fit as easy as this: ```python @@ -16,39 +16,37 @@ model.fit(X, y, max_epochs=150, lr=1e-04) ``` # 📖 Introduction -deeptab is a Python package that brings the power of advanced deep learning architectures to tabular data, offering a suite of models for regression, classification, and distributional regression tasks. Designed with ease of use in mind, deeptab models adhere to scikit-learn's `BaseEstimator` interface, making them highly compatible with the familiar scikit-learn ecosystem. This means you can fit, predict, and evaluate using deeptab models just as you would with any traditional scikit-learn model, but with the added performance and flexibility of deep learning. +deeptab is a Python package that brings the power of advanced deep learning architectures to tabular data, offering a suite of models for regression, classification, and distributional regression tasks. Designed with ease of use in mind, deeptab models adhere to scikit-learn's `BaseEstimator` interface, making them highly compatible with the familiar scikit-learn ecosystem. 
This means you can fit, predict, and evaluate using deeptab models just as you would with any traditional scikit-learn model, but with the added performance and flexibility of deep learning. # 🤖 Models -| Model | Description | -| ---------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | -| `Mambular` | A sequential model using Mamba blocks specifically designed for various tabular data tasks introduced [here](https://arxiv.org/abs/2408.06291). | -| `TabM` | Batch Ensembling for a MLP as introduced by [Gorishniy et al.](https://arxiv.org/abs/2410.24210) | -| `NODE` | Neural Oblivious Decision Ensembles as introduced by [Popov et al.](https://arxiv.org/abs/1909.06312) | -| `FTTransformer` | A model leveraging transformer encoders, as introduced by [Gorishniy et al.](https://arxiv.org/abs/2106.11959), for tabular data. | -| `MLP` | A classical Multi-Layer Perceptron (MLP) model for handling tabular data tasks. | -| `ResNet` | An adaptation of the ResNet architecture for tabular data applications. | -| `TabTransformer` | A transformer-based model for tabular data introduced by [Huang et al.](https://arxiv.org/abs/2012.06678), enhancing feature learning capabilities. | -| `MambaTab` | A tabular model using a Mamba-Block on a joint input representation described [here](https://arxiv.org/abs/2401.08867) . Not a sequential model. | -| `TabulaRNN` | A Recurrent Neural Network for Tabular data, introduced [here](https://arxiv.org/pdf/2411.17207). | -| `MambAttention` | A combination between Mamba and Transformers, also introduced [here](https://arxiv.org/pdf/2411.17207). 
| +| Model | Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Mambular` | A sequential model using Mamba blocks specifically designed for various tabular data tasks introduced in [Thielmann et al. (2024)](https://arxiv.org/abs/2408.06291). | +| `TabM` | Batch Ensembling for a MLP as introduced by [Gorishniy et al. (2024)](https://arxiv.org/abs/2410.24210) | +| `NODE` | Neural Oblivious Decision Ensembles as introduced by [Popov et al.](https://arxiv.org/abs/1909.06312) | +| `FTTransformer` | A model leveraging transformer encoders, as introduced by [Gorishniy et al. (2021)](https://arxiv.org/abs/2106.11959), for tabular data. | +| `MLP` | A classical Multi-Layer Perceptron (MLP) model for handling tabular data tasks. | +| `ResNet` | An adaptation of the ResNet architecture for tabular data applications. | +| `TabTransformer` | A transformer-based model for tabular data introduced by [Huang et al.](https://arxiv.org/abs/2012.06678), enhancing feature learning capabilities. | +| `MambaTab` | A tabular model using a Mamba-Block on a joint input representation described in [Ahamed et al.](https://arxiv.org/abs/2401.08867). Not a sequential model. | +| `TabulaRNN` | A Recurrent Neural Network for Tabular data, introduced in [Thielmann et al. (2025)](https://arxiv.org/pdf/2411.17207). | +| `MambAttention` | A combination between Mamba and Transformers, also introduced in [Thielmann et al. (2025)](https://arxiv.org/pdf/2411.17207). | | `NDTF` | A neural decision forest using soft decision trees. See [Kontschieder et al.](https://openaccess.thecvf.com/content_iccv_2015/html/Kontschieder_Deep_Neural_Decision_ICCV_2015_paper.html) for inspiration. 
| -| `SAINT` | Improve neural networs via Row Attention and Contrastive Pre-Training, introduced [here](https://arxiv.org/pdf/2106.01342). | - - +| `SAINT` | Improve neural networks via Row Attention and Contrastive Pre-Training, introduced by [Somepalli et al.](https://arxiv.org/pdf/2106.01342). | All models are available for `regression`, `classification` and distributional regression, denoted by `LSS`. Hence, they are available as e.g. `MambularRegressor`, `MambularClassifier` or `MambularLSS` - # 📚 Documentation -You can find the deeptab API documentation [here](https://deeptab.readthedocs.io/en/latest/). +You can find the deeptab API documentation on [Read the Docs](https://deeptab.readthedocs.io/en/latest/). # 🛠️ Installation Install deeptab using pip: + ```sh pip install deeptab ``` @@ -74,19 +72,16 @@ deeptab simplifies data preprocessing with a range of tools designed for easy tr

Data Type Detection and Transformation

-- **Ordinal & One-Hot Encoding**: Automatically transforms categorical data into numerical formats using continuous ordinal encoding or one-hot encoding. Includes options for transforming outputs to `float` for compatibility with downstream models. -- **Binning**: Discretizes numerical features into bins, with support for both fixed binning strategies and optimal binning derived from decision tree models. -- **MinMax**: Scales numerical data to a specific range, such as [-1, 1], using Min-Max scaling or similar techniques. -- **Standardization**: Centers and scales numerical features to have a mean of zero and unit variance for better compatibility with certain models. -- **Quantile Transformations**: Normalizes numerical data to follow a uniform or normal distribution, handling distributional shifts effectively. -- **Spline Transformations**: Captures nonlinearity in numerical features using spline-based transformations, ideal for complex relationships. -- **Piecewise Linear Encodings (PLE)**: Captures complex numerical patterns by applying piecewise linear encoding, suitable for data with periodic or nonlinear structures. -- **Polynomial Features**: Automatically generates polynomial and interaction terms for numerical features, enhancing the ability to capture higher-order relationships. -- **Box-Cox & Yeo-Johnson Transformations**: Performs power transformations to stabilize variance and normalize distributions. -- **Custom Binning**: Enables user-defined bin edges for precise discretization of numerical data. - - - +- **Ordinal & One-Hot Encoding**: Automatically transforms categorical data into numerical formats using continuous ordinal encoding or one-hot encoding. Includes options for transforming outputs to `float` for compatibility with downstream models. +- **Binning**: Discretizes numerical features into bins, with support for both fixed binning strategies and optimal binning derived from decision tree models. 
+- **MinMax**: Scales numerical data to a specific range, such as [-1, 1], using Min-Max scaling or similar techniques. +- **Standardization**: Centers and scales numerical features to have a mean of zero and unit variance for better compatibility with certain models. +- **Quantile Transformations**: Normalizes numerical data to follow a uniform or normal distribution, handling distributional shifts effectively. +- **Spline Transformations**: Captures nonlinearity in numerical features using spline-based transformations, ideal for complex relationships. +- **Piecewise Linear Encodings (PLE)**: Captures complex numerical patterns by applying piecewise linear encoding, suitable for data with periodic or nonlinear structures. +- **Polynomial Features**: Automatically generates polynomial and interaction terms for numerical features, enhancing the ability to capture higher-order relationships. +- **Box-Cox & Yeo-Johnson Transformations**: Performs power transformations to stabilize variance and normalize distributions. +- **Custom Binning**: Enables user-defined bin edges for precise discretization of numerical data.

Fit a Model

@@ -124,8 +119,8 @@ Since all of the models are sklearn base estimators, you can use the built-in hy from sklearn.model_selection import RandomizedSearchCV param_dist = { - 'd_model': randint(32, 128), - 'n_layers': randint(2, 10), + 'd_model': randint(32, 128), + 'n_layers': randint(2, 10), 'lr': uniform(1e-5, 1e-3) } @@ -148,14 +143,12 @@ print("Best Parameters:", random_search.best_params_) print("Best Score:", random_search.best_score_) ``` - -**Note:** that using this, you can also optimize the preprocessing. Just use the prefix ``prepro__`` when specifying the preprocessor arguments you want to optimize: - +**Note:** that using this, you can also optimize the preprocessing. Just use the prefix `prepro__` when specifying the preprocessor arguments you want to optimize: ```python param_dist = { - 'd_model': randint(32, 128), - 'n_layers': randint(2, 10), + 'd_model': randint(32, 128), + 'n_layers': randint(2, 10), 'lr': uniform(1e-5, 1e-3), "prepro__numerical_preprocessing": ["ple", "standardization", "box-cox"] } @@ -164,15 +157,13 @@ param_dist = { Since we have early stopping integrated and return the best model with respect to the validation loss, setting max_epochs to a large number is sensible. - Or use the built-in bayesian hpo simply by running: ```python best_params = model.optimize_hparams(X, y) ``` -This automatically sets the search space based on the default config from ``deeptab.configs``. See the documentation for all params with regard to ``optimize_hparams()``. However, the preprocessor arguments are fixed and cannot be optimized here. - +This automatically sets the search space based on the default config from `deeptab.configs`. See the documentation for all params with regard to `optimize_hparams()`. However, the preprocessor arguments are fixed and cannot be optimized here.

⚖️ Distributional Regression with MambularLSS

@@ -200,7 +191,6 @@ MambularLSS allows you to model the full distribution of a response variable, no These distribution classes make MambularLSS versatile in modeling various data types and distributions. -

Getting Started with MambularLSS:

To integrate distributional regression into your workflow with `MambularLSS`, start by initializing the model with your desired configuration, similar to other deeptab models: @@ -222,13 +212,12 @@ model.fit( y, max_epochs=150, lr=1e-04, - patience=10, + patience=10, family="normal" # define your distribution ) ``` - # 💻 Implement Your Own Model deeptab allows users to easily integrate their custom models into the existing logic. This process is designed to be straightforward, making it simple to create a PyTorch model and define its forward pass. Instead of inheriting from `nn.Module`, you inherit from deeptab's `BaseModel`. Each deeptab model takes three main arguments: the number of classes (e.g., 1 for regression or 2 for binary classification), `cat_feature_info`, and `num_feature_info` for categorical and numerical feature information, respectively. Additionally, you can provide a config argument, which can either be a custom configuration or one of the provided default configs. @@ -343,7 +332,7 @@ for epoch in range(10): # Number of epochs # Dummy Data num_features = [torch.randn(32, 1) for _ in num_feature_info] cat_features = [torch.randint(0, 5, (32,)) for _ in cat_feature_info] - labels = torch.randn(32, num_classes) + labels = torch.randn(32, num_classes) # Forward pass outputs = model(num_features, cat_features) @@ -361,6 +350,7 @@ for epoch in range(10): # Number of epochs # 🏷️ Citation If you find this project useful in your research, please consider cite: + ```BibTeX @article{thielmann2024mambular, title={Mambular: A Sequential Model for Tabular Deep Learning}, @@ -371,6 +361,7 @@ If you find this project useful in your research, please consider cite: ``` If you use TabulaRNN please consider to cite: + ```BibTeX @article{thielmann2024efficiency, title={On the Efficiency of NLP-Inspired Methods for Tabular Deep Learning}, diff --git a/docs/installation.md b/docs/installation.md index 43647db3..0bf4f222 100644 --- a/docs/installation.md +++ 
b/docs/installation.md @@ -1,8 +1,8 @@ -## Installation +# Installation Please follow the steps below for installing `deeptab`. -### Install from the source: +## Install from the source: ```bash cd deeptab @@ -12,7 +12,8 @@ poetry install Note: Make sure you in the same directory where `pyproject.toml` file resides. -### Installation from PyPi: +## Installation from PyPi: + The package is available on PyPi. You can install it using the following command: ```bash diff --git a/docs/release.md b/docs/release.md index 07a8f0ea..b685a7be 100644 --- a/docs/release.md +++ b/docs/release.md @@ -3,6 +3,7 @@ The document outlines the steps to build and release the `deeptab` package. At this point, it is assumed that the development and testing of the package have been completed successfully. ## 1. Test documentation + It is expected from the contributor to update the documentation as an when required along side the change in source code. Please use the below process to test the documentation: ```sh @@ -10,27 +11,67 @@ cd deeptab/docs/ make doctest ``` + Fix any docstring related issue, then proceed with next steps. ## 2. Version update -The package version is mantained in `deeptab/__version__.py` and `pyproject.toml` file. Increment the version according to the changes such as patch, minor, major or all. -- The version number should be in the format `major.minor.patch`. For example, `1.0.1`. +The package version is maintained in `pyproject.toml` only. The version is read at runtime via `importlib.metadata`. + +On a `release/vX.Y.Z` branch, run: + +```bash +cz bump +``` + +This will: + +- Determine the next version from conventional commits since the last tag +- Update the version in `pyproject.toml` +- Update `CHANGELOG.md` +- Create a local commit `bump: version X.Y.Z-1 → X.Y.Z` + +The version number follows the format `major.minor.patch`. For example, `1.0.1`. -**Note:** Don't forget to update the version in the `pyproject.toml` file as well. +## 3. 
Release PR + +- Open a PR from `release/vX.Y.Z` to `main` +- After review and approval, merge the PR +- **Merging to `main` does NOT trigger a PyPI release** + +## 4. Create and push the Git tag + +After the release PR is merged, the maintainer creates the release tag: + +```bash +git checkout main && git pull +git tag -a vX.Y.Z -m "Release vX.Y.Z" +git push origin vX.Y.Z +``` + +For a release candidate: + +```bash +git tag -a vX.Y.Zrc1 -m "Release candidate vX.Y.Zrc1" +git push origin vX.Y.Zrc1 +``` +## 5. Publish package to PyPI -## 3. Release +The tag push automatically triggers the appropriate workflow in GitHub Actions: -- Create a pull request from your `feature` branch to the `develop` branch. -- Once the pull request is approved and merged to develop. The maintainer will test the package and documentation. If everything is fine, the maintainer will proceed further to merge the changed to `master` and `release` branch. -- Ideally content of `master` and `release` branch should be same. The `release` branch is used to publish the package to PyPi while `master` branch is used to publish the documentation to readthedocs and can be accesseed at [deeptab.readthedocs.io](https://deeptab.readthedocs.io/en/latest/). +- Stable tag (`vX.Y.Z`) → `publish-pypi.yml` → publishes to **PyPI** + creates GitHub Release +- RC tag (`vX.Y.Zrc1`) → `publish-testpypi.yml` → publishes to **TestPyPI** + creates GitHub pre-release +Both workflows: -## 4. Publish package to PyPi +- Build the package (`poetry build`) +- Validate with `twine check` +- Publish via **OIDC Trusted Publishing** (no API tokens required) +- Create a GitHub Release with auto-generated notes -The package is published to PyPi using GitHub Actions. The workflow is triggered when a new tag is pushed to the repository. The workflow will build the package, upload it to PyPi. 
+> **Note:** A `pypi-publish` GitHub Environment (for stable) and `testpypi-publish` environment (for RCs) must be configured with tag-based deployment protection rules. -## 5. GitHub Release +## 6. GitHub Release -Create a new release on GitHub with the version number and release notes. The release notes should include the changes made in the release. +The GitHub Release is created automatically by the publish workflow. Verify the release notes are correct and add any manual context if needed. diff --git a/docs/requirements_docs.txt b/docs/requirements_docs.txt index e724a314..be40c213 100644 --- a/docs/requirements_docs.txt +++ b/docs/requirements_docs.txt @@ -1,3 +1,4 @@ +setuptools sphinx==6.2.1 sphinx-autodoc-typehints==1.23.0 numpydoc==1.7.0 diff --git a/justfile b/justfile index cd4e8fa3..65938f3e 100644 --- a/justfile +++ b/justfile @@ -1,20 +1,20 @@ # list recipes -defaut: +default: @just --list --unsorted -# install dependencies +# install dependencies and set up all pre-commit hooks install: poetry install - poetry run pre-commit install --hook-type commit-msg --hook-type pre-commit + poetry run pre-commit install --hook-type commit-msg --hook-type pre-commit --hook-type pre-push -# update dependencies +# update dependencies and pre-commit hook revisions update: poetry update poetry run pre-commit autoupdate -# update the poetry.lock file if the pyproject.toml file has been updated +# regenerate poetry.lock without upgrading dependencies lock: - poetry lock + poetry lock --no-update # remove Python file artifacts clean: @@ -23,16 +23,33 @@ clean: find . -name '*~' -exec rm -f {} + find . -name '__pycache__' -exec rm -fr {} + - -# run ruff linting and fix all fixable linting errors +# run ruff linting and fix all fixable errors lint: poetry run ruff check --fix . -# run ruff formatter to format all files +# run docformatter and ruff formatter format: - poetry run docformatter --in-place --recursive --wrap-summaries 120 --wrap-descriptions 120 . 
poetry run ruff format . +# run pyright type checking +typecheck: + poetry run pyright + +# run tests with coverage +test: + poetry run pytest --cov=deeptab tests/ + +# build HTML docs locally (warnings treated as errors) +docs: + poetry run sphinx-build -b html docs/ docs/_build/html -W --keep-going + +# run all pre-commit hooks on all files (commit + push stage) +# if ruff-format modifies files, stage and commit them before pushing: +# git add -u && git commit -m "style: apply ruff formatting" +check: + poetry run pre-commit run --all-files + poetry run pre-commit run --all-files --hook-stage push + # create a conventional commit using commitizen commit: poetry run cz commit diff --git a/poetry.lock b/poetry.lock index 9bc92864..a21a389e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -32,6 +32,25 @@ test-prod = ["parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "py test-trackers = ["comet-ml", "dvclive", "tensorboard", "wandb"] testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"] +[[package]] +name = "accessible-pygments" +version = "0.0.5" +description = "A collection of accessible pygments styles" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "accessible_pygments-0.0.5-py3-none-any.whl", hash = "sha256:88ae3211e68a1d0b011504b2ffc1691feafce124b845bd072ab6f9f66f34d4b7"}, + {file = "accessible_pygments-0.0.5.tar.gz", hash = "sha256:40918d3e6a2b619ad424cb91e556bd3bd8865443d9f22f1dcdf79e33c8046872"}, +] + +[package.dependencies] +pygments = ">=1.5" + +[package.extras] +dev = ["pillow", "pkginfo (>=1.10)", "playwright", "pre-commit", "setuptools", "twine (>=5.0)"] +tests = ["hypothesis", "pytest"] + [[package]] name = "aiohappyeyeballs" version = "2.6.1" @@ -170,15 +189,15 @@ frozenlist = ">=1.1.0" typing-extensions = 
{version = ">=4.2", markers = "python_version < \"3.13\""} [[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" +name = "alabaster" +version = "0.7.16" +description = "A light, configurable Sphinx theme" optional = false -python-versions = ">=3.8" -groups = ["dev"] +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, + {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, + {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, ] [[package]] @@ -196,6 +215,22 @@ files = [ [package.extras] test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] +[[package]] +name = "asttokens" +version = "3.0.1" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a"}, + {file = "asttokens-3.0.1.tar.gz", hash = "sha256:71a4ee5de0bde6a31d64f6b13f2293ac190344478f081c3d1bccfcf5eacb0cb7"}, +] + +[package.extras] +astroid = ["astroid (>=2,<5)"] +test = ["astroid (>=2,<5)", "pytest (<9.0)", "pytest-cov", "pytest-xdist"] + [[package]] name = "async-timeout" version = "5.0.1" @@ -215,7 +250,7 @@ version = "25.1.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "docs"] files = [ {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, {file = "attrs-25.1.0.tar.gz", hash = 
"sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, @@ -229,18 +264,189 @@ docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphi tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] +[[package]] +name = "babel" +version = "2.18.0" +description = "Internationalization utilities" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "babel-2.18.0-py3-none-any.whl", hash = "sha256:e2b422b277c2b9a9630c1d7903c2a00d0830c409c59ac8cae9081c92f1aeba35"}, + {file = "babel-2.18.0.tar.gz", hash = "sha256:b80b99a14bd085fcacfa15c9165f651fbb3406e66cc603abf11c5750937c992d"}, +] + +[package.extras] +dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata ; sys_platform == \"win32\""] + +[[package]] +name = "backports-tarfile" +version = "1.2.0" +description = "Backport of CPython tarfile module" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.12\" and platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" +files = [ + {file = "backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34"}, + {file = "backports_tarfile-1.2.0.tar.gz", hash = "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", 
"rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["jaraco.test", "pytest (!=8.0.*)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)"] + +[[package]] +name = "beautifulsoup4" +version = "4.14.3" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.7.0" +groups = ["docs"] +files = [ + {file = "beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb"}, + {file = "beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86"}, +] + +[package.dependencies] +soupsieve = ">=1.6.1" +typing-extensions = ">=4.0.0" + +[package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "bleach" +version = "6.3.0" +description = "An easy safelist-based HTML-sanitizing tool." +optional = false +python-versions = ">=3.10" +groups = ["docs"] +files = [ + {file = "bleach-6.3.0-py3-none-any.whl", hash = "sha256:fe10ec77c93ddf3d13a73b035abaac7a9f5e436513864ccdad516693213c65d6"}, + {file = "bleach-6.3.0.tar.gz", hash = "sha256:6f3b91b1c0a02bb9a78b5a454c92506aa0fdf197e1d5e114d2e00c6f64306d22"}, +] + +[package.dependencies] +webencodings = "*" + +[package.extras] +css = ["tinycss2 (>=1.1.0,<1.5)"] + [[package]] name = "certifi" version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, ] +[[package]] +name = "cffi" +version = "2.0.0" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.9" +groups = ["dev", "docs"] +files = [ + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + 
{file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + {file = 
"cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", 
hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = 
"sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + 
{file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + 
{file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, +] +markers = {dev = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\" and platform_python_implementation != \"PyPy\"", docs = "implementation_name == \"pypy\""} + +[package.dependencies] +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} + [[package]] name = "cfgv" version = "3.4.0" @@ -259,7 +465,7 @@ version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -355,54 +561,18 @@ files = [ {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] -[[package]] -name = "click" -version = "8.3.1" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.10" -groups = ["dev"] -files = [ - {file = "click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6"}, - {file = "click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "click-option-group" -version = "0.5.9" -description = "Option groups missing in Click" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "click_option_group-0.5.9-py3-none-any.whl", hash = "sha256:ad2599248bd373e2e19bec5407967c3eec1d0d4fc4a5e77b08a0481e75991080"}, - {file = "click_option_group-0.5.9.tar.gz", hash = "sha256:f94ed2bc4cf69052e0f29592bd1e771a1789bd7bfc482dd0bc482134aff95823"}, -] - -[package.dependencies] -click = ">=7.0" - -[package.extras] -dev = ["pre-commit", "pytest"] -docs = ["m2r2", "pallets-sphinx-themes", "sphinx"] -test = ["pytest"] -test-cov = ["pytest", "pytest-cov"] - [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "platform_system == \"Windows\""} +markers = {main = "platform_system == \"Windows\"", docs = "sys_platform == \"win32\""} [[package]] name = "commitizen" @@ -507,6 +677,73 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1 [package.extras] toml = ["tomli ; python_full_version <= \"3.11.0a6\""] +[[package]] +name = "cryptography" +version = "47.0.0" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = "!=3.9.0,!=3.9.1,>=3.8" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\"" +files = [ + {file = "cryptography-47.0.0-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:160ad728f128972d362e714054f6ba0067cab7fb350c5202a9ae8ae4ce3ef1a0"}, + {file = "cryptography-47.0.0-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b9a8943e359b7615db1a3ba587994618e094ff3d6fa5a390c73d079ce18b3973"}, + {file = "cryptography-47.0.0-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f5c15764f261394b22aef6b00252f5195f46f2ca300bec57149474e2538b31f8"}, + {file = "cryptography-47.0.0-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9c59ab0e0fa3a180a5a9c59f3a5abe3ef90d474bc56d7fadfbe80359491b615b"}, + {file = "cryptography-47.0.0-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:34b4358b925a5ea3e14384ca781a2c0ef7ac219b57bb9eacc4457078e2b19f92"}, + {file = 
"cryptography-47.0.0-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0024b87d47ae2399165a6bfb20d24888881eeab83ae2566d62467c5ff0030ce7"}, + {file = "cryptography-47.0.0-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:1e47422b5557bb82d3fff997e8d92cff4e28b9789576984f08c248d2b3535d93"}, + {file = "cryptography-47.0.0-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:6f29f36582e6151d9686235e586dd35bb67491f024767d10b842e520dc6a07ac"}, + {file = "cryptography-47.0.0-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:a9b761f012a943b7de0e828843c5688d0de94a0578d44d6c85a1bae32f87791f"}, + {file = "cryptography-47.0.0-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4e1de79e047e25d6e9f8cea71c86b4a53aced64134f0f003bbcbf3655fd172c8"}, + {file = "cryptography-47.0.0-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef6b3634087f18d2155b1e8ce264e5345a753da2c5fa9815e7d41315c90f8318"}, + {file = "cryptography-47.0.0-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:11dbb9f50a0f1bb9757b3d8c27c1101780efb8f0bdecfb12439c22a74d64c001"}, + {file = "cryptography-47.0.0-cp311-abi3-win32.whl", hash = "sha256:7fda2f02c9015db3f42bb8a22324a454516ed10a8c29ca6ece6cdbb5efe2a203"}, + {file = "cryptography-47.0.0-cp311-abi3-win_amd64.whl", hash = "sha256:f5c3296dab66202f1b18a91fa266be93d6aa0c2806ea3d67762c69f60adc71aa"}, + {file = "cryptography-47.0.0-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:be12cb6a204f77ed968bcefe68086eb061695b540a3dd05edac507a3111b25f0"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2ebd84adf0728c039a3be2700289378e1c164afc6748df1a5ed456767bef9ba7"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f68d6fbc7fbbcfb0939fea72c3b96a9f9a6edfc0e1b1d29778a2066030418b1"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = 
"sha256:6651d32eff255423503aa276739da98c30f26c40cbeffcc6048e0d54ef704c0c"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3fb8fa48075fad7193f2e5496135c6a76ac4b2aa5a38433df0a539296b377829"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:11438c7518132d95f354fa01a4aa2f806d172a061a7bed18cf18cbdacdb204d7"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8c1a736bbb3288005796c3f7ccb9453360d7fed483b13b9f468aea5171432923"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:f1557695e5c2b86e204f6ce9470497848634100787935ab7adc5397c54abd7ab"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:f9a034b642b960767fb343766ae5ba6ad653f2e890ddd82955aef288ffea8736"}, + {file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:b1c76fca783aa7698eb21eb14f9c4aa09452248ee54a627d125025a43f83e7a7"}, + {file = "cryptography-47.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4f7722c97826770bab8ae92959a2e7b20a5e9e9bf4deae68fd86c3ca457bab52"}, + {file = "cryptography-47.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:09f6d7bf6724f8db8b32f11eccf23efc8e759924bc5603800335cf8859a3ddbd"}, + {file = "cryptography-47.0.0-cp314-cp314t-win32.whl", hash = "sha256:6eebcaf0df1d21ce1f90605c9b432dd2c4f4ab665ac29a40d5e3fc68f51b5e63"}, + {file = "cryptography-47.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:51c9313e90bd1690ec5a75ed047c27c0b8e6c570029712943d6116ef9a90620b"}, + {file = "cryptography-47.0.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:14432c8a9bcb37009784f9594a62fae211a2ae9543e96c92b2a8e4c3cd5cd0c4"}, + {file = "cryptography-47.0.0-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:07efe86201817e7d3c18781ca9770bc0db04e1e48c994be384e4602bc38f8f27"}, + {file = 
"cryptography-47.0.0-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2b45761c6ec22b7c726d6a829558777e32d0f1c8be7c3f3480f9c912d5ee8a10"}, + {file = "cryptography-47.0.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:edd4da498015da5b9f26d38d3bfc2e90257bfa9cbed1f6767c282a0025ae649b"}, + {file = "cryptography-47.0.0-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:9af828c0d5a65c70ec729cd7495a4bf1a67ecb66417b8f02ff125ab8a6326a74"}, + {file = "cryptography-47.0.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:256d07c78a04d6b276f5df935a9923275f53bd1522f214447fdf365494e2d515"}, + {file = "cryptography-47.0.0-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:5d0e362ff51041b0c0d219cc7d6924d7b8996f57ce5712bdcef71eb3c65a59cc"}, + {file = "cryptography-47.0.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:1581aef4219f7ca2849d0250edaa3866212fb74bf5667284f46aa92f9e65c1ca"}, + {file = "cryptography-47.0.0-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:a49a3eb5341b9503fa3000a9a0db033161db90d47285291f53c2a9d2cd1b7f76"}, + {file = "cryptography-47.0.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:2207a498b03275d0051589e326b79d4cf59985c99031b05bb292ac52631c37fe"}, + {file = "cryptography-47.0.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:7a02675e2fabd0c0fc04c868b8781863cbf1967691543c22f5470500ff840b31"}, + {file = "cryptography-47.0.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80887c5cbd1774683cb126f0ab4184567f080071d5acf62205acb354b4b753b7"}, + {file = "cryptography-47.0.0-cp38-abi3-win32.whl", hash = "sha256:ed67ea4e0cfb5faa5bc7ecb6e2b8838f3807a03758eec239d6c21c8769355310"}, + {file = "cryptography-47.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:835d2d7f47cdc53b3224e90810fb1d36ca94ea29cc1801fb4c1bc43876735769"}, + {file = "cryptography-47.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f1207974a904e005f762869996cf620e9bf79ecb4622f148550bb48e0eb35a7"}, + {file = 
"cryptography-47.0.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1a405c08857258c11016777e11c02bacbe7ef596faf259305d282272a3a05cbe"}, + {file = "cryptography-47.0.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:20fdbe3e38fb67c385d233c89371fa27f9909f6ebca1cecc20c13518dae65475"}, + {file = "cryptography-47.0.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:f7db373287273d8af1414cf95dc4118b13ffdc62be521997b0f2b270771fef50"}, + {file = "cryptography-47.0.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:9fe6b7c64926c765f9dff301f9c1b867febcda5768868ca084e18589113732ab"}, + {file = "cryptography-47.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cffbba3392df0fa8629bb7f43454ee2925059ee158e23c54620b9063912b86c8"}, + {file = "cryptography-47.0.0.tar.gz", hash = "sha256:9f8e55fe4e63613a5e1cc5819030f27b97742d720203a087802ce4ce9ceb52bb"}, +] + +[package.dependencies] +cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""} +typing-extensions = {version = ">=4.13.2", markers = "python_full_version < \"3.11.0\""} + +[package.extras] +ssh = ["bcrypt (>=3.1.5)"] + [[package]] name = "decli" version = "0.6.3" @@ -520,22 +757,28 @@ files = [ ] [[package]] -name = "deprecated" -version = "1.3.1" -description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
+name = "decorator" +version = "5.2.1" +description = "Decorators for Humans" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" -groups = ["dev"] +python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "deprecated-1.3.1-py2.py3-none-any.whl", hash = "sha256:597bfef186b6f60181535a29fbe44865ce137a5079f295b479886c82729d5f3f"}, - {file = "deprecated-1.3.1.tar.gz", hash = "sha256:b1b50e0ff0c1fddaa5708a2c6b0a6588bb09b892825ab2b214ac9ea9d92a5223"}, + {file = "decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a"}, + {file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"}, ] -[package.dependencies] -wrapt = ">=1.10,<3" - -[package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"] +[[package]] +name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["docs"] +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] [[package]] name = "distlib" @@ -569,15 +812,15 @@ untokenize = ">=0.1.1,<0.2.0" tomli = ["tomli (>=2.0.0,<3.0.0) ; python_version < \"3.11\""] [[package]] -name = "dotty-dict" -version = "1.3.1" -description = "Dictionary wrapper for quick access to deeply nested keys." 
+name = "docutils" +version = "0.19" +description = "Docutils -- Python Documentation Utilities" optional = false -python-versions = ">=3.5,<4.0" -groups = ["dev"] +python-versions = ">=3.7" +groups = ["dev", "docs"] files = [ - {file = "dotty_dict-1.3.1-py3-none-any.whl", hash = "sha256:5022d234d9922f13aa711b4950372a06a6d64cb6d6db9ba43d0ba133ebfce31f"}, - {file = "dotty_dict-1.3.1.tar.gz", hash = "sha256:4b016e03b8ae265539757a53eba24b9bfda506fb94fbce0bee843c6f05541a15"}, + {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, + {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, ] [[package]] @@ -592,13 +835,25 @@ files = [ {file = "einops-0.8.1.tar.gz", hash = "sha256:de5d960a7a761225532e0f1959e5315ebeafc0cd43394732f103ca44b9837e84"}, ] +[[package]] +name = "entrypoints" +version = "0.4" +description = "Discover and load entry points from installed packages." 
+optional = false +python-versions = ">=3.6" +groups = ["docs"] +files = [ + {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"}, + {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"}, +] + [[package]] name = "exceptiongroup" version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" -groups = ["dev"] +groups = ["dev", "docs"] markers = "python_version == \"3.10\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, @@ -608,6 +863,36 @@ files = [ [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "executing" +version = "2.2.1" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017"}, + {file = "executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] + +[[package]] +name = "fastjsonschema" +version = "2.21.2" +description = "Fastest Python implementation of JSON schema" +optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "fastjsonschema-2.21.2-py3-none-any.whl", hash = "sha256:1c797122d0a86c5cace2e54bf4e819c36223b552017172f32c5c024a6b77e463"}, + {file = "fastjsonschema-2.21.2.tar.gz", hash = "sha256:b1eb43748041c880796cd077f1a07c3d94e93ae84bba5ed36800a33554ae05de"}, +] + +[package.extras] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", 
"validictory"] + [[package]] name = "filelock" version = "3.17.0" @@ -770,40 +1055,6 @@ test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] tqdm = ["tqdm"] -[[package]] -name = "gitdb" -version = "4.0.12" -description = "Git Object Database" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf"}, - {file = "gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571"}, -] - -[package.dependencies] -smmap = ">=3.0.1,<6" - -[[package]] -name = "gitpython" -version = "3.1.45" -description = "GitPython is a Python library used to interact with Git repositories" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77"}, - {file = "gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c"}, -] - -[package.dependencies] -gitdb = ">=4.0.1,<5" - -[package.extras] -doc = ["sphinx (>=7.1.2,<7.2)", "sphinx-autodoc-typehints", "sphinx_rtd_theme"] -test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock ; python_version < \"3.8\"", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions ; 
python_version < \"3.11\""] - [[package]] name = "huggingface-hub" version = "0.29.1" @@ -839,6 +1090,26 @@ testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gr torch = ["safetensors[torch]", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] +[[package]] +name = "id" +version = "1.6.1" +description = "A tool for generating OIDC identities" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "id-1.6.1-py3-none-any.whl", hash = "sha256:f5ec41ed2629a508f5d0988eda142e190c9c6da971100612c4de9ad9f9b237ca"}, + {file = "id-1.6.1.tar.gz", hash = "sha256:d0732d624fb46fd4e7bc4e5152f00214450953b9e772c182c1c22964def1a069"}, +] + +[package.dependencies] +urllib3 = ">=2,<3" + +[package.extras] +dev = ["build", "bump (>=1.3.2)", "id[lint,test]"] +lint = ["bandit", "interrogate", "mypy", "ruff (<0.14.15)"] +test = ["coverage[toml]", "pretend", "pytest", "pytest-cov"] + [[package]] name = "identify" version = "2.6.8" @@ -860,7 +1131,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -870,24 +1141,41 @@ files = [ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] [[package]] -name = "importlib-resources" -version = "6.5.2" -description = "Read resources from Python packages" +name = "imagesize" +version = "2.0.0" +description = "Get image size from headers (BMP/PNG/JPEG/JPEG2000/GIF/TIFF/SVG/Netpbm/WebP/AVIF/HEIC/HEIF)" optional = false -python-versions = ">=3.9" +python-versions = "<3.15,>=3.10" +groups = ["docs"] 
+files = [ + {file = "imagesize-2.0.0-py2.py3-none-any.whl", hash = "sha256:5667c5bbb57ab3f1fa4bc366f4fbc971db3d5ed011fd2715fd8001f782718d96"}, + {file = "imagesize-2.0.0.tar.gz", hash = "sha256:8e8358c4a05c304f1fccf7ff96f036e7243a189e9e42e90851993c558cfe9ee3"}, +] + +[[package]] +name = "importlib-metadata" +version = "9.0.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.10" groups = ["dev"] +markers = "python_version < \"3.12\" and platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" files = [ - {file = "importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec"}, - {file = "importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c"}, + {file = "importlib_metadata-9.0.0-py3-none-any.whl", hash = "sha256:2d21d1cc5a017bd0559e36150c21c830ab1dc304dedd1b7ea85d20f45ef3edd7"}, + {file = "importlib_metadata-9.0.0.tar.gz", hash = "sha256:a4f57ab599e6a2e3016d7595cfd72eb4661a5106e787a95bcc90c7105b831efc"}, ] +[package.dependencies] +zipp = ">=3.20" + [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +check = ["pytest-checkdocs (>=2.14)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "zipp (>=3.17)"] -type = ["pytest-mypy"] +enabler = ["pytest-enabler (>=3.4)"] +perf = ["ipython"] +test = ["packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy (>=1.0.1) ; platform_python_implementation != \"PyPy\""] [[package]] name = "iniconfig" @@ -902,82 +1190,402 @@ files = [ ] [[package]] -name = "jinja2" -version = "3.1.6" -description = "A very fast and 
expressive template engine." +name = "ipython" +version = "8.39.0" +description = "IPython: Productive Interactive Computing" optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] +python-versions = ">=3.10" +groups = ["docs"] +markers = "python_version == \"3.10\"" files = [ - {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, - {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, + {file = "ipython-8.39.0-py3-none-any.whl", hash = "sha256:bb3c51c4fa8148ab1dea07a79584d1c854e234ea44aa1283bcb37bc75054651f"}, + {file = "ipython-8.39.0.tar.gz", hash = "sha256:4110ae96012c379b8b6db898a07e186c40a2a1ef5d57a7fa83166047d9da7624"}, ] [package.dependencies] -MarkupSafe = ">=2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} +prompt_toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.4.0" +stack_data = "*" +traitlets = ">=5.13.0" +typing_extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} [package.extras] -i18n = ["Babel (>=2.7)"] +all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "intersphinx_registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli ; python_version < \"3.11\"", "typing_extensions"] +kernel = ["ipykernel"] +matplotlib = ["matplotlib"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["packaging", "pickleshare", "pytest", 
"pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "ipython[test]", "jupyter_ai", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] [[package]] -name = "joblib" -version = "1.4.2" -description = "Lightweight pipelining with Python functions" +name = "ipython" +version = "9.13.0" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.11" +groups = ["docs"] +markers = "python_version >= \"3.11\"" +files = [ + {file = "ipython-9.13.0-py3-none-any.whl", hash = "sha256:57f9d4639e20818d328d287c7b549af3d05f12486ea8f2e7f73e52a36ec4d201"}, + {file = "ipython-9.13.0.tar.gz", hash = "sha256:7e834b6afc99f020e3f05966ced34792f40267d64cb1ea9043886dab0dde5967"}, +] + +[package.dependencies] +colorama = {version = ">=0.4.4", markers = "sys_platform == \"win32\""} +decorator = ">=5.1.0" +ipython-pygments-lexers = ">=1.0.0" +jedi = ">=0.18.2" +matplotlib-inline = ">=0.1.6" +pexpect = {version = ">4.6", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} +prompt_toolkit = ">=3.0.41,<3.1.0" +psutil = ">=7" +pygments = ">=2.14.0" +stack_data = ">=0.6.0" +traitlets = ">=5.13.0" +typing_extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} + +[package.extras] +all = ["argcomplete (>=3.0)", "ipython[doc,matplotlib,terminal,test,test-extra]", "types-decorator"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "intersphinx_registry", "ipykernel", "ipython[matplotlib,test]", "setuptools (>=80.0)", "sphinx (>=8.0)", "sphinx-rtd-theme (>=0.1.8)", "sphinx_toml (==0.0.4)", "typing_extensions"] +matplotlib = ["matplotlib (>3.9)"] +test = ["packaging (>=23.0.0)", "pytest (>=7.0.0)", "pytest-asyncio (>=1.0.0)", "setuptools (>=80.0)", "testpath (>=0.2)"] +test-extra = ["curio", "ipykernel (>6.30)", "ipython[matplotlib]", "ipython[test]", "jupyter_ai", "nbclient", "nbformat", "numpy (>=2.0)", "pandas (>2.1)", "trio (>=0.22.0)"] + +[[package]] +name = "ipython-pygments-lexers" 
+version = "1.1.1" +description = "Defines a variety of Pygments lexers for highlighting IPython code." optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["docs"] +markers = "python_version >= \"3.11\"" files = [ - {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, - {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, + {file = "ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c"}, + {file = "ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81"}, ] +[package.dependencies] +pygments = "*" + [[package]] -name = "lightning" -version = "2.5.0.post0" -description = "The Deep Learning framework to train, deploy, and ship AI products Lightning fast." +name = "jaraco-classes" +version = "3.4.0" +description = "Utility functions for Python class constructs" optional = false -python-versions = ">=3.9" -groups = ["main"] +python-versions = ">=3.8" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" files = [ - {file = "lightning-2.5.0.post0-py3-none-any.whl", hash = "sha256:b08463326e6fb39cb3e4db8ff2660a80ce3372a0688c80e3370c091346ea220c"}, - {file = "lightning-2.5.0.post0.tar.gz", hash = "sha256:f720fe4f6d03a7f15f9aef3112c5a0d1eafd8d27b903f4a1354b609685b2ec70"}, + {file = "jaraco.classes-3.4.0-py3-none-any.whl", hash = "sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790"}, + {file = "jaraco.classes-3.4.0.tar.gz", hash = "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd"}, ] [package.dependencies] -fsspec = {version = ">=2022.5.0,<2026.0", extras = ["http"]} -lightning-utilities = ">=0.10.0,<2.0" -packaging = ">=20.0,<25.0" -pytorch-lightning = "*" -PyYAML = ">=5.4,<8.0" -torch = 
">=2.1.0,<4.0" -torchmetrics = ">=0.7.0,<3.0" -tqdm = ">=4.57.0,<6.0" -typing-extensions = ">=4.4.0,<6.0" +more-itertools = "*" [package.extras] -all = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "hydra-core (>=1.2.0,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.2.3,<3.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] -data = ["litdata (>=0.2.0rc,<1.0)"] -dev = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "click (==8.1.7)", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "fastapi", "hydra-core (>=1.2.0,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "numpy (>=1.17.2,<2.0)", "omegaconf (>=2.2.3,<3.0)", "onnx (>=1.12.0,<2.0)", "onnxruntime (>=1.12.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchmetrics (>=0.7.0,<2.0)", "torchvision (>=0.16.0,<1.0)", "uvicorn"] -examples = ["ipython[all] (<9.0)", "lightning-utilities (>=0.8.0,<1.0)", "requests (<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] -extra = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == 
\"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "hydra-core (>=1.2.0,<2.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.2.3,<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)"] -fabric-all = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] -fabric-dev = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "click (==8.1.7)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "lightning-utilities (>=0.8.0,<1.0)", "numpy (>=1.17.2,<2.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchmetrics (>=0.7.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] -fabric-examples = ["lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] -fabric-strategies = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\""] -fabric-test = ["click (==8.1.7)", "coverage (==7.3.1)", "numpy (>=1.17.2,<2.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.7.0,<2.0)"] -pytorch-all = ["bitsandbytes (>=0.42.0,<1.0) ; 
sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "hydra-core (>=1.2.0,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.2.3,<3.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] -pytorch-dev = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "fastapi", "hydra-core (>=1.2.0,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "numpy (>=1.17.2,<2.0)", "omegaconf (>=2.2.3,<3.0)", "onnx (>=1.12.0,<2.0)", "onnxruntime (>=1.12.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)", "uvicorn"] -pytorch-examples = ["ipython[all] (<9.0)", "lightning-utilities (>=0.8.0,<1.0)", "requests (<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] -pytorch-extra = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "hydra-core (>=1.2.0,<2.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.2.3,<3.0)", "rich 
(>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)"] -pytorch-strategies = ["deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\""] -pytorch-test = ["cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "fastapi", "numpy (>=1.17.2,<2.0)", "onnx (>=1.12.0,<2.0)", "onnxruntime (>=1.12.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "uvicorn"] -strategies = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\""] -test = ["click (==8.1.7)", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "fastapi", "numpy (>=1.17.2,<2.0)", "onnx (>=1.12.0,<2.0)", "onnxruntime (>=1.12.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.7.0,<2.0)", "uvicorn"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [[package]] -name = "lightning-utilities" -version = "0.12.0" -description = "Lightning toolbox for across the our ecosystem." 
+name = "jaraco-context" +version = "6.1.2" +description = "Useful decorators and context managers" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" +files = [ + {file = "jaraco_context-6.1.2-py3-none-any.whl", hash = "sha256:bf8150b79a2d5d91ae48629d8b427a8f7ba0e1097dd6202a9059f29a36379535"}, + {file = "jaraco_context-6.1.2.tar.gz", hash = "sha256:f1a6c9d391e661cc5b8d39861ff077a7dc24dc23833ccee564b234b81c82dfe3"}, +] + +[package.dependencies] +"backports.tarfile" = {version = "*", markers = "python_version < \"3.12\""} + +[package.extras] +check = ["pytest-checkdocs (>=2.14)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=3.4)"] +test = ["jaraco.test (>=5.6.0)", "portend", "pytest (>=6,!=8.1.*)"] +type = ["pytest-mypy (>=1.0.1) ; platform_python_implementation != \"PyPy\""] + +[[package]] +name = "jaraco-functools" +version = "4.4.0" +description = "Functools like those found in stdlib" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" +files = [ + {file = "jaraco_functools-4.4.0-py3-none-any.whl", hash = "sha256:9eec1e36f45c818d9bf307c8948eb03b2b56cd44087b3cdc989abca1f20b9176"}, + {file = "jaraco_functools-4.4.0.tar.gz", hash = "sha256:da21933b0417b89515562656547a77b4931f98176eb173644c0d35032a33d6bb"}, +] + +[package.dependencies] +more_itertools = "*" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=3.4)"] +test = ["jaraco.classes", "pytest (>=6,!=8.1.*)"] 
+type = ["mypy (<1.19) ; platform_python_implementation == \"PyPy\"", "pytest-mypy (>=1.0.1)"] + +[[package]] +name = "jedi" +version = "0.19.2" +description = "An autocompletion tool for Python that can be used for text editors." +optional = false +python-versions = ">=3.6" +groups = ["docs"] +files = [ + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, +] + +[package.dependencies] +parso = ">=0.8.4,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] + +[[package]] +name = "jeepney" +version = "0.9.0" +description = "Low-level, pure Python DBus protocol wrapper." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\"" +files = [ + {file = "jeepney-0.9.0-py3-none-any.whl", hash = "sha256:97e5714520c16fc0a45695e5365a2e11b81ea79bba796e26f9f1d178cb182683"}, + {file = "jeepney-0.9.0.tar.gz", hash = "sha256:cf0e9e845622b81e4a28df94c40345400256ec608d0e55bb8a3feaa9163f5732"}, +] + +[package.extras] +test = ["async-timeout ; python_version < \"3.11\"", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"] +trio = ["trio"] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["main", "dev", "docs"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "joblib" +version = "1.4.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, + {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, +] + +[[package]] +name = "jsonschema" +version = "4.26.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.10" +groups = ["docs"] +files = [ + {file = "jsonschema-4.26.0-py3-none-any.whl", hash = "sha256:d489f15263b8d200f8387e64b4c3a75f06629559fb73deb8fdfb525f2dab50ce"}, + {file = "jsonschema-4.26.0.tar.gz", hash = 
"sha256:0c26707e2efad8aa1bfc5b7ce170f3fccc2e4918ff85989ba9ffa9facb2be326"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.25.0" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "rfc3987-syntax (>=1.1.0)", "uri-template", "webcolors (>=24.6.0)"] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe"}, + {file = "jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "jupyter-client" +version = "8.8.0" +description = "Jupyter protocol implementation and client libraries" +optional = false +python-versions = ">=3.10" +groups = ["docs"] +files = [ + {file = "jupyter_client-8.8.0-py3-none-any.whl", hash = "sha256:f93a5b99c5e23a507b773d3a1136bd6e16c67883ccdbd9a829b0bbdb98cd7d7a"}, + {file = "jupyter_client-8.8.0.tar.gz", hash = "sha256:d556811419a4f2d96c869af34e854e3f059b7cc2d6d01a9cd9c85c267691be3e"}, +] + +[package.dependencies] +jupyter-core = ">=5.1" +python-dateutil = ">=2.8.2" +pyzmq = ">=25.0" +tornado = ">=6.4.1" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +orjson = ["orjson"] +test = ["anyio", "coverage", "ipykernel (>=6.14)", 
"msgpack", "mypy ; platform_python_implementation != \"PyPy\"", "paramiko ; sys_platform == \"win32\"", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.6.2)", "pytest-timeout"] + +[[package]] +name = "jupyter-core" +version = "5.9.1" +description = "Jupyter core package. A base package on which Jupyter projects rely." +optional = false +python-versions = ">=3.10" +groups = ["docs"] +files = [ + {file = "jupyter_core-5.9.1-py3-none-any.whl", hash = "sha256:ebf87fdc6073d142e114c72c9e29a9d7ca03fad818c5d300ce2adc1fb0743407"}, + {file = "jupyter_core-5.9.1.tar.gz", hash = "sha256:4d09aaff303b9566c3ce657f580bd089ff5c91f5f89cf7d8846c3cdf465b5508"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +traitlets = ">=5.3" + +[package.extras] +docs = ["intersphinx-registry", "myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest (<9)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +description = "Pygments theme using JupyterLab CSS variables" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"}, + {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"}, +] + +[[package]] +name = "keyring" +version = "25.7.0" +description = "Store and access your passwords safely." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" +files = [ + {file = "keyring-25.7.0-py3-none-any.whl", hash = "sha256:be4a0b195f149690c166e850609a477c532ddbfbaed96a404d4e43f8d5e2689f"}, + {file = "keyring-25.7.0.tar.gz", hash = "sha256:fe01bd85eb3f8fb3dd0405defdeac9a5b4f6f0439edbb3149577f244a2e8245b"}, +] + +[package.dependencies] +importlib_metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} +"jaraco.classes" = "*" +"jaraco.context" = "*" +"jaraco.functools" = "*" +jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} +pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} +SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +completion = ["shtab (>=1.1.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=3.4)"] +test = ["pyfakefs", "pytest (>=6,!=8.1.*)"] +type = ["pygobject-stubs", "pytest-mypy (>=1.0.1)", "shtab", "types-pywin32"] + +[[package]] +name = "lightning" +version = "2.5.0.post0" +description = "The Deep Learning framework to train, deploy, and ship AI products Lightning fast." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "lightning-2.5.0.post0-py3-none-any.whl", hash = "sha256:b08463326e6fb39cb3e4db8ff2660a80ce3372a0688c80e3370c091346ea220c"}, + {file = "lightning-2.5.0.post0.tar.gz", hash = "sha256:f720fe4f6d03a7f15f9aef3112c5a0d1eafd8d27b903f4a1354b609685b2ec70"}, +] + +[package.dependencies] +fsspec = {version = ">=2022.5.0,<2026.0", extras = ["http"]} +lightning-utilities = ">=0.10.0,<2.0" +packaging = ">=20.0,<25.0" +pytorch-lightning = "*" +PyYAML = ">=5.4,<8.0" +torch = ">=2.1.0,<4.0" +torchmetrics = ">=0.7.0,<3.0" +tqdm = ">=4.57.0,<6.0" +typing-extensions = ">=4.4.0,<6.0" + +[package.extras] +all = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "hydra-core (>=1.2.0,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.2.3,<3.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] +data = ["litdata (>=0.2.0rc,<1.0)"] +dev = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "click (==8.1.7)", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "fastapi", "hydra-core (>=1.2.0,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "numpy (>=1.17.2,<2.0)", "omegaconf (>=2.2.3,<3.0)", "onnx (>=1.12.0,<2.0)", "onnxruntime (>=1.12.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", 
"pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchmetrics (>=0.7.0,<2.0)", "torchvision (>=0.16.0,<1.0)", "uvicorn"] +examples = ["ipython[all] (<9.0)", "lightning-utilities (>=0.8.0,<1.0)", "requests (<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] +extra = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "hydra-core (>=1.2.0,<2.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.2.3,<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)"] +fabric-all = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] +fabric-dev = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "click (==8.1.7)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "lightning-utilities (>=0.8.0,<1.0)", "numpy (>=1.17.2,<2.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchmetrics (>=0.7.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] +fabric-examples = ["lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] +fabric-strategies = ["bitsandbytes (>=0.42.0,<1.0) ; 
sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\""] +fabric-test = ["click (==8.1.7)", "coverage (==7.3.1)", "numpy (>=1.17.2,<2.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.7.0,<2.0)"] +pytorch-all = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "hydra-core (>=1.2.0,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.2.3,<3.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] +pytorch-dev = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\"", "fastapi", "hydra-core (>=1.2.0,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "numpy (>=1.17.2,<2.0)", "omegaconf (>=2.2.3,<3.0)", "onnx (>=1.12.0,<2.0)", "onnxruntime (>=1.12.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", 
"torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)", "uvicorn"] +pytorch-examples = ["ipython[all] (<9.0)", "lightning-utilities (>=0.8.0,<1.0)", "requests (<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.16.0,<1.0)"] +pytorch-extra = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "hydra-core (>=1.2.0,<2.0)", "jsonargparse[signatures] (>=4.27.7,<5.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.2.3,<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)"] +pytorch-strategies = ["deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\""] +pytorch-test = ["cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "fastapi", "numpy (>=1.17.2,<2.0)", "onnx (>=1.12.0,<2.0)", "onnxruntime (>=1.12.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "uvicorn"] +strategies = ["bitsandbytes (>=0.42.0,<1.0) ; sys_platform == \"darwin\"", "bitsandbytes (>=0.44.0,<1.0) ; sys_platform == \"linux\" or sys_platform == \"win32\"", "deepspeed (>=0.8.2,<=0.9.3) ; platform_system != \"Windows\" and platform_system != \"Darwin\""] +test = ["click (==8.1.7)", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "fastapi", "numpy (>=1.17.2,<2.0)", "onnx (>=1.12.0,<2.0)", "onnxruntime (>=1.12.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.7.0,<2.0)", "uvicorn"] + +[[package]] +name = "lightning-utilities" +version = "0.12.0" +description = "Lightning toolbox for across the our ecosystem." 
optional = false python-versions = ">=3.9" groups = ["main"] @@ -996,16 +1604,197 @@ cli = ["fire"] docs = ["requests (>=2.0.0)"] typing = ["fire", "mypy (>=1.0.0)", "types-setuptools"] +[[package]] +name = "lxml" +version = "6.1.0" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "lxml-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41dcc4c7b10484257cbd6c37b83ddb26df2b0e5aff5ac00d095689015af868ec"}, + {file = "lxml-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a31286dbb5e74c8e9a5344465b77ab4c5bd511a253b355b5ca2fae7e579fafec"}, + {file = "lxml-6.1.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1bc4cc83fb7f66ffb16f74d6dd0162e144333fc36ebcce32246f80c8735b2551"}, + {file = "lxml-6.1.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:20cf4d0651987c906a2f5cba4e3a8d6ba4bfdf973cfe2a96c0d6053888ea2ecd"}, + {file = "lxml-6.1.0-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffb34ea45a82dd637c2c97ae1bbb920850c1e59bcae79ce1c15af531d83e7215"}, + {file = "lxml-6.1.0-cp310-cp310-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a1d9b99e5b2597e4f5aed2484fef835256fa1b68a19e4265c97628ef4bf8bcf4"}, + {file = "lxml-6.1.0-cp310-cp310-manylinux_2_28_i686.whl", hash = "sha256:d43aa26dcda363f21e79afa0668f5029ed7394b3bb8c92a6927a3d34e8b610ea"}, + {file = "lxml-6.1.0-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:6262b87f9e5c1e5fe501d6c153247289af42eb44ad7660b9b3de17baaf92d6f6"}, + {file = "lxml-6.1.0-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d1392c569c032f78a11a25d1de1c43fff13294c793b39e19d84fade3045cbbc3"}, + {file = "lxml-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:045e387d1f4f42a418380930fa3f45c73c9b392faf67e495e58902e68e8f44a7"}, + {file = 
"lxml-6.1.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:9f93d5b8b07f73e8c77e3c6556a3db269918390c804b5e5fcdd4858232cc8f16"}, + {file = "lxml-6.1.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:de550d129f18d8ab819651ffe4f38b1b713c7e116707de3c0c6400d0ef34fbc1"}, + {file = "lxml-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c08da09dc003c9e8c70e06b53a11db6fb3b250c21c4236b03c7d7b443c318e7a"}, + {file = "lxml-6.1.0-cp310-cp310-win32.whl", hash = "sha256:37448bf9c7d7adfc5254763901e2bbd6bb876228dfc1fc7f66e58c06368a7544"}, + {file = "lxml-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:2593a0a6621545b9095b71ad74ed4226eba438a7d9fc3712a99bdb15508cf93a"}, + {file = "lxml-6.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:e80807d72f96b96ad5588cb85c75616e4f2795a7737d4630784c51497beb7776"}, + {file = "lxml-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cec05be8c876f92a5aa07b01d60bbb4d11cfbdd654cad0561c0d7b5c043a61b9"}, + {file = "lxml-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9c03e048b6ce8e77b09c734e931584894ecd58d08296804ca2d0b184c933ce50"}, + {file = "lxml-6.1.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:942454ff253da14218f972b23dc72fa4edf6c943f37edd19cd697618b626fac5"}, + {file = "lxml-6.1.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d036ee7b99d5148072ac7c9b847193decdfeac633db350363f7bce4fff108f0e"}, + {file = "lxml-6.1.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ae5d8d5427f3cc317e7950f2da7ad276df0cfa37b8de2f5658959e618ea8512"}, + {file = "lxml-6.1.0-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:363e47283bde87051b821826e71dde47f107e08614e1aa312ba0c5711e77738c"}, + {file = "lxml-6.1.0-cp311-cp311-manylinux_2_28_i686.whl", hash = "sha256:f504d861d9f2a8f94020130adac88d66de93841707a23a86244263d1e54682f5"}, + {file = "lxml-6.1.0-cp311-cp311-manylinux_2_31_armv7l.whl", hash = 
"sha256:23a5dc68e08ed13331d61815c08f260f46b4a60fdd1640bbeb82cf89a9d90289"}, + {file = "lxml-6.1.0-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f15401d8d3dbf239e23c818afc10c7207f7b95f9a307e092122b6f86dd43209a"}, + {file = "lxml-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fcf3da95e93349e0647d48d4b36a12783105bcc74cb0c416952f9988410846a3"}, + {file = "lxml-6.1.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:0d082495c5fcf426e425a6e28daaba1fcb6d8f854a4ff01effb1f1f381203eb9"}, + {file = "lxml-6.1.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:e3c4f84b24a1fcba435157d111c4b755099c6ff00a3daee1ad281817de75ed11"}, + {file = "lxml-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:976a6b39b1b13e8c354ad8d3f261f3a4ac6609518af91bdb5094760a08f132c4"}, + {file = "lxml-6.1.0-cp311-cp311-win32.whl", hash = "sha256:857efde87d365706590847b916baff69c0bc9252dc5af030e378c9800c0b10e3"}, + {file = "lxml-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:183bfb45a493081943be7ea2b5adfc2b611e1cf377cefa8b8a8be404f45ef9a7"}, + {file = "lxml-6.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:19f4164243fc206d12ed3d866e80e74f5bc3627966520da1a5f97e42c32a3f39"}, + {file = "lxml-6.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d2f17a16cd8751e8eb233a7e41aecdf8e511712e00088bf9be455f604cd0d28d"}, + {file = "lxml-6.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f0cea5b1d3e6e77d71bd2b9972eb2446221a69dc52bb0b9c3c6f6e5700592d93"}, + {file = "lxml-6.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc46da94826188ed45cb53bd8e3fc076ae22675aea2087843d4735627f867c6d"}, + {file = "lxml-6.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9147d8e386ec3b82c3b15d88927f734f565b0aaadef7def562b853adca45784a"}, + {file = "lxml-6.1.0-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:5715e0e28736a070f3f34a7ccc09e2fdcba0e3060abbcf61a1a5718ff6d6b105"}, + {file = "lxml-6.1.0-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4937460dc5df0cdd2f06a86c285c28afda06aefa3af949f9477d3e8df430c485"}, + {file = "lxml-6.1.0-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bc783ee3147e60a25aa0445ea82b3e8aabb83b240f2b95d32cb75587ff781814"}, + {file = "lxml-6.1.0-cp312-cp312-manylinux_2_28_i686.whl", hash = "sha256:40d9189f80075f2e1f88db21ef815a2b17b28adf8e50aaf5c789bfe737027f32"}, + {file = "lxml-6.1.0-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:05b9b8787e35bec69e68daf4952b2e6dfcfb0db7ecf1a06f8cdfbbac4eb71aad"}, + {file = "lxml-6.1.0-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0f0f08beb0182e3e9a86fae124b3c47a7b41b7b69b225e1377db983802404e54"}, + {file = "lxml-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73becf6d8c81d4c76b1014dbd3584cb26d904492dcf73ca85dc8bff08dcd6d2d"}, + {file = "lxml-6.1.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1ae225f66e5938f4fa29d37e009a3bb3b13032ac57eb4eb42afa44f6e4054e69"}, + {file = "lxml-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:690022c7fae793b0489aa68a658822cea83e0d5933781811cabbf5ea3bcfe73d"}, + {file = "lxml-6.1.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:63aeafc26aac0be8aff14af7871249e87ea1319be92090bfd632ec68e03b16a5"}, + {file = "lxml-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:264c605ab9c0e4aa1a679636f4582c4d3313700009fac3ec9c3412ed0d8f3e1d"}, + {file = "lxml-6.1.0-cp312-cp312-win32.whl", hash = "sha256:56971379bc5ee8037c5a0f09fa88f66cdb7d37c3e38af3e45cf539f41131ac1f"}, + {file = "lxml-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:bba078de0031c219e5dd06cf3e6bf8fb8e6e64a77819b358f53bb132e3e03366"}, + {file = "lxml-6.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:c3592631e652afa34999a088f98ba7dfc7d6aff0d535c410bea77a71743f3819"}, + {file = 
"lxml-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a0092f2b107b69601adf562a57c956fbb596e05e3e6651cabd3054113b007e45"}, + {file = "lxml-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fc7140d7a7386e6b545d41b7358f4d02b656d4053f5fa6859f92f4b9c2572c4d"}, + {file = "lxml-6.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:419c58fc92cc3a2c3fa5f78c63dbf5da70c1fa9c1b25f25727ecee89a96c7de2"}, + {file = "lxml-6.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:37fabd1452852636cf38ecdcc9dd5ca4bba7a35d6c53fa09725deeb894a87491"}, + {file = "lxml-6.1.0-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2853c8b2170cc6cd54a6b4d50d2c1a8a7aeca201f23804b4898525c7a152cfc"}, + {file = "lxml-6.1.0-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8e369cbd690e788c8d15e56222d91a09c6a417f49cbc543040cba0fe2e25a79e"}, + {file = "lxml-6.1.0-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e69aa6805905807186eb00e66c6d97a935c928275182eb02ee40ba00da9623b2"}, + {file = "lxml-6.1.0-cp313-cp313-manylinux_2_28_i686.whl", hash = "sha256:4bd1bdb8a9e0e2dd229de19b5f8aebac80e916921b4b2c6ef8a52bc131d0c1f9"}, + {file = "lxml-6.1.0-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:cbd7b79cdcb4986ad78a2662625882747f09db5e4cd7b2ae178a88c9c51b3dfe"}, + {file = "lxml-6.1.0-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:43e4d297f11080ec9d64a4b1ad7ac02b4484c9f0e2179d9c4ef78e886e747b88"}, + {file = "lxml-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cc16682cc987a3da00aa56a3aa3075b08edb10d9b1e476938cfdbee8f3b67181"}, + {file = "lxml-6.1.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d6d8efe71429635f0559579092bb5e60560d7b9115ee38c4adbea35632e7fa24"}, + {file = "lxml-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:7e39ab3a28af7784e206d8606ec0e4bcad0190f63a492bca95e94e5a4aef7f6e"}, + {file = "lxml-6.1.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:9eb667bf50856c4a58145f8ca2d5e5be160191e79eb9e30855a476191b3c3495"}, + {file = "lxml-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7f4a77d6f7edf9230cee3e1f7f6764722a41604ee5681844f18db9a81ea0ec33"}, + {file = "lxml-6.1.0-cp313-cp313-win32.whl", hash = "sha256:28902146ffbe5222df411c5d19e5352490122e14447e98cd118907ee3fd6ee62"}, + {file = "lxml-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:4a1503c56e4e2b38dc76f2f2da7bae69670c0f1933e27cfa34b2fa5876410b16"}, + {file = "lxml-6.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:e0af85773850417d994d019741239b901b22c6680206f46a34766926e466141d"}, + {file = "lxml-6.1.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:ab863fd37458fed6456525f297d21239d987800c46e67da5ef04fc6b3dd93ac8"}, + {file = "lxml-6.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:6fd8b1df8254ff4fd93fd31da1fc15770bde23ac045be9bb1f87425702f61cc9"}, + {file = "lxml-6.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:47024feaae386a92a146af0d2aeed65229bf6fff738e6a11dda6b0015fb8fd03"}, + {file = "lxml-6.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3f00972f84450204cd5d93a5395965e348956aaceaadec693a22ec743f8ae3eb"}, + {file = "lxml-6.1.0-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97faa0860e13b05b15a51fb4986421ef7a30f0b3334061c416e0981e9450ca4c"}, + {file = "lxml-6.1.0-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:972a6451204798675407beaad97b868d0c733d9a74dafefc63120b81b8c2de28"}, + {file = "lxml-6.1.0-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fe022f20bc4569ec66b63b3fb275a3d628d9d32da6326b2982584104db6d3086"}, + {file = "lxml-6.1.0-cp314-cp314-manylinux_2_28_i686.whl", hash = 
"sha256:75c4c7c619a744f972f4451bf5adf6d0fb00992a1ffc9fd78e13b0bc817cc99f"}, + {file = "lxml-6.1.0-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:3648f20d25102a22b6061c688beb3a805099ea4beb0a01ce62975d926944d292"}, + {file = "lxml-6.1.0-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:77b9f99b17cbf14026d1e618035077060fc7195dd940d025149f3e2e830fbfcb"}, + {file = "lxml-6.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:32662519149fd7a9db354175aa5e417d83485a8039b8aaa62f873ceee7ea4cad"}, + {file = "lxml-6.1.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:73d658216fc173cf2c939e90e07b941c5e12736b0bf6a99e7af95459cfe8eabb"}, + {file = "lxml-6.1.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ac4db068889f8772a4a698c5980ec302771bb545e10c4b095d4c8be26749616f"}, + {file = "lxml-6.1.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:45e9dfbd1b661eb64ba0d4dbe762bd210c42d86dd1e5bd2bdf89d634231beb43"}, + {file = "lxml-6.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:89e8d73d09ac696a5ba42ec69787913d53284f12092f651506779314f10ba585"}, + {file = "lxml-6.1.0-cp314-cp314-win32.whl", hash = "sha256:ebe33f4ec1b2de38ceb225a1749a2965855bffeef435ba93cd2d5d540783bf2f"}, + {file = "lxml-6.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:398443df51c538bd578529aa7e5f7afc6c292644174b47961f3bf87fe5741120"}, + {file = "lxml-6.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:8c8984e1d8c4b3949e419158fda14d921ff703a9ed8a47236c6eb7a2b6cb4946"}, + {file = "lxml-6.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:1081dd10bc6fa437db2500e13993abf7cc30716d0a2f40e65abb935f02ec559c"}, + {file = "lxml-6.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:dabecc48db5f42ba348d1f5d5afdc54c6c4cc758e676926c7cd327045749517d"}, + {file = "lxml-6.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e3dd5fe19c9e0ac818a9c7f132a5e43c1339ec1cbbfecb1a938bd3a47875b7c9"}, + {file = 
"lxml-6.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9e7b0a4ca6dcc007a4cef00a761bba2dea959de4bd2df98f926b33c92ca5dfb9"}, + {file = "lxml-6.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d27bbe326c6b539c64b42638b18bc6003a8d88f76213a97ac9ed4f885efeab7"}, + {file = "lxml-6.1.0-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4e425db0c5445ef0ad56b0eec54f89b88b2d884656e536a90b2f52aecb4ca86"}, + {file = "lxml-6.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4b89b098105b8599dc57adac95d1813409ac476d3c948a498775d3d0c6124bfb"}, + {file = "lxml-6.1.0-cp314-cp314t-manylinux_2_28_i686.whl", hash = "sha256:c4a699432846df86cc3de502ee85f445ebad748a1c6021d445f3e514d2cd4b1c"}, + {file = "lxml-6.1.0-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:30e7b2ed63b6c8e97cca8af048589a788ab5c9c905f36d9cf1c2bb549f450d2f"}, + {file = "lxml-6.1.0-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:022981127642fe19866d2907d76241bb07ed21749601f727d5d5dd1ce5d1b773"}, + {file = "lxml-6.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:23cad0cc86046d4222f7f418910e46b89971c5a45d3c8abfad0f64b7b05e4a9b"}, + {file = "lxml-6.1.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:21c3302068f50d1e8728c67c87ba92aa87043abee517aa2576cca1855326b405"}, + {file = "lxml-6.1.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:be10838781cb3be19251e276910cd508fe127e27c3242e50521521a0f3781690"}, + {file = "lxml-6.1.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:2173a7bffe97667bbf0767f8a99e587740a8c56fdf3befac4b09cb29a80276fd"}, + {file = "lxml-6.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c6854e9cf99c84beb004eecd7d3a3868ef1109bf2b1df92d7bc11e96a36c2180"}, + {file = "lxml-6.1.0-cp314-cp314t-win32.whl", hash = "sha256:00750d63ef0031a05331b9223463b1c7c02b9004cef2346a5b2877f0f9494dd2"}, + {file = 
"lxml-6.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:80410c3a7e3c617af04de17caa9f9f20adaa817093293d69eae7d7d0522836f5"}, + {file = "lxml-6.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:26dd9f57ee3bd41e7d35b4c98a2ffd89ed11591649f421f0ec19f67d50ec67ac"}, + {file = "lxml-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b6c2f225662bc5ad416bdd06f72ca301b31b39ce4261f0e0097017fc2891b940"}, + {file = "lxml-6.1.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a86f06f059e22a0d574990ee2df24ede03f7f3c68c1336293eee9536c4c776cd"}, + {file = "lxml-6.1.0-cp38-cp38-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:468479e52ecf3ec23799c863336d02c05fc2f7ffd1a1424eeeb9a28d4eb69d13"}, + {file = "lxml-6.1.0-cp38-cp38-manylinux_2_28_i686.whl", hash = "sha256:a02ca8fe48815bddcfca3248efe54451abb9dbf2f7d1c5744c8aa4142d476919"}, + {file = "lxml-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bb40648d96157f9081886defe13eac99253e663be969ff938a9289eff6e47b72"}, + {file = "lxml-6.1.0-cp38-cp38-win32.whl", hash = "sha256:1dd6a1c3ad4cb674f44525d9957f3e9c209bb6dd9213245195167a281fcc2bdc"}, + {file = "lxml-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:4e2c54d6b47361d0f1d3bc8d4e082ad87201e56ccdcca4d3b9ee3644ff595ec8"}, + {file = "lxml-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:920354904d1cb86577d4b3cfe2830c2dbe81d6f4449e57ada428f1609b5985f7"}, + {file = "lxml-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c871299c595ee004d186f61840f0bfc4941aa3f17c8ba4a565ead7e4f4f820ee"}, + {file = "lxml-6.1.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d0d799ff958655781296ec870d5e2448e75150da2b3d07f13ff5b0c2c35beefd"}, + {file = "lxml-6.1.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7ba11752e346bd804ea312ec2eea2532dfa8b8d3261d81a32ef9e6ab16256280"}, + {file = "lxml-6.1.0-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:26c5272c6a4bf4cf32d3f5a7890c942b0e04438691157d341616d02cca74d4bd"}, + {file = "lxml-6.1.0-cp39-cp39-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c53fa3a5a52122d590e847a57ccf955557b9634a7f99ff5a35131321b0a85317"}, + {file = "lxml-6.1.0-cp39-cp39-manylinux_2_28_i686.whl", hash = "sha256:76b958b4ea3104483c20f74866d55aa056546e15ebe83dd7aecd63698f43b755"}, + {file = "lxml-6.1.0-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:8c11b984b5ce6add4dccc7144c7be5d364d298f15b0c6a57da1991baedc750ce"}, + {file = "lxml-6.1.0-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d3829a6e6fd550a219564912d4002c537f65da4c6ae4e093cc34462f4fa027ad"}, + {file = "lxml-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:52b0ac6903cf74ebf997eb8c682d2fbac7d1ab7e4c552413eec55868a9b73f39"}, + {file = "lxml-6.1.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:29f5c00cb7d752bce2c70ebd2d31b0a42f9499ffdd3ecb2f31a5b73ee43031ad"}, + {file = "lxml-6.1.0-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:c748ebcb6877de89f48ab90ca96642ac458fff5dec291a2b9337cd4d0934e383"}, + {file = "lxml-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:08950a23f296b3f83521577274e3d3b0f3d739bf2e68d01a752e4288bc50d286"}, + {file = "lxml-6.1.0-cp39-cp39-win32.whl", hash = "sha256:11a873c77a181b4fef9c2e357d08ed399542c2af1390101da66720a19c7c9618"}, + {file = "lxml-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:81ff55c70b67d19d52b6fd118a114c0a4c97d799cd3089ff9bd9e2ff4b414ee2"}, + {file = "lxml-6.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:481d6e2104285d9add34f41b42b247b76b61c5b5c26c303c2e9707bbf8bd9a64"}, + {file = "lxml-6.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:546b66c0dd1bb8d9fa89d7123e5fa19a8aff3a1f2141eb22df96112afb17b842"}, + {file = "lxml-6.1.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5cfa1a34df366d9dc0d5eaf420f4cf2bb1e1bebe1066d1c2fc28c179f8a4004c"}, + {file = 
"lxml-6.1.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:db88156fcf544cdbf0d95588051515cfdfd4c876fc66444eb98bceb5d6db76de"}, + {file = "lxml-6.1.0-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:07f98f5496f96bf724b1e3c933c107f0cbf2745db18c03d2e13a291c3afd2635"}, + {file = "lxml-6.1.0-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4642e04449a1e164b5ff71ffd901ddb772dfabf5c9adf1b7be5dffe1212bc037"}, + {file = "lxml-6.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:7da13bb6fbadfafb474e0226a30570a3445cfd47c86296f2446dafbd77079ace"}, + {file = "lxml-6.1.0.tar.gz", hash = "sha256:bfd57d8008c4965709a919c3e9a98f76c2c7cb319086b3d26858250620023b13"}, +] + +[package.extras] +cssselect = ["cssselect (>=0.7)"] +html-clean = ["lxml_html_clean"] +html5 = ["html5lib"] +htmlsoup = ["BeautifulSoup4"] + +[[package]] +name = "lxml-html-clean" +version = "0.4.0" +description = "HTML cleaner from lxml project" +optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "lxml_html_clean-0.4.0-py3-none-any.whl", hash = "sha256:3b5aedb6c2b4b684c0fbc8d4f1b901aae0a92c1ce525de84e71cc6dd1d9d4e3d"}, + {file = "lxml_html_clean-0.4.0.tar.gz", hash = "sha256:a8b517d3f46c19e9303eafb2a1b4b422fe724ad42ae53793637a8e5cc36ffbc1"}, +] + +[package.dependencies] +lxml = "*" + +[[package]] +name = "m2r2" +version = "0.3.3.post2" +description = "Markdown and reStructuredText in a single file." 
+optional = false +python-versions = ">=3.7" +groups = ["docs"] +files = [ + {file = "m2r2-0.3.3.post2-py3-none-any.whl", hash = "sha256:86157721eb6eabcd54d4eea7195890cc58fa6188b8d0abea633383cfbb5e11e3"}, + {file = "m2r2-0.3.3.post2.tar.gz", hash = "sha256:e62bcb0e74b3ce19cda0737a0556b04cf4a43b785072fcef474558f2c1482ca8"}, +] + +[package.dependencies] +docutils = ">=0.19" +mistune = "0.8.4" + [[package]] name = "markdown-it-py" -version = "4.0.0" +version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false -python-versions = ">=3.10" -groups = ["dev"] +python-versions = ">=3.8" +groups = ["dev", "docs"] files = [ - {file = "markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147"}, - {file = "markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3"}, + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, ] [package.dependencies] @@ -1013,12 +1802,13 @@ mdurl = ">=0.1,<1.0" [package.extras] benchmarking = ["psutil", "pytest", "pytest-benchmark"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "markdown-it-pyrs", "mistletoe (>=1.0,<2.0)", "mistune (>=3.0,<4.0)", "panflute (>=2.3,<3.0)"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins (>=0.5.0)"] +plugins = ["mdit-py-plugins"] profiling = ["gprof2dot"] -rtd = ["ipykernel", "jupyter_sphinx", "mdit-py-plugins (>=0.5.0)", "myst-parser", "pyyaml", "sphinx", "sphinx-book-theme (>=1.0,<2.0)", "sphinx-copybutton", "sphinx-design"] -testing = 
["coverage", "pytest", "pytest-cov", "pytest-regressions", "requests"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" @@ -1026,7 +1816,7 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -1091,18 +1881,81 @@ files = [ {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, ] +[[package]] +name = "matplotlib-inline" +version = "0.2.1" +description = "Inline Matplotlib backend for Jupyter" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76"}, + {file = "matplotlib_inline-0.2.1.tar.gz", hash = "sha256:e1ee949c340d771fc39e241ea75683deb94762c8fa5f2927ec57c83c4dffa9fe"}, +] + +[package.dependencies] +traitlets = "*" + +[package.extras] +test = ["flake8", "nbdime", "nbval", "notebook", "pytest"] + +[[package]] +name = "mdit-py-plugins" +version = "0.5.0" +description = "Collection of plugins for markdown-it-py" +optional = false +python-versions = ">=3.10" +groups = ["docs"] +files = [ + {file = "mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f"}, + {file = "mdit_py_plugins-0.5.0.tar.gz", hash = 
"sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6"}, +] + +[package.dependencies] +markdown-it-py = ">=2.0.0,<5.0.0" + +[package.extras] +code-style = ["pre-commit"] +rtd = ["myst-parser", "sphinx-book-theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" -groups = ["dev"] +groups = ["dev", "docs"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] +[[package]] +name = "mistune" +version = "0.8.4" +description = "The fastest markdown parser in pure Python" +optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "mistune-0.8.4-py2.py3-none-any.whl", hash = "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"}, + {file = "mistune-0.8.4.tar.gz", hash = "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e"}, +] + +[[package]] +name = "more-itertools" +version = "11.0.2" +description = "More routines for operating on iterables, beyond itertools" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" +files = [ + {file = "more_itertools-11.0.2-py3-none-any.whl", hash = "sha256:6e35b35f818b01f691643c6c611bc0902f2e92b46c18fffa77ae1e7c46e912e4"}, + {file = "more_itertools-11.0.2.tar.gz", hash = "sha256:392a9e1e362cbc106a2457d37cabf9b36e5e12efd4ebff1654630e76597df804"}, +] + [[package]] name = "mpmath" version = "1.3.0" @@ -1226,6 +2079,136 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} +[[package]] +name = "myst-parser" +version = "3.0.1" +description = "An extended 
[CommonMark](https://spec.commonmark.org/) compliant parser," +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "myst_parser-3.0.1-py3-none-any.whl", hash = "sha256:6457aaa33a5d474aca678b8ead9b3dc298e89c68e67012e73146ea6fd54babf1"}, + {file = "myst_parser-3.0.1.tar.gz", hash = "sha256:88f0cb406cb363b077d176b51c476f62d60604d68a8dcdf4832e080441301a87"}, +] + +[package.dependencies] +docutils = ">=0.18,<0.22" +jinja2 = "*" +markdown-it-py = ">=3.0,<4.0" +mdit-py-plugins = ">=0.4,<1.0" +pyyaml = "*" +sphinx = ">=6,<8" + +[package.extras] +code-style = ["pre-commit (>=3.0,<4.0)"] +linkify = ["linkify-it-py (>=2.0,<3.0)"] +rtd = ["ipython", "sphinx (>=7)", "sphinx-autodoc2 (>=0.5.0,<0.6.0)", "sphinx-book-theme (>=1.1,<2.0)", "sphinx-copybutton", "sphinx-design", "sphinx-pyscript", "sphinx-tippy (>=0.4.3)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.9.0,<0.10.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"] +testing = ["beautifulsoup4", "coverage[toml]", "defusedxml", "pytest (>=8,<9)", "pytest-cov", "pytest-param-files (>=0.6.0,<0.7.0)", "pytest-regressions", "sphinx-pytest"] +testing-docutils = ["pygments", "pytest (>=8,<9)", "pytest-param-files (>=0.6.0,<0.7.0)"] + +[[package]] +name = "nbclient" +version = "0.10.4" +description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
+optional = false +python-versions = ">=3.10.0" +groups = ["docs"] +files = [ + {file = "nbclient-0.10.4-py3-none-any.whl", hash = "sha256:9162df5a7373d70d606527300a95a975a47c137776cd942e52d9c7e29ff83440"}, + {file = "nbclient-0.10.4.tar.gz", hash = "sha256:1e54091b16e6da39e297b0ece3e10f6f29f4ac4e8ee515d29f8a7099bd6553c9"}, +] + +[package.dependencies] +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +nbformat = ">=5.1.3" +traitlets = ">=5.4" + +[package.extras] +dev = ["pre-commit"] +docs = ["autodoc-traits", "flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "mock", "moto", "myst-parser", "nbconvert (>=7.1.0)", "pytest (>=9.0.1,<10)", "pytest-asyncio (>=1.3.0)", "pytest-cov (>=4.0)", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling", "testpath", "xmltodict"] +test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.1.0)", "pytest (>=9.0.1,<10)", "pytest-asyncio (>=1.3.0)", "pytest-cov (>=4.0)", "testpath", "xmltodict"] + +[[package]] +name = "nbconvert" +version = "6.5.4" +description = "Converting Jupyter Notebooks" +optional = false +python-versions = ">=3.7" +groups = ["docs"] +files = [ + {file = "nbconvert-6.5.4-py3-none-any.whl", hash = "sha256:d679a947f849a966cbbd0bf6e7fedcfdb64be3b20ce7cef11ad55c13f5820e19"}, + {file = "nbconvert-6.5.4.tar.gz", hash = "sha256:9e3c7c6d491374cbdd5f35d268c05809357716d346f4573186bbeab32ee50bc1"}, +] + +[package.dependencies] +beautifulsoup4 = "*" +bleach = "*" +defusedxml = "*" +entrypoints = ">=0.2.2" +jinja2 = ">=3.0" +jupyter-core = ">=4.7" +jupyterlab-pygments = "*" +lxml = "*" +MarkupSafe = ">=2.0" +mistune = ">=0.8.1,<2" +nbclient = ">=0.5.0" +nbformat = ">=5.1" +packaging = "*" +pandocfilters = ">=1.4.1" +pygments = ">=2.4.1" +tinycss2 = "*" +traitlets = ">=5.0" + +[package.extras] +all = ["ipykernel", "ipython", "ipywidgets (>=7)", "nbsphinx (>=0.2.12)", "pre-commit", "pyppeteer (>=1,<1.1)", "pytest", "pytest-cov", "pytest-dependency", "sphinx 
(>=1.5.1)", "sphinx-rtd-theme", "tornado (>=6.1)"] +docs = ["ipython", "nbsphinx (>=0.2.12)", "sphinx (>=1.5.1)", "sphinx-rtd-theme"] +serve = ["tornado (>=6.1)"] +test = ["ipykernel", "ipywidgets (>=7)", "pre-commit", "pyppeteer (>=1,<1.1)", "pytest", "pytest-cov", "pytest-dependency"] +webpdf = ["pyppeteer (>=1,<1.1)"] + +[[package]] +name = "nbformat" +version = "5.10.4" +description = "The Jupyter Notebook format" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"}, + {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"}, +] + +[package.dependencies] +fastjsonschema = ">=2.15" +jsonschema = ">=2.6" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +traitlets = ">=5.1" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["pep440", "pre-commit", "pytest", "testpath"] + +[[package]] +name = "nbsphinx" +version = "0.9.4" +description = "Jupyter Notebook Tools for Sphinx" +optional = false +python-versions = ">=3.6" +groups = ["docs"] +files = [ + {file = "nbsphinx-0.9.4-py3-none-any.whl", hash = "sha256:22cb1d974a8300e8118ca71aea1f649553743c0c5830a54129dcd446e6a8ba17"}, + {file = "nbsphinx-0.9.4.tar.gz", hash = "sha256:042a60806fc23d519bc5bef59d95570713913fe442fda759d53e3aaf62104794"}, +] + +[package.dependencies] +docutils = ">=0.18.1" +jinja2 = "*" +nbconvert = ">=5.3,<5.4 || >5.4" +nbformat = "*" +sphinx = ">=1.8" +traitlets = ">=5" + [[package]] name = "networkx" version = "3.4.2" @@ -1246,6 +2229,43 @@ example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] +[[package]] +name = "nh3" +version = "0.3.5" 
+description = "Python binding to Ammonia HTML sanitizer Rust crate" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "nh3-0.3.5-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:23a312224875f72cd16bde417f49071451877e29ef646a60e50fcb69407cc18a"}, + {file = "nh3-0.3.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387abd011e81959d5a35151a11350a0795c6edeb53ebfa02d2e882dc01299263"}, + {file = "nh3-0.3.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:48f45e3e914be93a596431aa143dedf1582557bf41a58153c296048d6e3798c9"}, + {file = "nh3-0.3.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0a09f51806fd51b4fedbf9ea2b61fef388f19aef0d62fe51199d41648be14588"}, + {file = "nh3-0.3.5-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:c357f1d042c67f135a5e6babb2b0e3b9d9224ff4a3543240f597767b01384ffd"}, + {file = "nh3-0.3.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:38748140bf76383ab7ce2dce0ad4cb663855d8fbc9098f7f3483673d09616a17"}, + {file = "nh3-0.3.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:84bdeb082544fbcb77a12c034dd77d7da0556fdc0727b787eb6214b958c15e29"}, + {file = "nh3-0.3.5-cp314-cp314t-win32.whl", hash = "sha256:c3aae321f67ae66cff2a627115f106a377d4475d10b0e13d97959a13486b9a88"}, + {file = "nh3-0.3.5-cp314-cp314t-win_amd64.whl", hash = "sha256:c88605d8d468f7fc1b31e06129bc91d6c96f6c621776c9b504a0da9beac9df5f"}, + {file = "nh3-0.3.5-cp314-cp314t-win_arm64.whl", hash = "sha256:72c5bdedec27fa33de6a5326346ea8aa3fe54f6ac294d54c4b204fb66a9f1e79"}, + {file = "nh3-0.3.5-cp38-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3bb854485c9b33e5bb143ff3e49e577073bc6bc320f0ff8fc316dd89c0d3c101"}, + {file = "nh3-0.3.5-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d401ab2d8e86d59e2126e3ab2a2f45840c405842b626d9a51624b3a33b6878"}, + {file = 
"nh3-0.3.5-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acfd354e61accbe4c74f8017c6e397a776916dfe47c48643cf7fd84ade826f93"}, + {file = "nh3-0.3.5-cp38-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:52d877980d7ca01dc3baf3936bf844828bc6f332962227a684ed79c18cce14c3"}, + {file = "nh3-0.3.5-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:207c01801d3e9bb8ec08f08689346bdd30ce15b8bf60013a925d08b5388962a4"}, + {file = "nh3-0.3.5-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea232933394d1d58bf7c4bb348dc4660eae6604e1ae81cd2ba6d9ed80d390f3b"}, + {file = "nh3-0.3.5-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe3a787dc76b50de6bee54ef242f26c41dfe47654428e3e94f0fae5bb6dd2cc1"}, + {file = "nh3-0.3.5-cp38-abi3-manylinux_2_31_riscv64.whl", hash = "sha256:488928988caad25ba14b1eb5bc74e25e21f3b5e40341d956f3ce4a8bc19460dc"}, + {file = "nh3-0.3.5-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c069570b06aa848457713ad7af4a9905691291548c4466a9ad78ee95808382b"}, + {file = "nh3-0.3.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:eeedc90ed8c42c327e8e10e621ccfa314fc6cce35d5929f4297ff1cdb89667c4"}, + {file = "nh3-0.3.5-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:de8e8621853b6470fe928c684ee0d3f39ea8086cebafe4c416486488dea7b68d"}, + {file = "nh3-0.3.5-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:6ea58cc44d274c643b83547ca9654a0b1a817609b160601356f76a2b744c49ad"}, + {file = "nh3-0.3.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e49c9b564e6bcb03ecd2f057213df9a0de15a95812ac9db9600b590db23d3ae9"}, + {file = "nh3-0.3.5-cp38-abi3-win32.whl", hash = "sha256:559e4c73b689e9a7aa97ac9760b1bc488038d7c1a575aa4ab5a0e19ee9630c0f"}, + {file = "nh3-0.3.5-cp38-abi3-win_amd64.whl", hash = "sha256:45e6a65dc88a300a2e3502cb9c8e6d1d6b831d6fba7470643333609c6aab1f30"}, + {file = "nh3-0.3.5-cp38-abi3-win_arm64.whl", hash = 
"sha256:8f85285700a18e9f3fc5bff41fe573fa84f81542ef13b48a89f9fecca0474d3b"}, + {file = "nh3-0.3.5.tar.gz", hash = "sha256:45855e14ff056064fec77133bfcf7cd691838168e5e17bbef075394954dc9dc8"}, +] + [[package]] name = "nodeenv" version = "1.9.1" @@ -1409,6 +2429,28 @@ files = [ {file = "numpy-2.3.4.tar.gz", hash = "sha256:a7d018bfedb375a8d979ac758b120ba846a7fe764911a64465fd87b8729f4a6a"}, ] +[[package]] +name = "numpydoc" +version = "1.7.0" +description = "Sphinx extension to support docstrings in Numpy format" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "numpydoc-1.7.0-py3-none-any.whl", hash = "sha256:5a56419d931310d79a06cfc2a126d1558700feeb9b4f3d8dcae1a8134be829c9"}, + {file = "numpydoc-1.7.0.tar.gz", hash = "sha256:866e5ae5b6509dcf873fc6381120f5c31acf13b135636c1a81d68c166a95f921"}, +] + +[package.dependencies] +sphinx = ">=6" +tabulate = ">=0.8.10" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} + +[package.extras] +developer = ["pre-commit (>=3.3)", "tomli ; python_version < \"3.11\""] +doc = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pydata-sphinx-theme (>=0.13.3)", "sphinx (>=7)"] +test = ["matplotlib", "pytest", "pytest-cov"] + [[package]] name = "nvidia-cublas-cu12" version = "12.6.4.1" @@ -1637,7 +2679,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -1730,13 +2772,57 @@ sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-d test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.9.2)"] +[[package]] +name = "pandocfilters" +version = "1.5.1" +description = 
"Utilities for writing pandoc filters in python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["docs"] +files = [ + {file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"}, + {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"}, +] + +[[package]] +name = "parso" +version = "0.8.6" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +groups = ["docs"] +files = [ + {file = "parso-0.8.6-py2.py3-none-any.whl", hash = "sha256:2c549f800b70a5c4952197248825584cb00f033b29c692671d3bf08bf380baff"}, + {file = "parso-0.8.6.tar.gz", hash = "sha256:2b9a0332696df97d454fa67b81618fd69c35a7b90327cbe6ba5c92d2c68a7bfd"}, +] + +[package.extras] +qa = ["flake8 (==5.0.4)", "types-setuptools (==67.2.0.1)", "zuban (==0.5.1)"] +testing = ["docopt", "pytest"] + +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." +optional = false +python-versions = "*" +groups = ["docs"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + [[package]] name = "platformdirs" version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["dev", "docs"] files = [ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, @@ -1806,7 +2892,7 @@ version = "3.0.52" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["dev", "docs"] files = [ {file = "prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955"}, {file = "prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855"}, @@ -1945,7 +3031,7 @@ version = "7.0.0" description = "Cross-platform lib for process and system monitoring in Python. NOTE: the syntax of this script MUST be kept compatible with Python 2.7." 
optional = false python-versions = ">=3.6" -groups = ["main"] +groups = ["main", "docs"] files = [ {file = "psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25"}, {file = "psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da"}, @@ -1958,11 +3044,40 @@ files = [ {file = "psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553"}, {file = "psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456"}, ] +markers = {docs = "python_version >= \"3.11\""} [package.extras] dev = ["abi3audit", "black (==24.10.0)", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"] test = ["pytest", "pytest-xdist", "setuptools"] +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +groups = ["docs"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, + {file = "pure_eval-0.2.3.tar.gz", hash = 
"sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, +] + +[package.extras] +tests = ["pytest"] + [[package]] name = "pyaml" version = "25.1.0" @@ -1982,138 +3097,45 @@ PyYAML = "*" anchors = ["unidecode"] [[package]] -name = "pydantic" -version = "2.11.10" -description = "Data validation using Python type hints" +name = "pycparser" +version = "3.0" +description = "C parser in Python" optional = false -python-versions = ">=3.9" -groups = ["dev"] +python-versions = ">=3.10" +groups = ["dev", "docs"] files = [ - {file = "pydantic-2.11.10-py3-none-any.whl", hash = "sha256:802a655709d49bd004c31e865ef37da30b540786a46bfce02333e0e24b5fe29a"}, - {file = "pydantic-2.11.10.tar.gz", hash = "sha256:dc280f0982fbda6c38fada4e476dc0a4f3aeaf9c6ad4c28df68a666ec3c61423"}, + {file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"}, + {file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"}, ] - -[package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.33.2" -typing-extensions = ">=4.12.2" -typing-inspection = ">=0.4.0" - -[package.extras] -email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] +markers = {dev = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\" and platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"", docs = "implementation_name == \"pypy\""} [[package]] -name = "pydantic-core" -version = "2.33.2" -description = "Core functionality for Pydantic validation and serialization" +name = "pydata-sphinx-theme" +version = "0.15.2" +description = "Bootstrap-based Sphinx theme from the PyData community" optional = false python-versions = ">=3.9" -groups = ["dev"] +groups = ["docs"] files = [ - {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, - {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, - {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, - {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, - {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, - {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = 
"sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, - {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, - {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = 
"sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, - {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, - {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, - {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, - {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, - {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, - {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, - {file = 
"pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, - {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, - {file = 
"pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, - {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + {file = "pydata_sphinx_theme-0.15.2-py3-none-any.whl", hash = "sha256:0c5fa1fa98a9b26dae590666ff576f27e26c7ba708fee754ecb9e07359ed4588"}, + {file = "pydata_sphinx_theme-0.15.2.tar.gz", hash = "sha256:4243fee85b3afcfae9df64f83210a04e7182e53bc3db8841ffff6d21d95ae320"}, +] + +[package.dependencies] +accessible-pygments = "*" +Babel = "*" +beautifulsoup4 = "*" +docutils = "!=0.17.0" +packaging = "*" +pygments = ">=2.7" +sphinx = ">=5.0" +typing-extensions = "*" + +[package.extras] +a11y = ["pytest-playwright"] +dev = ["nox", "pre-commit", "pydata-sphinx-theme[doc,test]", "pyyaml"] +doc = ["ablog (>=0.11.0rc2)", "colorama", "ipykernel", "ipyleaflet", "jupyter_sphinx", "jupyterlite-sphinx", "linkify-it-py", "matplotlib", "myst-parser", "nbsphinx", "numpy", "numpydoc", "pandas", "plotly", "rich", "sphinx-autoapi (>=3.0.0)", "sphinx-copybutton", "sphinx-design", "sphinx-favicon (>=1.0.1)", "sphinx-sitemap", "sphinx-togglebutton", "sphinxcontrib-youtube (<1.4)", "sphinxext-rediraffe", "xarray"] +test = ["pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "pygments" @@ -2121,7 +3143,7 @@ version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["dev", "docs"] files = [ {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, @@ -2130,6 +3152,27 @@ files = [ [package.extras] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pyright" +version = "1.1.409" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "pyright-1.1.409-py3-none-any.whl", hash = "sha256:aa3ea228cab90c845c7a60d28db7a844c04315356392aa09fafcee98c8c22fb3"}, + {file = "pyright-1.1.409.tar.gz", hash = "sha256:986ee05beca9e077c165758ad123667c679e050059a2546aa02473930394bc93"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" +typing-extensions = ">=4.1" + +[package.extras] +all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] +nodejs = ["nodejs-wheel-binaries"] + [[package]] name = "pytest" version = "8.3.4" @@ -2178,7 +3221,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] +groups = ["main", "docs"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -2187,61 +3230,6 @@ files = [ [package.dependencies] six = ">=1.5" -[[package]] -name = "python-gitlab" -version = "5.6.0" -description = "The python wrapper for the GitLab REST and GraphQL APIs." 
-optional = false -python-versions = ">=3.9.0" -groups = ["dev"] -files = [ - {file = "python_gitlab-5.6.0-py3-none-any.whl", hash = "sha256:68980cd70929fc7f8f06d8a7b09bd046a6b79e1995c19d61249f046005099100"}, - {file = "python_gitlab-5.6.0.tar.gz", hash = "sha256:bc531e8ba3e5641b60409445d4919ace68a2c18cb0ec6d48fbced6616b954166"}, -] - -[package.dependencies] -requests = ">=2.32.0" -requests-toolbelt = ">=1.0.0" - -[package.extras] -autocompletion = ["argcomplete (>=1.10.0,<3)"] -graphql = ["gql[httpx] (>=3.5.0,<4)"] -yaml = ["PyYaml (>=6.0.1)"] - -[[package]] -name = "python-semantic-release" -version = "9.21.1" -description = "Automatic Semantic Versioning for Python projects" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "python_semantic_release-9.21.1-py3-none-any.whl", hash = "sha256:e69afe5100106390eec9e800132c947ed774bdcf9aa8f0df29589ea9ef375a21"}, - {file = "python_semantic_release-9.21.1.tar.gz", hash = "sha256:b5c509a573899e88e8f29504d2f83e9ddab9a66af861ec1baf39f2b86bbf3517"}, -] - -[package.dependencies] -click = ">=8.0,<9.0" -click-option-group = ">=0.5,<1.0" -Deprecated = ">=1.2,<2.0" -dotty-dict = ">=1.3,<2.0" -gitpython = ">=3.0,<4.0" -importlib-resources = ">=6.0,<7.0" -jinja2 = ">=3.1,<4.0" -pydantic = ">=2.0,<3.0" -python-gitlab = ">=4.0.0,<6.0.0" -requests = ">=2.25,<3.0" -rich = ">=14.0,<15.0" -shellingham = ">=1.5,<2.0" -tomlkit = ">=0.11,<1.0" - -[package.extras] -build = ["build (>=1.2,<2.0)"] -dev = ["pre-commit (>=3.5,<4.0)", "ruff (==0.6.1)", "tox (>=4.11,<5.0)"] -docs = ["Sphinx (>=6.0,<7.0)", "furo (>=2024.1,<2025.0)", "sphinx-autobuild (==2024.2.4)", "sphinxcontrib-apidoc (==0.5.0)"] -mypy = ["mypy (==1.15.0)", "types-Deprecated (>=1.2,<2.0)", "types-pyyaml (>=6.0,<7.0)", "types-requests (>=2.32.0,<2.33.0)"] -test = ["coverage[toml] (>=7.0,<8.0)", "filelock (>=3.15,<4.0)", "flatdict (>=4.0,<5.0)", "freezegun (>=1.5,<2.0)", "pytest (>=8.3,<9.0)", "pytest-clarity (>=1.0,<2.0)", "pytest-cov 
(>=5.0.0,<7.0.0)", "pytest-env (>=1.0,<2.0)", "pytest-lazy-fixtures (>=1.1.1,<1.2.0)", "pytest-mock (>=3.0,<4.0)", "pytest-order (>=1.3,<2.0)", "pytest-pretty (>=1.2,<2.0)", "pytest-xdist (>=3.0,<4.0)", "pyyaml (>=6.0,<7.0)", "requests-mock (>=1.10,<2.0)", "responses (>=0.25.0,<0.26.0)"] - [[package]] name = "pytorch-lightning" version = "2.5.0.post0" @@ -2285,13 +3273,26 @@ files = [ {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, ] +[[package]] +name = "pywin32-ctypes" +version = "0.2.3" +description = "A (partial) reimplementation of pywin32 using ctypes/cffi" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"win32\"" +files = [ + {file = "pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755"}, + {file = "pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8"}, +] + [[package]] name = "pyyaml" version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -2348,20 +3349,179 @@ files = [ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] +[[package]] +name = "pyzmq" +version = "27.1.0" +description = "Python bindings for 0MQ" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "pyzmq-27.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = 
"sha256:508e23ec9bc44c0005c4946ea013d9317ae00ac67778bd47519fdf5a0e930ff4"}, + {file = "pyzmq-27.1.0-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:507b6f430bdcf0ee48c0d30e734ea89ce5567fd7b8a0f0044a369c176aa44556"}, + {file = "pyzmq-27.1.0-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf7b38f9fd7b81cb6d9391b2946382c8237fd814075c6aa9c3b746d53076023b"}, + {file = "pyzmq-27.1.0-cp310-cp310-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03ff0b279b40d687691a6217c12242ee71f0fba28bf8626ff50e3ef0f4410e1e"}, + {file = "pyzmq-27.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:677e744fee605753eac48198b15a2124016c009a11056f93807000ab11ce6526"}, + {file = "pyzmq-27.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dd2fec2b13137416a1c5648b7009499bcc8fea78154cd888855fa32514f3dad1"}, + {file = "pyzmq-27.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:08e90bb4b57603b84eab1d0ca05b3bbb10f60c1839dc471fc1c9e1507bef3386"}, + {file = "pyzmq-27.1.0-cp310-cp310-win32.whl", hash = "sha256:a5b42d7a0658b515319148875fcb782bbf118dd41c671b62dae33666c2213bda"}, + {file = "pyzmq-27.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:c0bb87227430ee3aefcc0ade2088100e528d5d3298a0a715a64f3d04c60ba02f"}, + {file = "pyzmq-27.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:9a916f76c2ab8d045b19f2286851a38e9ac94ea91faf65bd64735924522a8b32"}, + {file = "pyzmq-27.1.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:226b091818d461a3bef763805e75685e478ac17e9008f49fce2d3e52b3d58b86"}, + {file = "pyzmq-27.1.0-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:0790a0161c281ca9723f804871b4027f2e8b5a528d357c8952d08cd1a9c15581"}, + {file = "pyzmq-27.1.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c895a6f35476b0c3a54e3eb6ccf41bf3018de937016e6e18748317f25d4e925f"}, + {file = "pyzmq-27.1.0-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:5bbf8d3630bf96550b3be8e1fc0fea5cbdc8d5466c1192887bd94869da17a63e"}, + {file = "pyzmq-27.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15c8bd0fe0dabf808e2d7a681398c4e5ded70a551ab47482067a572c054c8e2e"}, + {file = "pyzmq-27.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bafcb3dd171b4ae9f19ee6380dfc71ce0390fefaf26b504c0e5f628d7c8c54f2"}, + {file = "pyzmq-27.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e829529fcaa09937189178115c49c504e69289abd39967cd8a4c215761373394"}, + {file = "pyzmq-27.1.0-cp311-cp311-win32.whl", hash = "sha256:6df079c47d5902af6db298ec92151db82ecb557af663098b92f2508c398bb54f"}, + {file = "pyzmq-27.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:190cbf120fbc0fc4957b56866830def56628934a9d112aec0e2507aa6a032b97"}, + {file = "pyzmq-27.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:eca6b47df11a132d1745eb3b5b5e557a7dae2c303277aa0e69c6ba91b8736e07"}, + {file = "pyzmq-27.1.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:452631b640340c928fa343801b0d07eb0c3789a5ffa843f6e1a9cee0ba4eb4fc"}, + {file = "pyzmq-27.1.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1c179799b118e554b66da67d88ed66cd37a169f1f23b5d9f0a231b4e8d44a113"}, + {file = "pyzmq-27.1.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3837439b7f99e60312f0c926a6ad437b067356dc2bc2ec96eb395fd0fe804233"}, + {file = "pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43ad9a73e3da1fab5b0e7e13402f0b2fb934ae1c876c51d0afff0e7c052eca31"}, + {file = "pyzmq-27.1.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0de3028d69d4cdc475bfe47a6128eb38d8bc0e8f4d69646adfbcd840facbac28"}, + {file = "pyzmq-27.1.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf44a7763aea9298c0aa7dbf859f87ed7012de8bda0f3977b6fb1d96745df856"}, + {file = "pyzmq-27.1.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f30f395a9e6fbca195400ce833c731e7b64c3919aa481af4d88c3759e0cb7496"}, + 
{file = "pyzmq-27.1.0-cp312-abi3-win32.whl", hash = "sha256:250e5436a4ba13885494412b3da5d518cd0d3a278a1ae640e113c073a5f88edd"}, + {file = "pyzmq-27.1.0-cp312-abi3-win_amd64.whl", hash = "sha256:9ce490cf1d2ca2ad84733aa1d69ce6855372cb5ce9223802450c9b2a7cba0ccf"}, + {file = "pyzmq-27.1.0-cp312-abi3-win_arm64.whl", hash = "sha256:75a2f36223f0d535a0c919e23615fc85a1e23b71f40c7eb43d7b1dedb4d8f15f"}, + {file = "pyzmq-27.1.0-cp313-cp313-android_24_arm64_v8a.whl", hash = "sha256:93ad4b0855a664229559e45c8d23797ceac03183c7b6f5b4428152a6b06684a5"}, + {file = "pyzmq-27.1.0-cp313-cp313-android_24_x86_64.whl", hash = "sha256:fbb4f2400bfda24f12f009cba62ad5734148569ff4949b1b6ec3b519444342e6"}, + {file = "pyzmq-27.1.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:e343d067f7b151cfe4eb3bb796a7752c9d369eed007b91231e817071d2c2fec7"}, + {file = "pyzmq-27.1.0-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:08363b2011dec81c354d694bdecaef4770e0ae96b9afea70b3f47b973655cc05"}, + {file = "pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d54530c8c8b5b8ddb3318f481297441af102517602b569146185fa10b63f4fa9"}, + {file = "pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6f3afa12c392f0a44a2414056d730eebc33ec0926aae92b5ad5cf26ebb6cc128"}, + {file = "pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c65047adafe573ff023b3187bb93faa583151627bc9c51fc4fb2c561ed689d39"}, + {file = "pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:90e6e9441c946a8b0a667356f7078d96411391a3b8f80980315455574177ec97"}, + {file = "pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:add071b2d25f84e8189aaf0882d39a285b42fa3853016ebab234a5e78c7a43db"}, + {file = "pyzmq-27.1.0-cp313-cp313t-win32.whl", hash = "sha256:7ccc0700cfdf7bd487bea8d850ec38f204478681ea02a582a8da8171b7f90a1c"}, + {file = "pyzmq-27.1.0-cp313-cp313t-win_amd64.whl", hash = 
"sha256:8085a9fba668216b9b4323be338ee5437a235fe275b9d1610e422ccc279733e2"}, + {file = "pyzmq-27.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:6bb54ca21bcfe361e445256c15eedf083f153811c37be87e0514934d6913061e"}, + {file = "pyzmq-27.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ce980af330231615756acd5154f29813d553ea555485ae712c491cd483df6b7a"}, + {file = "pyzmq-27.1.0-cp314-cp314t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1779be8c549e54a1c38f805e56d2a2e5c009d26de10921d7d51cfd1c8d4632ea"}, + {file = "pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7200bb0f03345515df50d99d3db206a0a6bee1955fbb8c453c76f5bf0e08fb96"}, + {file = "pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01c0e07d558b06a60773744ea6251f769cd79a41a97d11b8bf4ab8f034b0424d"}, + {file = "pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:80d834abee71f65253c91540445d37c4c561e293ba6e741b992f20a105d69146"}, + {file = "pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:544b4e3b7198dde4a62b8ff6685e9802a9a1ebf47e77478a5eb88eca2a82f2fd"}, + {file = "pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cedc4c68178e59a4046f97eca31b148ddcf51e88677de1ef4e78cf06c5376c9a"}, + {file = "pyzmq-27.1.0-cp314-cp314t-win32.whl", hash = "sha256:1f0b2a577fd770aa6f053211a55d1c47901f4d537389a034c690291485e5fe92"}, + {file = "pyzmq-27.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:19c9468ae0437f8074af379e986c5d3d7d7bfe033506af442e8c879732bedbe0"}, + {file = "pyzmq-27.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:dc5dbf68a7857b59473f7df42650c621d7e8923fb03fa74a526890f4d33cc4d7"}, + {file = "pyzmq-27.1.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:18339186c0ed0ce5835f2656cdfb32203125917711af64da64dbaa3d949e5a1b"}, + {file = "pyzmq-27.1.0-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = 
"sha256:753d56fba8f70962cd8295fb3edb40b9b16deaa882dd2b5a3a2039f9ff7625aa"}, + {file = "pyzmq-27.1.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b721c05d932e5ad9ff9344f708c96b9e1a485418c6618d765fca95d4daacfbef"}, + {file = "pyzmq-27.1.0-cp38-cp38-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be883ff3d722e6085ee3f4afc057a50f7f2e0c72d289fd54df5706b4e3d3a50"}, + {file = "pyzmq-27.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b2e592db3a93128daf567de9650a2f3859017b3f7a66bc4ed6e4779d6034976f"}, + {file = "pyzmq-27.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ad68808a61cbfbbae7ba26d6233f2a4aa3b221de379ce9ee468aa7a83b9c36b0"}, + {file = "pyzmq-27.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e2687c2d230e8d8584fbea433c24382edfeda0c60627aca3446aa5e58d5d1831"}, + {file = "pyzmq-27.1.0-cp38-cp38-win32.whl", hash = "sha256:a1aa0ee920fb3825d6c825ae3f6c508403b905b698b6460408ebd5bb04bbb312"}, + {file = "pyzmq-27.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:df7cd397ece96cf20a76fae705d40efbab217d217897a5053267cd88a700c266"}, + {file = "pyzmq-27.1.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:96c71c32fff75957db6ae33cd961439f386505c6e6b377370af9b24a1ef9eafb"}, + {file = "pyzmq-27.1.0-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:49d3980544447f6bd2968b6ac913ab963a49dcaa2d4a2990041f16057b04c429"}, + {file = "pyzmq-27.1.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:849ca054d81aa1c175c49484afaaa5db0622092b5eccb2055f9f3bb8f703782d"}, + {file = "pyzmq-27.1.0-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3970778e74cb7f85934d2b926b9900e92bfe597e62267d7499acc39c9c28e345"}, + {file = "pyzmq-27.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:da96ecdcf7d3919c3be2de91a8c513c186f6762aa6cf7c01087ed74fad7f0968"}, + {file = "pyzmq-27.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:9541c444cfe1b1c0156c5c86ece2bb926c7079a18e7b47b0b1b3b1b875e5d098"}, + {file = "pyzmq-27.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e30a74a39b93e2e1591b58eb1acef4902be27c957a8720b0e368f579b82dc22f"}, + {file = "pyzmq-27.1.0-cp39-cp39-win32.whl", hash = "sha256:b1267823d72d1e40701dcba7edc45fd17f71be1285557b7fe668887150a14b78"}, + {file = "pyzmq-27.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0c996ded912812a2fcd7ab6574f4ad3edc27cb6510349431e4930d4196ade7db"}, + {file = "pyzmq-27.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:346e9ba4198177a07e7706050f35d733e08c1c1f8ceacd5eb6389d653579ffbc"}, + {file = "pyzmq-27.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c17e03cbc9312bee223864f1a2b13a99522e0dc9f7c5df0177cd45210ac286e6"}, + {file = "pyzmq-27.1.0-pp310-pypy310_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f328d01128373cb6763823b2b4e7f73bdf767834268c565151eacb3b7a392f90"}, + {file = "pyzmq-27.1.0-pp310-pypy310_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c1790386614232e1b3a40a958454bdd42c6d1811837b15ddbb052a032a43f62"}, + {file = "pyzmq-27.1.0-pp310-pypy310_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:448f9cb54eb0cee4732b46584f2710c8bc178b0e5371d9e4fc8125201e413a74"}, + {file = "pyzmq-27.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:05b12f2d32112bf8c95ef2e74ec4f1d4beb01f8b5e703b38537f8849f92cb9ba"}, + {file = "pyzmq-27.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:18770c8d3563715387139060d37859c02ce40718d1faf299abddcdcc6a649066"}, + {file = "pyzmq-27.1.0-pp311-pypy311_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:ac25465d42f92e990f8d8b0546b01c391ad431c3bf447683fdc40565941d0604"}, + {file = "pyzmq-27.1.0-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53b40f8ae006f2734ee7608d59ed661419f087521edbfc2149c3932e9c14808c"}, + {file = 
"pyzmq-27.1.0-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f605d884e7c8be8fe1aa94e0a783bf3f591b84c24e4bc4f3e7564c82ac25e271"}, + {file = "pyzmq-27.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c9f7f6e13dff2e44a6afeaf2cf54cee5929ad64afaf4d40b50f93c58fc687355"}, + {file = "pyzmq-27.1.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50081a4e98472ba9f5a02850014b4c9b629da6710f8f14f3b15897c666a28f1b"}, + {file = "pyzmq-27.1.0-pp38-pypy38_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:510869f9df36ab97f89f4cff9d002a89ac554c7ac9cadd87d444aa4cf66abd27"}, + {file = "pyzmq-27.1.0-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1f8426a01b1c4098a750973c37131cf585f61c7911d735f729935a0c701b68d3"}, + {file = "pyzmq-27.1.0-pp38-pypy38_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:726b6a502f2e34c6d2ada5e702929586d3ac948a4dbbb7fed9854ec8c0466027"}, + {file = "pyzmq-27.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bd67e7c8f4654bef471c0b1ca6614af0b5202a790723a58b79d9584dc8022a78"}, + {file = "pyzmq-27.1.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:722ea791aa233ac0a819fc2c475e1292c76930b31f1d828cb61073e2fe5e208f"}, + {file = "pyzmq-27.1.0-pp39-pypy39_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:01f9437501886d3a1dd4b02ef59fb8cc384fa718ce066d52f175ee49dd5b7ed8"}, + {file = "pyzmq-27.1.0-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4a19387a3dddcc762bfd2f570d14e2395b2c9701329b266f83dd87a2b3cbd381"}, + {file = "pyzmq-27.1.0-pp39-pypy39_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c618fbcd069e3a29dcd221739cacde52edcc681f041907867e0f5cc7e85f172"}, + {file = "pyzmq-27.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff8d114d14ac671d88c89b9224c63d6c4e5a613fe8acd5594ce53d752a3aafe9"}, + {file = "pyzmq-27.1.0.tar.gz", hash = 
"sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + [[package]] name = "questionary" version = "2.1.1" description = "Python library to build pretty command line user prompts ⭐️" optional = false -python-versions = ">=3.9" -groups = ["dev"] +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "questionary-2.1.1-py3-none-any.whl", hash = "sha256:a51af13f345f1cdea62347589fbb6df3b290306ab8930713bfae4d475a7d4a59"}, + {file = "questionary-2.1.1.tar.gz", hash = "sha256:3d7e980292bb0107abaa79c68dd3eee3c561b83a0f89ae482860b181c8bd412d"}, +] + +[package.dependencies] +prompt_toolkit = ">=2.0,<4.0" + +[[package]] +name = "readme-renderer" +version = "43.0" +description = "readme_renderer is a library for rendering readme descriptions for Warehouse" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "readme_renderer-43.0-py3-none-any.whl", hash = "sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9"}, + {file = "readme_renderer-43.0.tar.gz", hash = "sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311"}, +] + +[package.dependencies] +docutils = ">=0.13.1" +nh3 = ">=0.2.14" +Pygments = ">=2.5.1" + +[package.extras] +md = ["cmarkgfm (>=0.8.0)"] + +[[package]] +name = "readthedocs-sphinx-ext" +version = "2.2.5" +description = "Sphinx extension for Read the Docs overrides" +optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "readthedocs-sphinx-ext-2.2.5.tar.gz", hash = "sha256:ee5fd5b99db9f0c180b2396cbce528aa36671951b9526bb0272dbfce5517bd27"}, + {file = "readthedocs_sphinx_ext-2.2.5-py2.py3-none-any.whl", hash = "sha256:f8c56184ea011c972dd45a90122568587cc85b0127bc9cf064d17c68bc809daa"}, +] + +[package.dependencies] +Jinja2 = ">=2.9" +packaging = "*" +requests = "*" + +[[package]] +name = "referencing" +version = "0.37.0" +description = "JSON 
Referencing + Python" +optional = false +python-versions = ">=3.10" +groups = ["docs"] files = [ - {file = "questionary-2.1.1-py3-none-any.whl", hash = "sha256:a51af13f345f1cdea62347589fbb6df3b290306ab8930713bfae4d475a7d4a59"}, - {file = "questionary-2.1.1.tar.gz", hash = "sha256:3d7e980292bb0107abaa79c68dd3eee3c561b83a0f89ae482860b181c8bd412d"}, + {file = "referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231"}, + {file = "referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8"}, ] [package.dependencies] -prompt_toolkit = ">=2.0,<4.0" +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" +typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} [[package]] name = "requests" @@ -2369,7 +3529,7 @@ version = "2.32.4" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, @@ -2400,6 +3560,21 @@ files = [ [package.dependencies] requests = ">=2.0.1,<3.0.0" +[[package]] +name = "rfc3986" +version = "2.0.0" +description = "Validating URI References per RFC 3986" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, + {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, +] + +[package.extras] +idna2008 = ["idna"] + [[package]] name = "rich" version = "14.2.0" @@ -2419,6 +3594,131 @@ pygments = ">=2.13.0,<3.0.0" [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] +[[package]] +name = "rpds-py" 
+version = "0.30.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.10" +groups = ["docs"] +files = [ + {file = "rpds_py-0.30.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:679ae98e00c0e8d68a7fda324e16b90fd5260945b45d3b824c892cec9eea3288"}, + {file = "rpds_py-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cc2206b76b4f576934f0ed374b10d7ca5f457858b157ca52064bdfc26b9fc00"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:389a2d49eded1896c3d48b0136ead37c48e221b391c052fba3f4055c367f60a6"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:32c8528634e1bf7121f3de08fa85b138f4e0dc47657866630611b03967f041d7"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f207f69853edd6f6700b86efb84999651baf3789e78a466431df1331608e5324"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67b02ec25ba7a9e8fa74c63b6ca44cf5707f2fbfadae3ee8e7494297d56aa9df"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0e95f6819a19965ff420f65578bacb0b00f251fefe2c8b23347c37174271f3"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:a452763cc5198f2f98898eb98f7569649fe5da666c2dc6b5ddb10fde5a574221"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0b65193a413ccc930671c55153a03ee57cecb49e6227204b04fae512eb657a7"}, + {file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:858738e9c32147f78b3ac24dc0edb6610000e56dc0f700fd5f651d0a0f0eb9ff"}, + {file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:da279aa314f00acbb803da1e76fa18666778e8a8f83484fba94526da5de2cba7"}, + {file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:7c64d38fb49b6cdeda16ab49e35fe0da2e1e9b34bc38bd78386530f218b37139"}, + {file = "rpds_py-0.30.0-cp310-cp310-win32.whl", hash = "sha256:6de2a32a1665b93233cde140ff8b3467bdb9e2af2b91079f0333a0974d12d464"}, + {file = "rpds_py-0.30.0-cp310-cp310-win_amd64.whl", hash = "sha256:1726859cd0de969f88dc8673bdd954185b9104e05806be64bcd87badbe313169"}, + {file = "rpds_py-0.30.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a2bffea6a4ca9f01b3f8e548302470306689684e61602aa3d141e34da06cf425"}, + {file = "rpds_py-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dc4f992dfe1e2bc3ebc7444f6c7051b4bc13cd8e33e43511e8ffd13bf407010d"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:422c3cb9856d80b09d30d2eb255d0754b23e090034e1deb4083f8004bd0761e4"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07ae8a593e1c3c6b82ca3292efbe73c30b61332fd612e05abee07c79359f292f"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f90dd7557b6bd57f40abe7747e81e0c0b119bef015ea7726e69fe550e394a4"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99b47d6ad9a6da00bec6aabe5a6279ecd3c06a329d4aa4771034a21e335c3a97"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33f559f3104504506a44bb666b93a33f5d33133765b0c216a5bf2f1e1503af89"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:946fe926af6e44f3697abbc305ea168c2c31d3e3ef1058cf68f379bf0335a78d"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495aeca4b93d465efde585977365187149e75383ad2684f81519f504f5c13038"}, + {file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9a0ca5da0386dee0655b4ccdf46119df60e0f10da268d04fe7cc87886872ba7"}, + {file = 
"rpds_py-0.30.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d6d1cc13664ec13c1b84241204ff3b12f9bb82464b8ad6e7a5d3486975c2eed"}, + {file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3896fa1be39912cf0757753826bc8bdc8ca331a28a7c4ae46b7a21280b06bb85"}, + {file = "rpds_py-0.30.0-cp311-cp311-win32.whl", hash = "sha256:55f66022632205940f1827effeff17c4fa7ae1953d2b74a8581baaefb7d16f8c"}, + {file = "rpds_py-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:a51033ff701fca756439d641c0ad09a41d9242fa69121c7d8769604a0a629825"}, + {file = "rpds_py-0.30.0-cp311-cp311-win_arm64.whl", hash = "sha256:47b0ef6231c58f506ef0b74d44e330405caa8428e770fec25329ed2cb971a229"}, + {file = "rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad"}, + {file = "rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6"}, + {file = 
"rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394"}, + {file = "rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf"}, + {file = "rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b"}, + {file = "rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e"}, + {file = "rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2"}, + {file = "rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2"}, + {file = 
"rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95"}, + {file = "rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d"}, + {file = "rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15"}, + {file = "rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1"}, + {file = "rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a"}, + {file = "rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db"}, + {file = 
"rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27"}, + {file = "rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6"}, + {file = "rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d"}, + {file = "rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0"}, + {file = "rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f"}, 
+ {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53"}, + {file = "rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed"}, + {file = "rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950"}, + {file = "rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6"}, + {file = "rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb"}, + {file = 
"rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40"}, + {file = "rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0"}, + {file = "rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = 
"sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c2262bdba0ad4fc6fb5545660673925c2d2a5d9e2e0fb603aad545427be0fc58"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ee6af14263f25eedc3bb918a3c04245106a42dfd4f5c2285ea6f997b1fc3f89a"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3adbb8179ce342d235c31ab8ec511e66c73faa27a47e076ccc92421add53e2bb"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:250fa00e9543ac9b97ac258bd37367ff5256666122c2d0f2bc97577c60a1818c"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9854cf4f488b3d57b9aaeb105f06d78e5529d3145b1e4a41750167e8c213c6d3"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:993914b8e560023bc0a8bf742c5f303551992dcb85e247b1e5c7f4a7d145bda5"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58edca431fb9b29950807e301826586e5bbf24163677732429770a697ffe6738"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:dea5b552272a944763b34394d04577cf0f9bd013207bc32323b5a89a53cf9c2f"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ba3af48635eb83d03f6c9735dfb21785303e73d22ad03d489e88adae6eab8877"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:dff13836529b921e22f15cb099751209a60009731a68519630a24d61f0b1b30a"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b151685b23929ab7beec71080a8889d4d6d9fa9a983d213f07121205d48e2c4"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:ac37f9f516c51e5753f27dfdef11a88330f04de2d564be3991384b2f3535d02e"}, + {file = "rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84"}, +] + [[package]] name = "ruff" version = "0.9.7" @@ -2629,13 +3929,30 @@ dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodest doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.16.5)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +[[package]] +name = "secretstorage" +version = "3.5.0" +description = "Python bindings to FreeDesktop.org Secret Service API" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\"" +files = [ + {file = "secretstorage-3.5.0-py3-none-any.whl", hash = "sha256:0ce65888c0725fcb2c5bc0fdb8e5438eece02c523557ea40ce0703c266248137"}, + {file = "secretstorage-3.5.0.tar.gz", hash = "sha256:f04b8e4689cbce351744d5537bf6b1329c6fc68f91fa666f60a380edddcd11be"}, +] + +[package.dependencies] +cryptography = ">=2.0" +jeepney = ">=0.6" + [[package]] name = "setuptools" version = "75.8.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "docs"] files = [ {file = "setuptools-75.8.1-py3-none-any.whl", hash = "sha256:3bc32c0b84c643299ca94e77f834730f126efd621de0cc1de64119e0e17dab1f"}, {file = "setuptools-75.8.1.tar.gz", hash = "sha256:65fb779a8f28895242923582eadca2337285f0891c2c9e160754df917c3d2530"}, 
@@ -2650,42 +3967,313 @@ enabler = ["pytest-enabler (>=2.2)"] test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] -[[package]] -name = "shellingham" -version = "1.5.4" -description = "Tool to Detect Surrounding Shell" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, - {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, -] - [[package]] name = "six" version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] +groups = ["main", "docs"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] -name = "smmap" -version = "5.0.2" -description = "A pure Python implementation of a sliding window memory map manager" +name = "snowballstemmer" +version = "3.0.1" +description = "This package provides 32 stemmers for 30 languages generated from Snowball algorithms." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*" +groups = ["docs"] +files = [ + {file = "snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064"}, + {file = "snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895"}, +] + +[[package]] +name = "soupsieve" +version = "2.8.3" +description = "A modern CSS selector implementation for Beautiful Soup." +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "soupsieve-2.8.3-py3-none-any.whl", hash = "sha256:ed64f2ba4eebeab06cc4962affce381647455978ffc1e36bb79a545b91f45a95"}, + {file = "soupsieve-2.8.3.tar.gz", hash = "sha256:3267f1eeea4251fb42728b6dfb746edc9acaffc4a45b27e19450b676586e8349"}, +] + +[[package]] +name = "sphinx" +version = "6.2.1" +description = "Python documentation generator" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "Sphinx-6.2.1.tar.gz", hash = "sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b"}, + {file = "sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912"}, +] + +[package.dependencies] +alabaster = ">=0.7,<0.8" +babel = ">=2.9" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +docutils = ">=0.18.1,<0.20" +imagesize = ">=1.3" +Jinja2 = ">=3.0" +packaging = ">=21.0" +Pygments = ">=2.13" +requests = ">=2.25.0" +snowballstemmer = ">=2.0" +sphinxcontrib-applehelp = "*" +sphinxcontrib-devhelp = "*" +sphinxcontrib-htmlhelp = ">=2.0.0" +sphinxcontrib-jsmath = "*" +sphinxcontrib-qthelp = "*" +sphinxcontrib-serializinghtml = ">=1.1.5" + +[package.extras] +docs = ["sphinxcontrib-websupport"] +lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] +test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] + +[[package]] +name 
= "sphinx-autodoc-typehints" +version = "1.23.0" +description = "Type hints (PEP 484) support for the Sphinx autodoc extension" optional = false python-versions = ">=3.7" -groups = ["dev"] +groups = ["docs"] +files = [ + {file = "sphinx_autodoc_typehints-1.23.0-py3-none-any.whl", hash = "sha256:ac099057e66b09e51b698058ba7dd76e57e1fe696cd91b54e121d3dad188f91d"}, + {file = "sphinx_autodoc_typehints-1.23.0.tar.gz", hash = "sha256:5d44e2996633cdada499b6d27a496ddf9dbc95dd1f0f09f7b37940249e61f6e9"}, +] + +[package.dependencies] +sphinx = ">=5.3" + +[package.extras] +docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.23.4)"] +testing = ["covdefaults (>=2.2.2)", "coverage (>=7.2.2)", "diff-cover (>=7.5)", "nptyping (>=2.5)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "sphobjinv (>=2.3.1)", "typing-extensions (>=4.5)"] +type-comment = ["typed-ast (>=1.5.4) ; python_version < \"3.8\""] + +[[package]] +name = "sphinx-book-theme" +version = "1.1.2" +description = "A clean book theme for scientific explanations and documentation with Sphinx" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinx_book_theme-1.1.2-py3-none-any.whl", hash = "sha256:cee744466fde48f50302b851291b208aa67e726ca31b7a3bfb9b6e6a145663e0"}, + {file = "sphinx_book_theme-1.1.2.tar.gz", hash = "sha256:7f3abcd146ca82e6f39d6db53711102b1c1d328d12f65e3e47ad9bf842614a49"}, +] + +[package.dependencies] +pydata-sphinx-theme = ">=0.14" +sphinx = ">=5" + +[package.extras] +code-style = ["pre-commit"] +doc = ["ablog", "folium", "ipywidgets", "matplotlib", "myst-nb", "nbclient", "numpy", "numpydoc", "pandas", "plotly", "sphinx-copybutton", "sphinx-design", "sphinx-examples", "sphinx-tabs", "sphinx-thebe", "sphinx-togglebutton", "sphinxcontrib-bibtex", "sphinxcontrib-youtube", "sphinxext-opengraph"] +test = ["beautifulsoup4", "coverage", "myst-nb", "pytest", "pytest-cov", "pytest-regressions", "sphinx_thebe"] + +[[package]] +name = "sphinx-copybutton" 
+version = "0.5.2" +description = "Add a copy button to each of your code cells." +optional = false +python-versions = ">=3.7" +groups = ["docs"] +files = [ + {file = "sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd"}, + {file = "sphinx_copybutton-0.5.2-py3-none-any.whl", hash = "sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e"}, +] + +[package.dependencies] +sphinx = ">=1.8" + +[package.extras] +code-style = ["pre-commit (==2.12.1)"] +rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"] + +[[package]] +name = "sphinx-rtd-theme" +version = "2.0.0" +description = "Read the Docs theme for Sphinx" +optional = false +python-versions = ">=3.6" +groups = ["docs"] +files = [ + {file = "sphinx_rtd_theme-2.0.0-py2.py3-none-any.whl", hash = "sha256:ec93d0856dc280cf3aee9a4c9807c60e027c7f7b461b77aeffed682e68f0e586"}, + {file = "sphinx_rtd_theme-2.0.0.tar.gz", hash = "sha256:bd5d7b80622406762073a04ef8fadc5f9151261563d47027de09910ce03afe6b"}, +] + +[package.dependencies] +docutils = "<0.21" +sphinx = ">=5,<8" +sphinxcontrib-jquery = ">=4,<5" + +[package.extras] +dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"] + +[[package]] +name = "sphinx-togglebutton" +version = "0.3.2" +description = "Toggle page content and collapse admonitions in Sphinx." 
+optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "sphinx-togglebutton-0.3.2.tar.gz", hash = "sha256:ab0c8b366427b01e4c89802d5d078472c427fa6e9d12d521c34fa0442559dc7a"}, + {file = "sphinx_togglebutton-0.3.2-py3-none-any.whl", hash = "sha256:9647ba7874b7d1e2d43413d8497153a85edc6ac95a3fea9a75ef9c1e08aaae2b"}, +] + +[package.dependencies] +docutils = "*" +setuptools = "*" +sphinx = "*" +wheel = "*" + +[package.extras] +sphinx = ["matplotlib", "myst-nb", "numpy", "sphinx-book-theme", "sphinx-design", "sphinx-examples"] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"}, + {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"}, + {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" +optional = false 
+python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"}, + {file = "sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["html5lib", "pytest"] + +[[package]] +name = "sphinxcontrib-jquery" +version = "4.1" +description = "Extension to include jQuery on newer Sphinx releases" +optional = false +python-versions = ">=2.7" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a"}, + {file = "sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae"}, +] + +[package.dependencies] +Sphinx = ">=1.8" + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +description = "A sphinx extension which renders display math in HTML via JavaScript" +optional = false +python-versions = ">=3.5" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, + {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, +] + +[package.extras] +test = ["flake8", "mypy", "pytest"] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"}, + {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", hash = 
"sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["defusedxml (>=0.7.1)", "pytest"] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, + {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +groups = ["docs"] files = [ - {file = "smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e"}, - {file = "smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5"}, + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, ] +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + [[package]] name = "sympy" version = "1.14.0" @@ -2704,6 +4292,21 @@ mpmath = ">=1.1.0,<1.4" [package.extras] dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] +[[package]] +name = 
"tabulate" +version = "0.10.0" +description = "Pretty-print tabular data" +optional = false +python-versions = ">=3.10" +groups = ["docs"] +files = [ + {file = "tabulate-0.10.0-py3-none-any.whl", hash = "sha256:f0b0622e567335c8fabaaa659f1b33bcb6ddfe2e496071b743aa113f8774f2d3"}, + {file = "tabulate-0.10.0.tar.gz", hash = "sha256:e2cfde8f79420f6deeffdeda9aaec3b6bc5abce947655d17ac662b126e48a60d"}, +] + +[package.extras] +widechars = ["wcwidth"] + [[package]] name = "termcolor" version = "2.5.0" @@ -2731,13 +4334,32 @@ files = [ {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, ] +[[package]] +name = "tinycss2" +version = "1.5.1" +description = "A tiny CSS parser" +optional = false +python-versions = ">=3.10" +groups = ["docs"] +files = [ + {file = "tinycss2-1.5.1-py3-none-any.whl", hash = "sha256:3415ba0f5839c062696996998176c4a3751d18b7edaaeeb658c9ce21ec150661"}, + {file = "tinycss2-1.5.1.tar.gz", hash = "sha256:d339d2b616ba90ccce58da8495a78f46e55d4d25f9fd71dfd526f07e7d53f957"}, +] + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["furo", "sphinx"] +test = ["pytest", "ruff"] + [[package]] name = "tomli" version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["dev", "docs"] markers = "python_version == \"3.10\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, @@ -2877,6 +4499,26 @@ text = ["ipadic (>=1.0.0)", "mecab-python3 (>=1.0.6)", "nltk (>3.8.1)", "regex ( typing = ["mypy (==1.14.0)", "torch (==2.5.1)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] visual = ["SciencePlots (>=2.0.0)", "matplotlib (>=3.6.0)"] +[[package]] +name = "tornado" +version = "6.5.5" +description = "Tornado is a Python web framework and asynchronous 
networking library, originally developed at FriendFeed." +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "tornado-6.5.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:487dc9cc380e29f58c7ab88f9e27cdeef04b2140862e5076a66fb6bb68bb1bfa"}, + {file = "tornado-6.5.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:65a7f1d46d4bb41df1ac99f5fcb685fb25c7e61613742d5108b010975a9a6521"}, + {file = "tornado-6.5.5-cp39-abi3-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e74c92e8e65086b338fd56333fb9a68b9f6f2fe7ad532645a290a464bcf46be5"}, + {file = "tornado-6.5.5-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:435319e9e340276428bbdb4e7fa732c2d399386d1de5686cb331ec8eee754f07"}, + {file = "tornado-6.5.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3f54aa540bdbfee7b9eb268ead60e7d199de5021facd276819c193c0fb28ea4e"}, + {file = "tornado-6.5.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:36abed1754faeb80fbd6e64db2758091e1320f6bba74a4cf8c09cd18ccce8aca"}, + {file = "tornado-6.5.5-cp39-abi3-win32.whl", hash = "sha256:dd3eafaaeec1c7f2f8fdcd5f964e8907ad788fe8a5a32c4426fbbdda621223b7"}, + {file = "tornado-6.5.5-cp39-abi3-win_amd64.whl", hash = "sha256:6443a794ba961a9f619b1ae926a2e900ac20c34483eea67be4ed8f1e58d3ef7b"}, + {file = "tornado-6.5.5-cp39-abi3-win_arm64.whl", hash = "sha256:2c9a876e094109333f888539ddb2de4361743e5d21eece20688e3e351e4990a6"}, + {file = "tornado-6.5.5.tar.gz", hash = "sha256:192b8f3ea91bd7f1f50c06955416ed76c6b72f96779b962f07f911b91e8d30e9"}, +] + [[package]] name = "tqdm" version = "4.67.1" @@ -2899,6 +4541,22 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] +[[package]] +name = "traitlets" +version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = 
"sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + [[package]] name = "triton" version = "3.3.0" @@ -2925,32 +4583,43 @@ tests = ["autopep8", "isort", "llnl-hatchet", "numpy", "pytest", "pytest-forked" tutorials = ["matplotlib", "pandas", "tabulate"] [[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" +name = "twine" +version = "6.2.0" +description = "Collection of utilities for publishing packages on PyPI" optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, + {file = "twine-6.2.0-py3-none-any.whl", hash = "sha256:418ebf08ccda9a8caaebe414433b0ba5e25eb5e4a927667122fbe8f829f985d8"}, + {file = "twine-6.2.0.tar.gz", hash = "sha256:e5ed0d2fd70c9959770dce51c8f39c8945c574e18173a7b81802dab51b4b75cf"}, ] +[package.dependencies] +id = "*" +keyring = {version = ">=21.2.0", markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\""} +packaging = ">=24.0" +readme-renderer = ">=35.0" +requests = ">=2.20" +requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0" +rfc3986 = ">=1.4.0" +rich = ">=12.0.0" +urllib3 = ">=1.26.0" + +[package.extras] +keyring = ["keyring (>=21.2.0)"] + [[package]] -name = "typing-inspection" -version = "0.4.2" -description = "Runtime typing introspection tools" +name = 
"typing-extensions" +version = "4.15.0" +description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" -groups = ["dev"] +groups = ["main", "dev", "docs"] files = [ - {file = "typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7"}, - {file = "typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464"}, + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, ] -[package.dependencies] -typing-extensions = ">=4.12.0" - [[package]] name = "tzdata" version = "2025.1" @@ -2980,7 +4649,7 @@ version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["main", "dev", "docs"] files = [ {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, @@ -3019,131 +4688,38 @@ version = "0.2.14" description = "Measures the displayed width of unicode strings in a terminal" optional = false python-versions = ">=3.6" -groups = ["dev"] +groups = ["dev", "docs"] files = [ {file = "wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1"}, {file = "wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605"}, ] [[package]] -name = "wrapt" -version = "2.0.1" -description = "Module for decorators, wrappers and monkey patching." 
+name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" optional = false -python-versions = ">=3.8" -groups = ["dev"] +python-versions = "*" +groups = ["docs"] +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + +[[package]] +name = "wheel" +version = "0.47.0" +description = "Command line tool for manipulating wheel files" +optional = false +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "wrapt-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:64b103acdaa53b7caf409e8d45d39a8442fe6dcfec6ba3f3d141e0cc2b5b4dbd"}, - {file = "wrapt-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:91bcc576260a274b169c3098e9a3519fb01f2989f6d3d386ef9cbf8653de1374"}, - {file = "wrapt-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ab594f346517010050126fcd822697b25a7031d815bb4fbc238ccbe568216489"}, - {file = "wrapt-2.0.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:36982b26f190f4d737f04a492a68accbfc6fa042c3f42326fdfbb6c5b7a20a31"}, - {file = "wrapt-2.0.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23097ed8bc4c93b7bf36fa2113c6c733c976316ce0ee2c816f64ca06102034ef"}, - {file = "wrapt-2.0.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8bacfe6e001749a3b64db47bcf0341da757c95959f592823a93931a422395013"}, - {file = "wrapt-2.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8ec3303e8a81932171f455f792f8df500fc1a09f20069e5c16bd7049ab4e8e38"}, - {file = "wrapt-2.0.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:3f373a4ab5dbc528a94334f9fe444395b23c2f5332adab9ff4ea82f5a9e33bc1"}, - {file = 
"wrapt-2.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f49027b0b9503bf6c8cdc297ca55006b80c2f5dd36cecc72c6835ab6e10e8a25"}, - {file = "wrapt-2.0.1-cp310-cp310-win32.whl", hash = "sha256:8330b42d769965e96e01fa14034b28a2a7600fbf7e8f0cc90ebb36d492c993e4"}, - {file = "wrapt-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:1218573502a8235bb8a7ecaed12736213b22dcde9feab115fa2989d42b5ded45"}, - {file = "wrapt-2.0.1-cp310-cp310-win_arm64.whl", hash = "sha256:eda8e4ecd662d48c28bb86be9e837c13e45c58b8300e43ba3c9b4fa9900302f7"}, - {file = "wrapt-2.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0e17283f533a0d24d6e5429a7d11f250a58d28b4ae5186f8f47853e3e70d2590"}, - {file = "wrapt-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85df8d92158cb8f3965aecc27cf821461bb5f40b450b03facc5d9f0d4d6ddec6"}, - {file = "wrapt-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1be685ac7700c966b8610ccc63c3187a72e33cab53526a27b2a285a662cd4f7"}, - {file = "wrapt-2.0.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:df0b6d3b95932809c5b3fecc18fda0f1e07452d05e2662a0b35548985f256e28"}, - {file = "wrapt-2.0.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4da7384b0e5d4cae05c97cd6f94faaf78cc8b0f791fc63af43436d98c4ab37bb"}, - {file = "wrapt-2.0.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ec65a78fbd9d6f083a15d7613b2800d5663dbb6bb96003899c834beaa68b242c"}, - {file = "wrapt-2.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7de3cc939be0e1174969f943f3b44e0d79b6f9a82198133a5b7fc6cc92882f16"}, - {file = "wrapt-2.0.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:fb1a5b72cbd751813adc02ef01ada0b0d05d3dcbc32976ce189a1279d80ad4a2"}, - {file = "wrapt-2.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3fa272ca34332581e00bf7773e993d4f632594eb2d1b0b162a9038df0fd971dd"}, - {file = "wrapt-2.0.1-cp311-cp311-win32.whl", 
hash = "sha256:fc007fdf480c77301ab1afdbb6ab22a5deee8885f3b1ed7afcb7e5e84a0e27be"}, - {file = "wrapt-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:47434236c396d04875180171ee1f3815ca1eada05e24a1ee99546320d54d1d1b"}, - {file = "wrapt-2.0.1-cp311-cp311-win_arm64.whl", hash = "sha256:837e31620e06b16030b1d126ed78e9383815cbac914693f54926d816d35d8edf"}, - {file = "wrapt-2.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1fdbb34da15450f2b1d735a0e969c24bdb8d8924892380126e2a293d9902078c"}, - {file = "wrapt-2.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3d32794fe940b7000f0519904e247f902f0149edbe6316c710a8562fb6738841"}, - {file = "wrapt-2.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:386fb54d9cd903ee0012c09291336469eb7b244f7183d40dc3e86a16a4bace62"}, - {file = "wrapt-2.0.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7b219cb2182f230676308cdcacd428fa837987b89e4b7c5c9025088b8a6c9faf"}, - {file = "wrapt-2.0.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:641e94e789b5f6b4822bb8d8ebbdfc10f4e4eae7756d648b717d980f657a9eb9"}, - {file = "wrapt-2.0.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fe21b118b9f58859b5ebaa4b130dee18669df4bd111daad082b7beb8799ad16b"}, - {file = "wrapt-2.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:17fb85fa4abc26a5184d93b3efd2dcc14deb4b09edcdb3535a536ad34f0b4dba"}, - {file = "wrapt-2.0.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:b89ef9223d665ab255ae42cc282d27d69704d94be0deffc8b9d919179a609684"}, - {file = "wrapt-2.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a453257f19c31b31ba593c30d997d6e5be39e3b5ad9148c2af5a7314061c63eb"}, - {file = "wrapt-2.0.1-cp312-cp312-win32.whl", hash = "sha256:3e271346f01e9c8b1130a6a3b0e11908049fe5be2d365a5f402778049147e7e9"}, - {file = "wrapt-2.0.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:2da620b31a90cdefa9cd0c2b661882329e2e19d1d7b9b920189956b76c564d75"}, - {file = "wrapt-2.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:aea9c7224c302bc8bfc892b908537f56c430802560e827b75ecbde81b604598b"}, - {file = "wrapt-2.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:47b0f8bafe90f7736151f61482c583c86b0693d80f075a58701dd1549b0010a9"}, - {file = "wrapt-2.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cbeb0971e13b4bd81d34169ed57a6dda017328d1a22b62fda45e1d21dd06148f"}, - {file = "wrapt-2.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:eb7cffe572ad0a141a7886a1d2efa5bef0bf7fe021deeea76b3ab334d2c38218"}, - {file = "wrapt-2.0.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c8d60527d1ecfc131426b10d93ab5d53e08a09c5fa0175f6b21b3252080c70a9"}, - {file = "wrapt-2.0.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c654eafb01afac55246053d67a4b9a984a3567c3808bb7df2f8de1c1caba2e1c"}, - {file = "wrapt-2.0.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:98d873ed6c8b4ee2418f7afce666751854d6d03e3c0ec2a399bb039cd2ae89db"}, - {file = "wrapt-2.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c9e850f5b7fc67af856ff054c71690d54fa940c3ef74209ad9f935b4f66a0233"}, - {file = "wrapt-2.0.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:e505629359cb5f751e16e30cf3f91a1d3ddb4552480c205947da415d597f7ac2"}, - {file = "wrapt-2.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2879af909312d0baf35f08edeea918ee3af7ab57c37fe47cb6a373c9f2749c7b"}, - {file = "wrapt-2.0.1-cp313-cp313-win32.whl", hash = "sha256:d67956c676be5a24102c7407a71f4126d30de2a569a1c7871c9f3cabc94225d7"}, - {file = "wrapt-2.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:9ca66b38dd642bf90c59b6738af8070747b610115a39af2498535f62b5cdc1c3"}, - {file = "wrapt-2.0.1-cp313-cp313-win_arm64.whl", hash = 
"sha256:5a4939eae35db6b6cec8e7aa0e833dcca0acad8231672c26c2a9ab7a0f8ac9c8"}, - {file = "wrapt-2.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a52f93d95c8d38fed0669da2ebdb0b0376e895d84596a976c15a9eb45e3eccb3"}, - {file = "wrapt-2.0.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4e54bbf554ee29fcceee24fa41c4d091398b911da6e7f5d7bffda963c9aed2e1"}, - {file = "wrapt-2.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:908f8c6c71557f4deaa280f55d0728c3bca0960e8c3dd5ceeeafb3c19942719d"}, - {file = "wrapt-2.0.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e2f84e9af2060e3904a32cea9bb6db23ce3f91cfd90c6b426757cf7cc01c45c7"}, - {file = "wrapt-2.0.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e3612dc06b436968dfb9142c62e5dfa9eb5924f91120b3c8ff501ad878f90eb3"}, - {file = "wrapt-2.0.1-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6d2d947d266d99a1477cd005b23cbd09465276e302515e122df56bb9511aca1b"}, - {file = "wrapt-2.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7d539241e87b650cbc4c3ac9f32c8d1ac8a54e510f6dca3f6ab60dcfd48c9b10"}, - {file = "wrapt-2.0.1-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:4811e15d88ee62dbf5c77f2c3ff3932b1e3ac92323ba3912f51fc4016ce81ecf"}, - {file = "wrapt-2.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c1c91405fcf1d501fa5d55df21e58ea49e6b879ae829f1039faaf7e5e509b41e"}, - {file = "wrapt-2.0.1-cp313-cp313t-win32.whl", hash = "sha256:e76e3f91f864e89db8b8d2a8311d57df93f01ad6bb1e9b9976d1f2e83e18315c"}, - {file = "wrapt-2.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:83ce30937f0ba0d28818807b303a412440c4b63e39d3d8fc036a94764b728c92"}, - {file = "wrapt-2.0.1-cp313-cp313t-win_arm64.whl", hash = "sha256:4b55cacc57e1dc2d0991dbe74c6419ffd415fb66474a02335cb10efd1aa3f84f"}, - {file = "wrapt-2.0.1-cp314-cp314-macosx_10_13_universal2.whl", hash = 
"sha256:5e53b428f65ece6d9dad23cb87e64506392b720a0b45076c05354d27a13351a1"}, - {file = "wrapt-2.0.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ad3ee9d0f254851c71780966eb417ef8e72117155cff04821ab9b60549694a55"}, - {file = "wrapt-2.0.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d7b822c61ed04ee6ad64bc90d13368ad6eb094db54883b5dde2182f67a7f22c0"}, - {file = "wrapt-2.0.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7164a55f5e83a9a0b031d3ffab4d4e36bbec42e7025db560f225489fa929e509"}, - {file = "wrapt-2.0.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e60690ba71a57424c8d9ff28f8d006b7ad7772c22a4af432188572cd7fa004a1"}, - {file = "wrapt-2.0.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:3cd1a4bd9a7a619922a8557e1318232e7269b5fb69d4ba97b04d20450a6bf970"}, - {file = "wrapt-2.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b4c2e3d777e38e913b8ce3a6257af72fb608f86a1df471cb1d4339755d0a807c"}, - {file = "wrapt-2.0.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:3d366aa598d69416b5afedf1faa539fac40c1d80a42f6b236c88c73a3c8f2d41"}, - {file = "wrapt-2.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c235095d6d090aa903f1db61f892fffb779c1eaeb2a50e566b52001f7a0f66ed"}, - {file = "wrapt-2.0.1-cp314-cp314-win32.whl", hash = "sha256:bfb5539005259f8127ea9c885bdc231978c06b7a980e63a8a61c8c4c979719d0"}, - {file = "wrapt-2.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:4ae879acc449caa9ed43fc36ba08392b9412ee67941748d31d94e3cedb36628c"}, - {file = "wrapt-2.0.1-cp314-cp314-win_arm64.whl", hash = "sha256:8639b843c9efd84675f1e100ed9e99538ebea7297b62c4b45a7042edb84db03e"}, - {file = "wrapt-2.0.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:9219a1d946a9b32bb23ccae66bdb61e35c62773ce7ca6509ceea70f344656b7b"}, - {file = "wrapt-2.0.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = 
"sha256:fa4184e74197af3adad3c889a1af95b53bb0466bced92ea99a0c014e48323eec"}, - {file = "wrapt-2.0.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c5ef2f2b8a53b7caee2f797ef166a390fef73979b15778a4a153e4b5fedce8fa"}, - {file = "wrapt-2.0.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e042d653a4745be832d5aa190ff80ee4f02c34b21f4b785745eceacd0907b815"}, - {file = "wrapt-2.0.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2afa23318136709c4b23d87d543b425c399887b4057936cd20386d5b1422b6fa"}, - {file = "wrapt-2.0.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6c72328f668cf4c503ffcf9434c2b71fdd624345ced7941bc6693e61bbe36bef"}, - {file = "wrapt-2.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3793ac154afb0e5b45d1233cb94d354ef7a983708cc3bb12563853b1d8d53747"}, - {file = "wrapt-2.0.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fec0d993ecba3991645b4857837277469c8cc4c554a7e24d064d1ca291cfb81f"}, - {file = "wrapt-2.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:949520bccc1fa227274da7d03bf238be15389cd94e32e4297b92337df9b7a349"}, - {file = "wrapt-2.0.1-cp314-cp314t-win32.whl", hash = "sha256:be9e84e91d6497ba62594158d3d31ec0486c60055c49179edc51ee43d095f79c"}, - {file = "wrapt-2.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:61c4956171c7434634401db448371277d07032a81cc21c599c22953374781395"}, - {file = "wrapt-2.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:35cdbd478607036fee40273be8ed54a451f5f23121bd9d4be515158f9498f7ad"}, - {file = "wrapt-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:90897ea1cf0679763b62e79657958cd54eae5659f6360fc7d2ccc6f906342183"}, - {file = "wrapt-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:50844efc8cdf63b2d90cd3d62d4947a28311e6266ce5235a219d21b195b4ec2c"}, - {file = "wrapt-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:49989061a9977a8cbd6d20f2efa813f24bf657c6990a42967019ce779a878dbf"}, - {file = "wrapt-2.0.1-cp38-cp38-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:09c7476ab884b74dce081ad9bfd07fe5822d8600abade571cb1f66d5fc915af6"}, - {file = "wrapt-2.0.1-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1a8a09a004ef100e614beec82862d11fc17d601092c3599afd22b1f36e4137e"}, - {file = "wrapt-2.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:89a82053b193837bf93c0f8a57ded6e4b6d88033a499dadff5067e912c2a41e9"}, - {file = "wrapt-2.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f26f8e2ca19564e2e1fdbb6a0e47f36e0efbab1acc31e15471fad88f828c75f6"}, - {file = "wrapt-2.0.1-cp38-cp38-win32.whl", hash = "sha256:115cae4beed3542e37866469a8a1f2b9ec549b4463572b000611e9946b86e6f6"}, - {file = "wrapt-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:c4012a2bd37059d04f8209916aa771dfb564cccb86079072bdcd48a308b6a5c5"}, - {file = "wrapt-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:68424221a2dc00d634b54f92441914929c5ffb1c30b3b837343978343a3512a3"}, - {file = "wrapt-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6bd1a18f5a797fe740cb3d7a0e853a8ce6461cc62023b630caec80171a6b8097"}, - {file = "wrapt-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fb3a86e703868561c5cad155a15c36c716e1ab513b7065bd2ac8ed353c503333"}, - {file = "wrapt-2.0.1-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5dc1b852337c6792aa111ca8becff5bacf576bf4a0255b0f05eb749da6a1643e"}, - {file = "wrapt-2.0.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c046781d422f0830de6329fa4b16796096f28a92c8aef3850674442cdcb87b7f"}, - {file = "wrapt-2.0.1-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f73f9f7a0ebd0db139253d27e5fc8d2866ceaeef19c30ab5d69dcbe35e1a6981"}, - {file = 
"wrapt-2.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b667189cf8efe008f55bbda321890bef628a67ab4147ebf90d182f2dadc78790"}, - {file = "wrapt-2.0.1-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:a9a83618c4f0757557c077ef71d708ddd9847ed66b7cc63416632af70d3e2308"}, - {file = "wrapt-2.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e9b121e9aeb15df416c2c960b8255a49d44b4038016ee17af03975992d03931"}, - {file = "wrapt-2.0.1-cp39-cp39-win32.whl", hash = "sha256:1f186e26ea0a55f809f232e92cc8556a0977e00183c3ebda039a807a42be1494"}, - {file = "wrapt-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:bf4cb76f36be5de950ce13e22e7fdf462b35b04665a12b64f3ac5c1bbbcf3728"}, - {file = "wrapt-2.0.1-cp39-cp39-win_arm64.whl", hash = "sha256:d6cc985b9c8b235bd933990cdbf0f891f8e010b65a3911f7a55179cd7b0fc57b"}, - {file = "wrapt-2.0.1-py3-none-any.whl", hash = "sha256:4d2ce1bf1a48c5277d7969259232b57645aae5686dba1eaeade39442277afbca"}, - {file = "wrapt-2.0.1.tar.gz", hash = "sha256:9c9c635e78497cacb81e84f8b11b23e0aacac7a136e73b8e5b2109a1d9fc468f"}, -] - -[package.extras] -dev = ["pytest", "setuptools"] + {file = "wheel-0.47.0-py3-none-any.whl", hash = "sha256:212281cab4dff978f6cedd499cd893e1f620791ca6ff7107cf270781e587eced"}, + {file = "wheel-0.47.0.tar.gz", hash = "sha256:cc72bd1009ba0cf63922e28f94d9d83b920aa2bb28f798a31d0691b02fa3c9b3"}, +] + +[package.dependencies] +packaging = ">=24.0" [[package]] name = "yarl" @@ -3242,7 +4818,28 @@ idna = ">=2.0" multidict = ">=4.0" propcache = ">=0.2.0" +[[package]] +name = "zipp" +version = "3.23.1" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version < \"3.12\" and platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" +files = [ + {file = "zipp-3.23.1-py3-none-any.whl", hash = "sha256:0b3596c50a5c700c9cb40ba8d86d9f2cc4807e9bedb06bcdf7fac85633e444dc"}, + {file = "zipp-3.23.1.tar.gz", hash = 
"sha256:32120e378d32cd9714ad503c1d024619063ec28aad2248dc6672ad13edfa5110"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.14" -content-hash = "c6aed75ce372f38a297e13463961256c0fcebe60c437fdba5315783c322e42b6" +content-hash = "36b408701f0e7f455f038dd44aec7e6d789f1d5a4abebbba384901e14291f82b" diff --git a/pyproject.toml b/pyproject.toml index bad470af..9d954b1a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,25 @@ ruff = ">=0.3" pre-commit = "^3.6" docformatter = "^1.4" commitizen = "^3.29.1" -python-semantic-release = "^9.12.0" +twine = "^6.2.0" +pyright = "^1.1.409" + +[tool.poetry.group.docs.dependencies] +setuptools = "*" +sphinx = "6.2.1" +sphinx-autodoc-typehints = "1.23.0" +numpydoc = "1.7.0" +nbsphinx = "0.9.4" +ipython = "*" +myst-parser = "3.0.1" +m2r2 = "0.3.3.post2" +sphinx-copybutton = "0.5.2" +sphinx-togglebutton = "0.3.2" +sphinx-book-theme = "1.1.2" +sphinx-rtd-theme = "2.0.0" +readthedocs-sphinx-ext = "2.2.5" +lxml-html-clean = "0.4.0" +pydata-sphinx-theme = "0.15.2" [tool.poetry.urls] @@ -48,13 +66,29 @@ package = "https://pypi.org/project/deeptab/" # code quality tools [tool.pyright] -ignore = [] -reportMissingImports = true -reportMissingTypeStubs = false +include = ["deeptab", "tests"] +exclude = [ + "**/__pycache__", + ".venv", + "build", + "dist", + "docs", + "notebooks", + "examples", +] + pythonVersion = "3.10" +typeCheckingMode = "basic" venvPath = "." 
venv = ".venv" +reportMissingImports = true +reportMissingTypeStubs = false +reportPrivateImportUsage = false +reportUnknownMemberType = false +reportUnknownArgumentType = false +reportUnknownVariableType = false + # Configure code linting [tool.ruff] line-length = 120 @@ -92,85 +126,16 @@ ignore = [ [tool.ruff.lint.per-file-ignores] # allow asserts in test files (bandit) "test_*" = ["S101"] +# long docstring line — suppress until refactored +"deeptab/base_models/tangos.py" = ["W505"] [tool.ruff.lint.pycodestyle] max-doc-length = 120 -[tool.docformatter] -recursive = true -in-place = true -wrap-summaries = 120 -wrap-descriptions = 120 -exclude = ["build", "dist", ".venv", ".git"] - # Commitizen configuration [tool.commitizen] name = "cz_conventional_commits" -version = "1.6.1" -version_files = ["pyproject.toml:version", "deeptab/__version__.py"] +version_provider = "poetry" tag_format = "v$version" update_changelog_on_bump = true major_version_zero = false - -# Python Semantic Release configuration -[tool.semantic_release] -version_toml = ["pyproject.toml:tool.poetry.version"] -version_variables = ["deeptab/__version__.py:__version__"] -build_command = "poetry build" -branch = "master" -upload_to_pypi = true -upload_to_release = true -commit_parser = "angular" - -[tool.semantic_release.branches.master] -match = "(master|main)" -prerelease = false - -[tool.semantic_release.changelog] -template_dir = "templates" -changelog_file = "CHANGELOG.md" -exclude_commit_patterns = [] - -[tool.semantic_release.changelog.environment] -block_start_string = "{%" -block_end_string = "%}" -variable_start_string = "{{" -variable_end_string = "}}" -comment_start_string = "{#" -comment_end_string = "#}" -trim_blocks = false -lstrip_blocks = false -newline_sequence = "\n" -keep_trailing_newline = false -extensions = [] -autoescape = true - -[tool.semantic_release.commit_parser_options] -allowed_tags = [ - "build", - "chore", - "ci", - "docs", - "feat", - "fix", - "perf", - "style", - 
"refactor", - "test", -] -minor_tags = ["feat"] -patch_tags = ["fix", "perf"] - -[tool.semantic_release.remote] -name = "origin" -type = "github" -ignore_token_for_push = false - -[tool.semantic_release.publish] -dist_glob_patterns = ["dist/*"] -upload_to_vcs_release = true - -[[tool.semantic_release.publish.repositories]] -name = "pypi" -url = "https://upload.pypi.org/legacy/" -token_var = "PYPI_TOKEN" diff --git a/readthedocs.yaml b/readthedocs.yaml index 07d63361..38e2867a 100644 --- a/readthedocs.yaml +++ b/readthedocs.yaml @@ -29,4 +29,7 @@ formats: # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: install: + - method: pip + path: . + extra_requirements: [] - requirements: docs/requirements_docs.txt diff --git a/tests/test_base.py b/tests/test_base.py index 3af042d2..e71f3fea 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -1,8 +1,10 @@ -import pytest +import importlib import inspect -import torch import os -import importlib + +import pytest +import torch + from deeptab.base_models.utils import BaseModel # Paths for models and configs @@ -23,12 +25,7 @@ module = importlib.import_module(module_name) for name, obj in inspect.getmembers(module, inspect.isclass): - if ( - issubclass(obj, BaseModel) - and obj is not BaseModel - and obj.__name__ not in EXCLUDED_CLASSES - ): - + if issubclass(obj, BaseModel) and obj is not BaseModel and obj.__name__ not in EXCLUDED_CLASSES: model_classes.append(obj) @@ -38,63 +35,51 @@ def get_model_config(model_class): config_class_name = f"Default{model_name}Config" # e.g., "DefaultMambularConfig" try: - config_module = importlib.import_module( - f"{CONFIG_MODULE_PATH}.{model_name.lower()}_config" - ) + config_module = importlib.import_module(f"{CONFIG_MODULE_PATH}.{model_name.lower()}_config") config_class = getattr(config_module, config_class_name) return config_class() # Instantiate config except (ModuleNotFoundError, AttributeError) as e: - pytest.fail( - f"Could not find or 
instantiate config {config_class_name} for {model_name}: {e}" - ) + pytest.fail(f"Could not find or instantiate config {config_class_name} for {model_name}: {e}") @pytest.mark.parametrize("model_class", model_classes) def test_model_inherits_base_model(model_class): """Test that each model correctly inherits from BaseModel.""" - assert issubclass( - model_class, BaseModel - ), f"{model_class.__name__} should inherit from BaseModel." + assert issubclass(model_class, BaseModel), f"{model_class.__name__} should inherit from BaseModel." @pytest.mark.parametrize("model_class", model_classes) def test_model_has_forward_method(model_class): """Test that each model has a forward method with *data.""" - assert hasattr( - model_class, "forward" - ), f"{model_class.__name__} is missing a forward method." + assert hasattr(model_class, "forward"), f"{model_class.__name__} is missing a forward method." sig = inspect.signature(model_class.forward) - assert any( - p.kind == inspect.Parameter.VAR_POSITIONAL for p in sig.parameters.values() - ), f"{model_class.__name__}.forward should have *data argument." + assert any(p.kind == inspect.Parameter.VAR_POSITIONAL for p in sig.parameters.values()), ( + f"{model_class.__name__}.forward should have *data argument." + ) @pytest.mark.parametrize("model_class", model_classes) def test_model_takes_config(model_class): """Test that each model accepts a config argument.""" sig = inspect.signature(model_class.__init__) - assert ( - "config" in sig.parameters - ), f"{model_class.__name__} should accept a 'config' parameter." + assert "config" in sig.parameters, f"{model_class.__name__} should accept a 'config' parameter." @pytest.mark.parametrize("model_class", model_classes) def test_model_has_num_classes(model_class): """Test that each model accepts a num_classes argument.""" sig = inspect.signature(model_class.__init__) - assert ( - "num_classes" in sig.parameters - ), f"{model_class.__name__} should accept a 'num_classes' parameter." 
+ assert "num_classes" in sig.parameters, f"{model_class.__name__} should accept a 'num_classes' parameter." @pytest.mark.parametrize("model_class", model_classes) def test_model_calls_super_init(model_class): """Test that each model calls super().__init__(config=config, **kwargs).""" source = inspect.getsource(model_class.__init__) - assert ( - "super().__init__(config=config" in source - ), f"{model_class.__name__} should call super().__init__(config=config, **kwargs)." + assert "super().__init__(config=config" in source, ( + f"{model_class.__name__} should call super().__init__(config=config, **kwargs)." + ) @pytest.mark.parametrize("model_class", model_classes) @@ -120,9 +105,7 @@ def test_model_initialization(model_class): ) # Mock feature info try: - model = model_class( - feature_information=feature_info, num_classes=3, config=config - ) + model = model_class(feature_information=feature_info, num_classes=3, config=config) except Exception as e: pytest.fail(f"Failed to initialize {model_class.__name__}: {e}") @@ -150,9 +133,7 @@ def test_model_defines_key_attributes(model_class): ) # Mock feature info try: - model = model_class( - feature_information=feature_info, num_classes=3, config=config - ) + model = model_class(feature_information=feature_info, num_classes=3, config=config) except TypeError as e: pytest.fail(f"Failed to initialize {model_class.__name__}: {e}") diff --git a/tests/test_configs.py b/tests/test_configs.py index 6b936724..3b00bb30 100644 --- a/tests/test_configs.py +++ b/tests/test_configs.py @@ -1,9 +1,11 @@ -import pytest -import inspect +import dataclasses import importlib +import inspect import os -import dataclasses import typing + +import pytest + from deeptab.configs.base_config import BaseConfig # Ensure correct path CONFIG_MODULE_PATH = "deeptab.configs" @@ -11,11 +13,7 @@ # Discover all config classes in deeptab/configs/ for filename in os.listdir(os.path.dirname(__file__) + "/../deeptab/configs"): - if ( - 
filename.endswith(".py") - and filename != "base_config.py" - and not filename.startswith("__") - ): + if filename.endswith(".py") and filename != "base_config.py" and not filename.startswith("__"): module_name = f"{CONFIG_MODULE_PATH}.{filename[:-3]}" module = importlib.import_module(module_name) @@ -27,9 +25,7 @@ @pytest.mark.parametrize("config_class", config_classes) def test_config_inherits_baseconfig(config_class): """Test that each config class correctly inherits from BaseConfig.""" - assert issubclass( - config_class, BaseConfig - ), f"{config_class.__name__} should inherit from BaseConfig." + assert issubclass(config_class, BaseConfig), f"{config_class.__name__} should inherit from BaseConfig." @pytest.mark.parametrize("config_class", config_classes) @@ -48,9 +44,7 @@ def test_config_has_expected_attributes(config_class): config_attrs = {field.name for field in dataclasses.fields(config_class)} missing_attrs = base_attrs - config_attrs - assert ( - not missing_attrs - ), f"{config_class.__name__} is missing attributes: {missing_attrs}" + assert not missing_attrs, f"{config_class.__name__} is missing attributes: {missing_attrs}" @pytest.mark.parametrize("config_class", config_classes) @@ -62,9 +56,7 @@ def test_config_default_values(config_class): attr = field.name expected_type = field.type - assert hasattr( - config, attr - ), f"{config_class.__name__} is missing attribute '{attr}'." + assert hasattr(config, attr), f"{config_class.__name__} is missing attribute '{attr}'." 
value = getattr(config, attr) @@ -74,24 +66,24 @@ def test_config_default_values(config_class): if origin is typing.Literal: # If the field is a Literal, ensure the value is one of the allowed options allowed_values = typing.get_args(expected_type) - assert ( - value in allowed_values - ), f"{config_class.__name__}.{attr} has incorrect value: expected one of {allowed_values}, got {value}" + assert value in allowed_values, ( + f"{config_class.__name__}.{attr} has incorrect value: expected one of {allowed_values}, got {value}" + ) elif origin is typing.Union: # For Union types (e.g., Optional[str]), check if value matches any type in the union allowed_types = typing.get_args(expected_type) - assert any( - isinstance(value, t) for t in allowed_types - ), f"{config_class.__name__}.{attr} has incorrect type: expected one of {allowed_types}, got {type(value)}" + assert any(isinstance(value, t) for t in allowed_types), ( + f"{config_class.__name__}.{attr} has incorrect type: expected one of {allowed_types}, got {type(value)}" + ) elif origin is not None: # If it's another generic type (e.g., list[str]), check against the base type - assert ( - isinstance(value, origin) or value is None - ), f"{config_class.__name__}.{attr} has incorrect type: expected {expected_type}, got {type(value)}" + assert isinstance(value, origin) or value is None, ( + f"{config_class.__name__}.{attr} has incorrect type: expected {expected_type}, got {type(value)}" + ) else: # Standard type check assert ( - isinstance(value, expected_type) or value is None + isinstance(value, expected_type) or value is None # type: ignore[arg-type] ), f"{config_class.__name__}.{attr} has incorrect type: expected {expected_type}, got {type(value)}" @@ -110,6 +102,4 @@ def test_config_allows_updates(config_class): for attr, new_value in update_values.items(): if hasattr(config, attr): setattr(config, attr, new_value) - assert ( - getattr(config, attr) == new_value - ), f"{config_class.__name__}.{attr} did not update 
correctly." + assert getattr(config, attr) == new_value, f"{config_class.__name__}.{attr} did not update correctly."