diff --git a/.github/dependabot.yml b/.github/dependabot.yml
deleted file mode 100644
index 8410b079..00000000
--- a/.github/dependabot.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-version: 2
-updates:
- # Python dependencies
- - package-ecosystem: "pip"
- directory: "/"
- schedule:
- interval: "weekly"
- day: "monday"
- open-pull-requests-limit: 10
- groups:
- dev-dependencies:
- patterns:
- - "black"
- - "isort"
- - "flake8*"
- - "bandit"
- - "pre-commit"
- - "pytest*"
- versioning-strategy: "lockfile-only"
- labels:
- - "dependencies"
- - "python"
-
- # GitHub Actions
- - package-ecosystem: "github-actions"
- directory: "/"
- schedule:
- interval: "weekly"
- day: "monday"
- open-pull-requests-limit: 10
- labels:
- - "dependencies"
- - "github-actions"
diff --git a/.github/issues/README.md b/.github/issues/README.md
new file mode 100644
index 00000000..1d983d20
--- /dev/null
+++ b/.github/issues/README.md
@@ -0,0 +1,92 @@
+# Ground Control — Implementation Issues
+
+This directory contains all implementation issues organized by phase. Each issue file is designed to be imported into GitHub Issues. Issues are cross-referenced to user stories, use cases, and design documents.
+
+## Phase Summary
+
+| Phase | Name | Issues | PRD Alignment | Priority |
+|---|---|---|---|---|
+| **0** | Project Bootstrap & Engineering Foundation | 001–025 (+006b, 023b) | Pre-v0.1 | P0 |
+| **1** | Core Data Model & Persistence | 026–040 | v0.1 | P0 |
+| **2** | API Foundation & Auth | 041–055 | v0.1 | P0 |
+| **3** | Authorization & Identity | 056–063 | v0.1–v0.2 | P0 |
+| **4** | Core Business Logic | 064–077 | v0.1 | P0 |
+| **5** | Event System & Workflow | 078–084 | v0.2 | P0–P1 |
+| **6** | Frameworks & Templates | 085–092 | v0.3 | P0–P1 |
+| **7** | Agent System | 093–099 | v0.4 | P0–P1 |
+| **8** | Reporting & Search | 100–103 | v0.5 | P0–P1 |
+| **9** | Frontend | 104–113 | v0.1–v0.5 | P0–P1 |
+| **10** | Plugin System | 114–116 | v1.0 | P0–P1 |
+| **11** | Production Readiness | 117–127 | v1.0 | P0–P1 |
+
+**Total: 129 issues (001–127 plus 006b and 023b) across 12 phases**
+
+## Cross-Cutting Concerns
+
+These issues establish patterns used throughout the entire codebase:
+
+| Concern | Issue(s) | Description |
+|---|---|---|
+| **Coding Standards** | #003, #007, #008, #009 | Ruff, mypy, ESLint, pre-commit |
+| **Architecture as Code** | #002, #024 | ADRs, C4 models, import-linter |
+| **Policy as Code** | #025, #057 | Authorization policies as testable artifacts |
+| **Formal Verification** | #023, #023b | icontract, Hypothesis (Python); Coq/Rocq proofs for critical invariants |
+| **Structured Logging** | #018 | structlog, JSON, context propagation |
+| **Exception Handling** | #017 | Typed hierarchy, HTTP mapping, scrubbing |
+| **Audit Logging** | #039 | Append-only, hash-chained, tamper-proof |
+| **Configuration** | #019 | pydantic-settings, fail-fast validation |
+| **Base Schemas** | #020 | API envelope, pagination, error format |
+| **CI/CD Security** | #012–#016 | SonarQube, SAST, DAST, OpenANT, deps |
+| **MCP Dev Tooling** | #006b | rocq-mcp (Coq proofs), AWS MCP (infra), custom ops MCP (deferred) |
+
+## Design Document References
+
+| Document | Path | Key Issues |
+|---|---|---|
+| PRD | `docs/PRD.md` | All issues |
+| Architecture | `docs/architecture/ARCHITECTURE.md` | #001–#025, #041, #078 |
+| Data Model | `docs/architecture/DATA_MODEL.md` | #026–#040 |
+| API Spec | `docs/api/API_SPEC.md` | #041–#055, #093–#099 |
+| User Stories | `docs/user-stories/USER_STORIES.md` | #044–#050, #064–#077 |
+| Use Cases | `docs/user-stories/USE_CASES.md` | #044–#050, #064–#077 |
+| Deployment | `docs/deployment/DEPLOYMENT.md` | #006, #119–#124 |
+| Coding Standards | `docs/CODING_STANDARDS.md` | #001–#025 |
+
+## User Story Coverage
+
+| Epic | User Stories | Primary Issues |
+|---|---|---|
+| Risk Management | US-1.1–US-1.5 | #029, #044, #064–#067, #106 |
+| Control Management | US-2.1–US-2.3 | #030–#032, #045, #068–#069, #107 |
+| Assessment & Testing | US-3.1–US-3.5 | #033–#034, #046, #070–#072, #108 |
+| Evidence Management | US-4.1–US-4.4 | #035, #047, #073–#075, #109 |
+| Findings & Remediation | US-5.1–US-5.3 | #036–#037, #048, #076, #110 |
+| Reporting | US-6.1–US-6.3 | #101–#103, #112 |
+| Administration | US-7.1–US-7.5 | #026–#028, #038, #051–#063, #111 |
+| Agent System | US-8.1–US-8.4 | #093–#099 |
+
+## Technology Decisions
+
+| Decision | Choice | Issue |
+|---|---|---|
+| Backend Language | Python 3.12+ | #004 |
+| API Framework | FastAPI | #041 |
+| Database | PostgreSQL 16+ | #021 |
+| ORM | SQLAlchemy 2.0 + Alembic | #021, #022 |
+| Frontend | React + TypeScript + Vite | #005, #104 |
+| UI Components | Shadcn/ui + Tailwind CSS | #104 |
+| Search | Meilisearch | #100 |
+| Cache/Queue | Redis/Valkey | #079 |
+| Object Storage | S3-compatible (MinIO) | #035, #073 |
+| CI/CD | GitHub Actions | #008–#016 |
+| Linting | Ruff (Python), ESLint (TS) | #003, #008 |
+| Type Checking | mypy (Python), tsc (TS) | #003, #009 |
+| Testing | pytest, vitest, Playwright | #010, #126 |
+| Code Quality | SonarQube/SonarCloud | #012 |
+| SAST | Semgrep + Bandit | #013 |
+| DAST | OWASP ZAP | #014 |
+| AI Security | OpenANT (Knostic) | #015 |
+| Formal Verification | Hypothesis (Python), Coq/Rocq (proofs) | #023, #023b |
+| MCP Servers | rocq-mcp, AWS MCP (official) | #006b |
+| Containerization | Docker + Docker Compose | #011, #119 |
+| Orchestration | Kubernetes (Helm) | #120 |
diff --git a/.github/issues/phase-00-foundation/001-repo-structure.md b/.github/issues/phase-00-foundation/001-repo-structure.md
new file mode 100644
index 00000000..a5955bfe
--- /dev/null
+++ b/.github/issues/phase-00-foundation/001-repo-structure.md
@@ -0,0 +1,87 @@
+---
+title: "Establish repository structure and monorepo layout"
+labels: [foundation, architecture, devex]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Define and create the canonical directory layout for the Ground Control monorepo. This structure must cleanly separate the Python backend, React frontend, shared schemas, deployment artifacts, documentation, and plugin SDK — while keeping everything in a single repository for atomic commits and unified CI.
+
+## References
+
+- PRD: Section 10 (Release Roadmap — v0.1 Foundation)
+- Architecture: Section 7 (Technology Stack Summary)
+- Deployment: Section 2 (Docker Compose)
+
+## Proposed Structure
+
+```
+Ground-Control/
+├── .github/
+│   ├── workflows/               # CI/CD pipelines
+│   ├── ISSUE_TEMPLATE/
+│   └── PULL_REQUEST_TEMPLATE.md
+├── backend/
+│   ├── src/
+│   │   └── ground_control/
+│   │       ├── __init__.py
+│   │       ├── main.py          # FastAPI app entry
+│   │       ├── config.py        # pydantic-settings
+│   │       ├── api/             # Route handlers (v1/)
+│   │       ├── domain/          # Domain models & services
+│   │       ├── infrastructure/  # DB, S3, cache, search adapters
+│   │       ├── schemas/         # Pydantic request/response schemas
+│   │       ├── middleware/      # Tenant, auth, logging, request-id
+│   │       ├── events/          # Domain event bus
+│   │       ├── exceptions/      # Shared exception hierarchy
+│   │       ├── logging/         # Structured logging setup
+│   │       └── plugins/         # Plugin runtime
+│   ├── tests/
+│   ├── migrations/              # Alembic
+│   ├── pyproject.toml
+│   └── alembic.ini
+├── frontend/
+│   ├── src/
+│   ├── public/
+│   ├── package.json
+│   ├── tsconfig.json
+│   └── vite.config.ts
+├── sdks/
+│   ├── python/                  # Agent SDK (Python)
+│   └── typescript/              # Agent SDK (TypeScript)
+├── plugins/
+│   ├── frameworks/              # Built-in framework definitions
+│   └── integrations/            # Built-in integration plugins
+├── deploy/
+│   ├── docker/
+│   │   ├── Dockerfile
+│   │   ├── Dockerfile.frontend
+│   │   └── docker-compose.yml
+│   ├── helm/
+│   └── terraform/
+├── docs/                        # Existing design docs
+├── architecture/                # C4/Structurizr models, ADRs
+│   ├── adrs/
+│   ├── c4/
+│   └── policies/                # Policy-as-code (Rego/YAML)
+├── .editorconfig
+├── .pre-commit-config.yaml
+├── CLAUDE.md
+└── README.md
+```
+
+## Acceptance Criteria
+
+- [ ] Directory structure created with placeholder `__init__.py` and `.gitkeep` files
+- [ ] Root `README.md` updated to describe structure
+- [ ] `CLAUDE.md` created with project conventions for AI-assisted development
+- [ ] `.gitignore` covers Python (`__pycache__`, `.venv`, `.mypy_cache`), Node (`node_modules`, `dist`), IDE files, `.env`
+- [ ] All existing `docs/` content remains intact and accessible
+
+## Technical Notes
+
+- Use a single `src/ground_control/` package directory (standard src layout), not an accidentally nested `src/src/`, so imports resolve cleanly
+- Backend package name: `ground_control` (underscore, PEP 8)
+- Keep `deploy/` separate from app code for clean Docker contexts
diff --git a/.github/issues/phase-00-foundation/002-adr-framework.md b/.github/issues/phase-00-foundation/002-adr-framework.md
new file mode 100644
index 00000000..78c16395
--- /dev/null
+++ b/.github/issues/phase-00-foundation/002-adr-framework.md
@@ -0,0 +1,38 @@
+---
+title: "Establish Architecture Decision Records (ADR) framework"
+labels: [foundation, architecture, documentation]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Set up a lightweight ADR process to document significant architectural decisions. Each ADR captures context, decision, consequences, and status. ADRs are stored as Markdown files in `architecture/adrs/` and versioned alongside code.
+
+Create initial ADRs for the foundational decisions already captured in design docs.
+
+## References
+
+- Architecture: All sections (captures decisions needing ADRs)
+- PRD: Section 10 (Roadmap implies phased decisions)
+
+## Acceptance Criteria
+
+- [ ] ADR template created at `architecture/adrs/000-template.md`
+- [ ] ADR index (`architecture/adrs/README.md`) with table of all ADRs
+- [ ] Initial ADRs created:
+ - `001-python-fastapi-backend.md` — Why Python 3.12+ / FastAPI
+ - `002-postgresql-primary-database.md` — Why PostgreSQL 16+
+ - `003-api-first-design.md` — REST-first with optional GraphQL
+ - `004-plugin-architecture.md` — Plugin extensibility approach
+ - `005-event-driven-architecture.md` — Domain event bus design
+ - `006-multi-tenancy-strategy.md` — Shared schema default, configurable
+ - `007-agent-first-design.md` — AI agents as first-class actors
+ - `008-clean-architecture.md` — Layered architecture (API → Domain → Infrastructure)
+- [ ] Each ADR follows format: Title, Status, Context, Decision, Consequences
+
+## Technical Notes
+
+- Use [MADR](https://adr.github.io/madr/) format (Markdown Any Decision Records)
+- Status values: `proposed`, `accepted`, `deprecated`, `superseded`
+- ADRs are immutable once accepted; superseding creates a new ADR
diff --git a/.github/issues/phase-00-foundation/003-coding-standards.md b/.github/issues/phase-00-foundation/003-coding-standards.md
new file mode 100644
index 00000000..6be6cbae
--- /dev/null
+++ b/.github/issues/phase-00-foundation/003-coding-standards.md
@@ -0,0 +1,55 @@
+---
+title: "Define and enforce coding standards and style guide"
+labels: [foundation, devex, quality]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Establish coding standards for both the Python backend and TypeScript frontend. Document conventions in a `CONTRIBUTING.md` and enforce via tooling (linters, formatters, pre-commit hooks). Standards cover naming, typing, imports, error handling, logging patterns, and testing conventions.
+
+## References
+
+- Architecture: Section 7 (Technology Stack — Python 3.12+, TypeScript, FastAPI)
+- PRD: Section 7 (Non-Functional Requirements — maintainability)
+
+## Acceptance Criteria
+
+### Python Backend
+
+- [ ] `ruff` configured for linting + formatting (replaces flake8, isort, black)
+ - Line length: 100
+ - Target: Python 3.12
+ - Rules: pycodestyle (E), pyflakes (F), isort (I), pep8-naming (N), bugbear (B), type annotations (ANN), security (S)
+- [ ] `mypy` configured for strict type checking
+ - `strict = true` in `pyproject.toml`
+ - Disallow untyped defs, no implicit optional
+- [ ] Naming conventions documented:
+ - Modules: `snake_case`
+ - Classes: `PascalCase`
+ - Functions/methods: `snake_case`
+ - Constants: `UPPER_SNAKE_CASE`
+ - Private: `_leading_underscore`
+- [ ] Import ordering: stdlib → third-party → local (enforced by ruff)
+- [ ] Docstring format: Google style (enforced by ruff D rules)
+- [ ] Type hints required on all public functions and methods
+
+### TypeScript Frontend
+
+- [ ] ESLint configured with `@typescript-eslint` strict rules
+- [ ] Prettier configured (single quotes, trailing commas, 100 line length)
+- [ ] TypeScript `strict: true` in `tsconfig.json`
+- [ ] React-specific: `eslint-plugin-react-hooks`, `eslint-plugin-jsx-a11y`
+
+### Shared
+
+- [ ] `.editorconfig` for consistent whitespace (2 spaces for TS/YAML, 4 spaces for Python)
+- [ ] `CONTRIBUTING.md` documenting all standards
+- [ ] Standards recorded as an ADR: `architecture/adrs/009-coding-standards.md`
+
+## Technical Notes
+
+- Prefer `ruff` over individual tools — single fast binary for Python linting and formatting
+- `mypy` strict mode catches many bugs at type-check time; use `# type: ignore[code]` sparingly with justification
+- All new code must pass type checking before merge
diff --git a/.github/issues/phase-00-foundation/004-python-backend-scaffold.md b/.github/issues/phase-00-foundation/004-python-backend-scaffold.md
new file mode 100644
index 00000000..3ca3264b
--- /dev/null
+++ b/.github/issues/phase-00-foundation/004-python-backend-scaffold.md
@@ -0,0 +1,42 @@
+---
+title: "Scaffold Python backend project with pyproject.toml"
+labels: [foundation, backend, devex]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Create the Python backend project structure with `pyproject.toml` as the single source of truth for dependencies, build config, and tool settings. Set up the virtual environment tooling, dependency management, and basic project metadata.
+
+## References
+
+- Architecture: Section 7 (Python 3.12+, FastAPI, SQLAlchemy 2.0, Pydantic)
+- Issue #001 (Repository Structure)
+
+## Acceptance Criteria
+
+- [ ] `backend/pyproject.toml` created with:
+ - Project metadata (name=`ground-control`, version, description, license=Apache-2.0)
+ - Python `requires-python = ">=3.12"`
+ - Core dependencies: `fastapi`, `uvicorn[standard]`, `pydantic>=2.0`, `pydantic-settings`, `sqlalchemy[asyncio]>=2.0`, `asyncpg`, `alembic`, `redis`, `boto3`, `meilisearch`, `python-jose[cryptography]`, `passlib[argon2]`, `structlog`, `httpx`, `icontract`
+ - Dev dependencies: `pytest`, `pytest-asyncio`, `pytest-cov`, `hypothesis`, `crosshair-tool`, `mypy`, `ruff`, `pre-commit`, `factory-boy`, `respx`, `deal`
+ - Ruff config section
+ - Mypy config section
+ - Pytest config section (asyncio_mode = "auto", testpaths = ["tests"])
+- [ ] `backend/src/ground_control/__init__.py` with `__version__`
+- [ ] `backend/src/ground_control/py.typed` marker file (PEP 561)
+- [ ] `backend/tests/__init__.py` and `backend/tests/conftest.py`
+- [ ] `Makefile` or `justfile` at repo root with common commands:
+ - `make install` — create venv, install deps
+ - `make lint` — run ruff check + mypy
+ - `make format` — run ruff format
+ - `make test` — run pytest
+ - `make dev` — start development server
+- [ ] Can run `pip install -e ".[dev]"` and import `ground_control`
+
+## Technical Notes
+
+- Use `uv` as the package installer for speed (document as recommended, `pip` as fallback)
+- Pin major versions of critical dependencies; use compatible release (`~=`) for minors
+- Separate `[project.optional-dependencies]` groups: `dev`, `test`, `docs`
diff --git a/.github/issues/phase-00-foundation/005-frontend-scaffold.md b/.github/issues/phase-00-foundation/005-frontend-scaffold.md
new file mode 100644
index 00000000..6a3afb1a
--- /dev/null
+++ b/.github/issues/phase-00-foundation/005-frontend-scaffold.md
@@ -0,0 +1,49 @@
+---
+title: "Scaffold frontend project (React + TypeScript + Vite)"
+labels: [foundation, frontend, devex]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Create the React + TypeScript frontend project using Vite as the build tool. Establish the initial project configuration, dependency set, and folder structure for a modern SPA that will serve as Ground Control's web UI.
+
+## References
+
+- Architecture: Section 7 (React + TypeScript, Shadcn/ui + Tailwind CSS)
+- PRD: Section 10 (v0.1 — Basic web UI)
+- User Stories: All UI-related stories
+
+## Acceptance Criteria
+
+- [ ] `frontend/` directory with Vite + React + TypeScript scaffold
+- [ ] `package.json` with core dependencies:
+ - `react`, `react-dom`, `react-router-dom`
+ - `@tanstack/react-query` (data fetching/caching)
+ - `tailwindcss`, `postcss`, `autoprefixer`
+ - `zod` (runtime validation)
+ - `axios` or `ky` (HTTP client)
+ - `lucide-react` (icons)
+- [ ] Dev dependencies: `typescript`, `@types/react`, `@types/react-dom`, `eslint`, `prettier`, `vitest`, `@testing-library/react`, `playwright`
+- [ ] `tsconfig.json` with `strict: true`, path aliases (`@/` → `src/`)
+- [ ] `tailwind.config.ts` configured
+- [ ] ESLint + Prettier configured per coding standards (#003)
+- [ ] Basic `src/` structure:
+ ```
+ src/
+ ├── components/   # Shared UI components
+ ├── features/     # Feature-based modules
+ ├── hooks/        # Custom React hooks
+ ├── lib/          # Utilities, API client
+ ├── pages/        # Route-level components
+ ├── types/        # TypeScript type definitions
+ └── App.tsx
+ ```
+- [ ] Dev server proxies `/api` to backend (Vite proxy config)
+- [ ] `npm run dev`, `npm run build`, `npm run lint`, `npm run test` all work
+
+## Technical Notes
+
+- Shadcn/ui setup deferred to Phase 9 (#118) — just Tailwind for now
+- Keep frontend lightweight at this stage; focus on build tooling and structure
diff --git a/.github/issues/phase-00-foundation/006-dev-environment.md b/.github/issues/phase-00-foundation/006-dev-environment.md
new file mode 100644
index 00000000..6abbda52
--- /dev/null
+++ b/.github/issues/phase-00-foundation/006-dev-environment.md
@@ -0,0 +1,43 @@
+---
+title: "Set up development environment (devcontainer + Docker Compose dev profile)"
+labels: [foundation, devex, docker]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Create a reproducible development environment using Docker Compose (dev profile) for local services and an optional VS Code devcontainer. Developers should be able to run `docker compose -f docker-compose.dev.yml up` and have PostgreSQL, Redis, MinIO, and Meilisearch available locally.
+
+## References
+
+- Architecture: Section 5.1 (Docker Compose deployment)
+- Deployment: Section 2 (Docker Compose — adapted for dev)
+
+## Acceptance Criteria
+
+- [ ] `deploy/docker/docker-compose.dev.yml` with:
+ - PostgreSQL 16 (port 5432, with healthcheck)
+ - Redis 7 (port 6379)
+ - MinIO (ports 9000/9001, auto-creates `gc-artifacts` bucket)
+ - Meilisearch (port 7700)
+ - No app container (developers run the app natively)
+- [ ] `.env.example` with all required environment variables and safe defaults
+- [ ] `Makefile` target: `make services-up` / `make services-down`
+- [ ] `.devcontainer/devcontainer.json` with:
+ - Python 3.12 + Node 20 base image
+ - Extensions: Python, Pylance, ESLint, Prettier, Docker, GitLens
+ - Post-create command: install backend + frontend deps
+ - Docker Compose integration for services
+- [ ] `scripts/setup-dev.sh` — idempotent script that:
+ - Creates `.env` from `.env.example` if not exists
+ - Starts Docker services
+ - Runs database migrations
+ - Seeds initial taxonomy data
+ - Prints "ready" message with URLs
+
+## Technical Notes
+
+- Dev MinIO should auto-create bucket via `mc` init container or entrypoint script
+- Use Docker healthchecks so `depends_on` with `condition: service_healthy` works
+- PostgreSQL should use same version (16) as production target
diff --git a/.github/issues/phase-00-foundation/006b-mcp-dev-tooling.md b/.github/issues/phase-00-foundation/006b-mcp-dev-tooling.md
new file mode 100644
index 00000000..ffa7d4c6
--- /dev/null
+++ b/.github/issues/phase-00-foundation/006b-mcp-dev-tooling.md
@@ -0,0 +1,74 @@
+---
+title: "Configure MCP servers for AI-assisted development (Rocq, AWS, ops)"
+labels: [foundation, devex, tooling]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Set up Model Context Protocol (MCP) servers that accelerate AI-assisted development. These give the AI developer (Claude) direct tool access to proof assistants, cloud infrastructure, and operational tooling — reducing hallucination and token waste on tasks outside core training data.
+
+## References
+
+- Coding Standards: Section 11 (Formal Methods — Coq/Rocq proofs in `proofs/`)
+- Issue #023b (Formal proof infrastructure)
+- Architecture: Section 5 (Deployment Architecture — AWS, Docker Compose, Kubernetes)
+- Architecture: Section 3.6 (Data & Storage — PostgreSQL, Redis, S3)
+
+## MCP Servers
+
+### 1. rocq-mcp (Rocq/Coq Proof Assistant)
+
+**Purpose:** Direct interaction with the Rocq/Coq proof assistant via coq-lsp and Pytanque. Enables type checking, tactic execution, and proof feedback without guessing syntax.
+
+**Source:** https://github.com/LLM4Rocq/rocq-mcp
+
+**Setup:**
+```bash
+# Install coq-lsp
+opam install lwt logs coq-lsp
+
+# Install rocq-mcp
+pip install git+https://github.com/LLM4Rocq/rocq-mcp.git
+```
+
+**Used for:** All work in `proofs/` — audit log integrity, RBAC policy correctness, state machine transitions, tenant isolation proofs.
+
+### 2. AWS MCP (Official)
+
+**Purpose:** Authenticated access to AWS APIs for infrastructure management, deployment, and evidence collection plugin development.
+
+**Source:** https://github.com/awslabs/mcp
+
+**Used for:** Deployment to AWS (ECS/EKS, RDS, S3, ElastiCache), CloudWatch log access, Secrets Manager, infrastructure validation.
+
+### 3. Ground Control Ops MCP (Custom, later phase)
+
+**Purpose:** Project-specific operational tooling — local Docker Compose management, database migrations, health checks, log tailing, test data seeding. Modeled on the Shifter ops MCP pattern.
+
+**Deferred to:** Phase 11 (Production Readiness), when the application is running and needs operational tooling. Skeleton can be created earlier if useful.
+
+**Planned tools:**
+- `gc_health` — Check health of all services (app, db, redis, search)
+- `gc_migrate` — Run/rollback Alembic migrations
+- `gc_logs` — Tail application logs with structured filtering
+- `gc_seed` — Seed test data for development
+- `gc_db_query` — Read-only SQL queries against local dev database
+- `gc_docker` — Start/stop/restart Docker Compose services
+
+## Acceptance Criteria
+
+- [ ] rocq-mcp installed and configured in Claude Code MCP settings
+- [ ] Verify rocq-mcp works: create a trivial Coq file in `proofs/`, type-check it via MCP
+- [ ] AWS MCP configured (requires AWS credentials — document setup in README)
+- [ ] `proofs/README.md` includes MCP setup instructions for contributors
+- [ ] Claude Code `.claude/settings.json` or project config documents MCP server configurations
+- [ ] Issue #023b updated to note rocq-mcp as the development interface for proofs
+
+## Notes
+
+- rocq-mcp is the highest priority — formal proofs are the area most prone to AI errors without tool feedback.
+- AWS MCP requires IAM credentials. For local dev, use AWS SSO profiles. Document the required permissions.
+- The custom ops MCP is deferred but the pattern is proven (see Shifter ops MCP). Create it when there's an app to operate.
+- MCP servers already available in the dev environment (GitHub, Playwright, Serena, Context7, DigitalOcean, Neo4j, Figma) do not need additional setup.
diff --git a/.github/issues/phase-00-foundation/007-pre-commit-hooks.md b/.github/issues/phase-00-foundation/007-pre-commit-hooks.md
new file mode 100644
index 00000000..c06b457a
--- /dev/null
+++ b/.github/issues/phase-00-foundation/007-pre-commit-hooks.md
@@ -0,0 +1,44 @@
+---
+title: "Configure pre-commit hooks and editor settings"
+labels: [foundation, devex, quality]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Set up `pre-commit` framework to enforce code quality gates before commits reach CI. Configure hooks for both Python and TypeScript code.
+
+## References
+
+- Issue #003 (Coding Standards)
+- Issue #004 (Python Backend Scaffold)
+
+## Acceptance Criteria
+
+- [ ] `.pre-commit-config.yaml` at repo root with hooks:
+ - `ruff` — lint and format Python
+ - `mypy` — type check (optional, can be slow; may run on CI only)
+ - `eslint` — lint TypeScript/React
+ - `prettier` — format TypeScript/YAML/JSON/Markdown
+ - `check-yaml` — validate YAML files
+ - `check-json` — validate JSON files
+ - `check-toml` — validate TOML files
+ - `detect-secrets` — prevent accidental secret commits
+ - `trailing-whitespace` — strip trailing whitespace
+ - `end-of-file-fixer` — ensure files end with newline
+ - `check-merge-conflict` — detect unresolved merge markers
+ - `check-added-large-files` — block files > 1MB (configurable)
+- [ ] `.editorconfig` at repo root:
+ - `root = true`
+ - Python: 4 spaces, 100 char line length
+ - TypeScript/JavaScript: 2 spaces
+ - YAML: 2 spaces
+ - Markdown: trailing whitespace allowed (for line breaks)
+- [ ] Documentation in `CONTRIBUTING.md` on installing pre-commit hooks
+- [ ] `make hooks` target to install pre-commit hooks
+
+## Technical Notes
+
+- `detect-secrets` uses the `yelp/detect-secrets` tool — initialize baseline with `detect-secrets scan > .secrets.baseline`
+- Consider making `mypy` a CI-only check if it's too slow for pre-commit
diff --git a/.github/issues/phase-00-foundation/008-ci-lint-format.md b/.github/issues/phase-00-foundation/008-ci-lint-format.md
new file mode 100644
index 00000000..83a5aa39
--- /dev/null
+++ b/.github/issues/phase-00-foundation/008-ci-lint-format.md
@@ -0,0 +1,41 @@
+---
+title: "CI pipeline: Linting and formatting checks"
+labels: [foundation, ci-cd, quality]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Create a GitHub Actions workflow that runs linting and formatting checks on every push and pull request. This is the first line of defense for code quality.
+
+## References
+
+- Architecture: Section 7 (GitHub Actions for CI/CD)
+- Issue #003 (Coding Standards)
+
+## Acceptance Criteria
+
+- [ ] `.github/workflows/lint.yml` workflow:
+ - Triggers on: push to `main`, pull requests
+ - Matrix strategy: Python linting, TypeScript linting
+ - **Python job:**
+ - Checkout, setup Python 3.12, install deps
+ - `ruff check --output-format=github .` — linting with GitHub annotations
+ - `ruff format --check .` — formatting verification
+ - **TypeScript job:**
+ - Checkout, setup Node 20, install deps
+ - `npm run lint` — ESLint
+ - `npx prettier --check .` — Prettier
+ - **YAML/Markdown job:**
+ - `yamllint` on `.yml` / `.yaml` files
+ - `markdownlint-cli2` on `.md` files (with reasonable config)
+- [ ] Workflow fails if any check fails
+- [ ] Workflow runs in < 2 minutes for typical changes
+- [ ] Status checks required for PR merge (document in branch protection rules)
+
+## Technical Notes
+
+- Use `ruff` native GitHub output format for inline PR annotations
+- Cache pip/npm dependencies using `actions/cache` for speed
+- Consider `concurrency` group to cancel stale runs on the same branch
diff --git a/.github/issues/phase-00-foundation/009-ci-type-checking.md b/.github/issues/phase-00-foundation/009-ci-type-checking.md
new file mode 100644
index 00000000..839440c4
--- /dev/null
+++ b/.github/issues/phase-00-foundation/009-ci-type-checking.md
@@ -0,0 +1,39 @@
+---
+title: "CI pipeline: Type checking (mypy + TypeScript tsc)"
+labels: [foundation, ci-cd, quality]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Create a GitHub Actions workflow for static type checking on both the Python backend (mypy) and TypeScript frontend (tsc). Type checking catches a class of bugs that linters miss.
+
+## References
+
+- Issue #003 (Coding Standards — mypy strict, TypeScript strict)
+- Architecture: Section 7 (Python 3.12+, TypeScript)
+
+## Acceptance Criteria
+
+- [ ] `.github/workflows/typecheck.yml` workflow:
+ - Triggers on: push to `main`, pull requests
+ - **Python job:**
+ - `mypy backend/src/ --strict` with project-specific config from `pyproject.toml`
+ - Reports type errors as GitHub annotations
+ - **TypeScript job:**
+ - `npx tsc --noEmit` — type check without emitting
+ - Reports errors as GitHub annotations
+- [ ] Both jobs must pass for PR merge
+- [ ] Use incremental mypy caching (`--cache-dir`) via `actions/cache`
+- [ ] Document how to run type checks locally: `make typecheck`
+
+## Technical Notes
+
+- mypy may need stubs for some dependencies: `types-redis`, `types-boto3`, etc.
+- Add the `pydantic.mypy` plugin in `pyproject.toml`. SQLAlchemy 2.0's typed ORM generally type-checks without the legacy `sqlalchemy.ext.mypy.plugin` (deprecated in 2.0):
+ ```toml
+ [tool.mypy]
+ plugins = ["pydantic.mypy"]
+ ```
+- Type stubs should be dev dependencies, not runtime
diff --git a/.github/issues/phase-00-foundation/010-ci-unit-tests.md b/.github/issues/phase-00-foundation/010-ci-unit-tests.md
new file mode 100644
index 00000000..3c32d8e1
--- /dev/null
+++ b/.github/issues/phase-00-foundation/010-ci-unit-tests.md
@@ -0,0 +1,41 @@
+---
+title: "CI pipeline: Unit and integration test runner"
+labels: [foundation, ci-cd, testing]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Create a GitHub Actions workflow that runs the test suites for both backend (pytest) and frontend (vitest). Include coverage reporting and thresholds.
+
+## References
+
+- Architecture: Section 7 (pytest + Playwright for testing)
+- PRD: Section 7 (Non-Functional Requirements)
+
+## Acceptance Criteria
+
+- [ ] `.github/workflows/test.yml` workflow:
+ - Triggers on: push to `main`, pull requests
+ - **Python job:**
+ - Services: PostgreSQL 16, Redis 7 (GitHub Actions service containers)
+ - Setup Python 3.12, install deps
+ - `pytest --cov=ground_control --cov-report=xml --cov-report=term -v`
+ - Upload coverage report as artifact
+ - Coverage threshold: 80% minimum (fail if below)
+ - **TypeScript job:**
+ - `npm run test -- --coverage`
+ - Coverage threshold: 80% minimum
+- [ ] Test results displayed in PR checks (use pytest-github-actions-annotate-failures or similar)
+- [ ] Coverage reports uploadable to SonarQube (see #012)
+- [ ] Workflow uses concurrency groups to cancel stale runs
+- [ ] `make test` and `make test-cov` targets in Makefile
+
+## Technical Notes
+
+- Use `pytest-asyncio` with `asyncio_mode = "auto"` for async test support
+- Use `factory-boy` for test data factories
+- Use `respx` for mocking httpx calls
+- PostgreSQL service should use same version as production
+- Tests should use a test database that is created/destroyed per session
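+
+A possible shape for the database fixture, assuming pytest-asyncio auto mode; the `gc_test` database name and credentials are placeholders:
+
+```python
+# Sketch: async test-session fixture; DB URL is a placeholder, not the real config.
+import pytest
+from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
+
+TEST_DB_URL = "postgresql+asyncpg://gc:gc@localhost:5432/gc_test"
+
+
+@pytest.fixture
+async def db_session():
+    engine = create_async_engine(TEST_DB_URL)
+    factory = async_sessionmaker(engine, expire_on_commit=False)
+    async with factory() as session:
+        yield session
+        await session.rollback()  # keep each test isolated
+    await engine.dispose()
+```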
diff --git a/.github/issues/phase-00-foundation/011-ci-build-docker.md b/.github/issues/phase-00-foundation/011-ci-build-docker.md
new file mode 100644
index 00000000..95f4d45a
--- /dev/null
+++ b/.github/issues/phase-00-foundation/011-ci-build-docker.md
@@ -0,0 +1,42 @@
+---
+title: "CI pipeline: Build and Docker image publishing"
+labels: [foundation, ci-cd, docker]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Create a GitHub Actions workflow that builds the application, creates Docker images, and publishes them to GitHub Container Registry (ghcr.io). This workflow should produce multi-arch images for both `linux/amd64` and `linux/arm64`.
+
+## References
+
+- Architecture: Section 5 (Deployment — Docker)
+- Architecture: Section 7 (Containerization — Docker)
+- Deployment: Section 2 (Docker Compose)
+
+## Acceptance Criteria
+
+- [ ] `deploy/docker/Dockerfile` (multi-stage):
+ - Stage 1: Python build (install deps, compile)
+ - Stage 2: Frontend build (npm install, vite build)
+ - Stage 3: Production runtime (slim base, copy artifacts, non-root user)
+ - Uses `python:3.12-slim` as base
+ - Runs as non-root user (`gc`)
+ - Health check: `CMD curl -f http://localhost:8000/health || exit 1`
+- [ ] `.github/workflows/build.yml` workflow:
+ - Triggers on: push to `main`, tags (`v*`), pull requests (build only, no push)
+ - Build Docker image with `docker/build-push-action`
+ - Push to `ghcr.io/keplerops/ground-control` on main/tags
+ - Tag strategy: `latest`, git SHA, semver from tags
+ - Multi-arch: `linux/amd64`, `linux/arm64`
+ - SBOM generation attached to image
+- [ ] Image size < 500MB
+- [ ] `make docker-build` target for local builds
+- [ ] Docker image labels (OCI standard): maintainer, version, source URL
+
+## Technical Notes
+
+- Use Docker layer caching (`actions/cache` or `type=gha` cache) for fast rebuilds
+- SBOM via `docker/sbom-action` or Syft
+- Consider separate Dockerfile for frontend (Nginx static serving) for K8s deployments
diff --git a/.github/issues/phase-00-foundation/012-ci-sonarqube.md b/.github/issues/phase-00-foundation/012-ci-sonarqube.md
new file mode 100644
index 00000000..a75ea4cc
--- /dev/null
+++ b/.github/issues/phase-00-foundation/012-ci-sonarqube.md
@@ -0,0 +1,46 @@
+---
+title: "CI pipeline: SonarQube integration for code quality"
+labels: [foundation, ci-cd, quality, security]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Integrate SonarQube (or SonarCloud for open source) into the CI pipeline for continuous code quality and security analysis. SonarQube provides code smell detection, bug detection, security vulnerability scanning, coverage tracking, and technical debt measurement.
+
+## References
+
+- PRD: Section 7 (Non-Functional Requirements — quality, security)
+- Issue #010 (Unit Tests — coverage reports feed into Sonar)
+
+## Acceptance Criteria
+
+- [ ] `.github/workflows/sonar.yml` workflow:
+ - Triggers on: push to `main`, pull requests
+ - Runs SonarCloud analysis (free for open source)
+ - Uploads Python coverage report (`coverage.xml` from pytest)
+ - Uploads TypeScript coverage report
+- [ ] `sonar-project.properties` at repo root:
+ - Project key, organization
+ - Source directories: `backend/src`, `frontend/src`
+ - Test directories: `backend/tests`, `frontend/src/**/*.test.*`
+ - Exclusions: migrations, generated files, vendor
+ - Coverage report paths
+ - Python-specific: `sonar.python.version=3.12`
+- [ ] Quality gate configured:
+ - New code coverage > 80%
+ - No new bugs (A rating)
+ - No new vulnerabilities (A rating)
+ - All new security hotspots reviewed (100%)
+ - Technical debt ratio < 5%
+ - Duplication < 3%
+- [ ] Quality gate status reported on PRs
+- [ ] SonarCloud badge in README
+
+## Technical Notes
+
+- Use `SonarSource/sonarcloud-github-action` for analysis
+- SonarCloud is free for public repos; for private, use self-hosted SonarQube
+- The `SONAR_TOKEN` secret must be configured in GitHub repo settings
+- Coverage reports must be generated before Sonar analysis runs
diff --git a/.github/issues/phase-00-foundation/013-ci-sast.md b/.github/issues/phase-00-foundation/013-ci-sast.md
new file mode 100644
index 00000000..c89124c6
--- /dev/null
+++ b/.github/issues/phase-00-foundation/013-ci-sast.md
@@ -0,0 +1,41 @@
+---
+title: "CI pipeline: SAST scanning (Semgrep + Bandit)"
+labels: [foundation, ci-cd, security]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Implement Static Application Security Testing (SAST) in the CI pipeline using Semgrep (multi-language, rule-based) and Bandit (Python-specific). SAST analyzes source code for security vulnerabilities before deployment.
+
+## References
+
+- Architecture: Section 6 (Security Architecture — Supply chain)
+- PRD: Section 7 (Non-Functional Requirements — security)
+
+## Acceptance Criteria
+
+- [ ] `.github/workflows/sast.yml` workflow:
+ - Triggers on: push to `main`, pull requests, weekly schedule
+ - **Semgrep job:**
+ - Uses `semgrep/semgrep-action`
+ - Rulesets: `p/security-audit`, `p/python`, `p/typescript`, `p/owasp-top-ten`, `p/secrets`
+ - Findings reported as GitHub annotations
+ - Upload SARIF results to GitHub Security tab
+ - **Bandit job:**
+ - Runs `bandit -r backend/src/ -f sarif -o bandit.sarif`
+ - Excludes test files
+ - Upload SARIF to GitHub Security tab
+ - **CodeQL job (optional, GitHub-native):**
+ - `github/codeql-action/init` + `analyze` for Python and JavaScript
+- [ ] SAST findings block PR merge if severity ≥ High
+- [ ] Baseline file for known/accepted findings (`.semgrepignore`, `bandit.yaml`)
+- [ ] `make security-scan` target for local SAST runs
+
+## Technical Notes
+
+- Semgrep is fast and supports custom rules — can add project-specific rules later (e.g., "never use `eval()`", "always validate tenant_id")
+- Bandit covers Python-specific issues (SQL injection, hardcoded passwords, insecure crypto)
+- SARIF format enables unified view in GitHub Security tab
+- Consider adding `eslint-plugin-security` for TypeScript-specific findings
diff --git a/.github/issues/phase-00-foundation/014-ci-dast.md b/.github/issues/phase-00-foundation/014-ci-dast.md
new file mode 100644
index 00000000..4345ed86
--- /dev/null
+++ b/.github/issues/phase-00-foundation/014-ci-dast.md
@@ -0,0 +1,39 @@
+---
+title: "CI pipeline: DAST scanning (OWASP ZAP)"
+labels: [foundation, ci-cd, security]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Implement Dynamic Application Security Testing (DAST) using OWASP ZAP to test the running application for vulnerabilities. DAST complements SAST by testing the deployed application as a black box, finding runtime vulnerabilities like XSS, CSRF, injection, and misconfigurations.
+
+## References
+
+- Architecture: Section 6 (Security Architecture — defense in depth)
+- PRD: Section 7 (Non-Functional Requirements — security)
+
+## Acceptance Criteria
+
+- [ ] `.github/workflows/dast.yml` workflow:
+ - Triggers on: push to `main` (not PRs — too slow), weekly schedule
+ - Spins up application + services via Docker Compose
+ - Waits for health check to pass
+ - Runs OWASP ZAP baseline scan against `http://localhost:8000`
+ - Runs ZAP API scan using OpenAPI spec (`/api/v1/openapi.json`)
+ - Results uploaded as SARIF to GitHub Security tab
+ - Report artifact saved for download
+- [ ] ZAP configuration file (`.zap/rules.tsv`) for:
+ - Tuning false positives
+ - Setting alert thresholds (FAIL on High, WARN on Medium)
+- [ ] DAST scan completes in < 15 minutes
+- [ ] Known false positives documented and suppressed
+
+## Technical Notes
+
+- Use `zaproxy/action-baseline` for baseline scan and `zaproxy/action-api-scan` for API scan
+- The API scan uses the OpenAPI spec to discover and test all endpoints
+- DAST requires a running application — use `docker compose up` in CI
+- Consider running DAST only on `main` to avoid slowing PR feedback loops
+- ZAP can authenticate — configure it with a test user for authenticated scanning
diff --git a/.github/issues/phase-00-foundation/015-ci-openant.md b/.github/issues/phase-00-foundation/015-ci-openant.md
new file mode 100644
index 00000000..14150696
--- /dev/null
+++ b/.github/issues/phase-00-foundation/015-ci-openant.md
@@ -0,0 +1,48 @@
+---
+title: "CI pipeline: OpenANT AI vulnerability scanning"
+labels: [foundation, ci-cd, security, ai]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Integrate [OpenANT](https://github.com/knostic/OpenAnt) — an open-source, LLM-based vulnerability discovery tool from Knostic — into the CI pipeline. OpenANT uses a two-stage approach: Stage 1 detects potential vulnerabilities; Stage 2 simulates attacks to verify them. Only findings that survive both stages are reported, significantly reducing false positives compared to traditional SAST.
+
+## References
+
+- Architecture: Section 6 (Security Architecture — supply chain, defense in depth)
+- [OpenANT GitHub](https://github.com/knostic/OpenAnt)
+- [OpenANT Documentation](https://openant.knostic.ai/)
+
+## Acceptance Criteria
+
+- [ ] `.github/workflows/openant.yml` workflow:
+ - Triggers on: weekly schedule, manual dispatch (`workflow_dispatch`)
+ - **Setup:**
+ - Install Go 1.25+ (required to build OpenANT CLI)
+ - Build `openant` binary from source or download release
+ - Configure `ANTHROPIC_API_KEY` secret (requires Claude Opus 4.6 access)
+ - **Scan:**
+ - `openant init . -l python --name KeplerOps/Ground-Control`
+ - `openant scan --verify` (runs full pipeline: parse → enhance → analyze → verify → build-output → report)
+ - Alternatively run staged: `openant parse && openant enhance && openant analyze && openant verify && openant build-output`
+ - **Report:**
+ - `openant report -f summary` — generate human-readable summary
+ - Save report as GitHub Actions artifact
+ - Parse findings and create GitHub issues for verified vulnerabilities (optional)
+- [ ] OpenANT configuration documented:
+ - Supported languages: Python (stable), TypeScript (beta)
+ - Token cost estimates documented for team budgeting
+ - Scan frequency: weekly (or on-demand for releases)
+- [ ] Results reviewed by a human before action is taken (AI findings are advisory)
+- [ ] `make openant-scan` target for local scanning
+
+## Technical Notes
+
+- OpenANT requires an Anthropic API key with Claude Opus 4.6 access — store as `ANTHROPIC_API_KEY` secret
+- OpenANT stores project data in `~/.openant/` — use GitHub Actions cache to avoid re-parsing unchanged files
+- The verify stage performs simulated attacks — this is safe against source code (no network interaction)
+- Run weekly rather than on every PR to manage API costs
+- OpenANT is Apache 2.0 licensed — compatible with Ground Control's license
+- Python and TypeScript are supported; scan both `backend/src/` and `frontend/src/`
diff --git a/.github/issues/phase-00-foundation/016-ci-dependency-scanning.md b/.github/issues/phase-00-foundation/016-ci-dependency-scanning.md
new file mode 100644
index 00000000..895aacd9
--- /dev/null
+++ b/.github/issues/phase-00-foundation/016-ci-dependency-scanning.md
@@ -0,0 +1,42 @@
+---
+title: "CI pipeline: Dependency and license scanning"
+labels: [foundation, ci-cd, security]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Implement dependency vulnerability scanning and license compliance checking in CI. This ensures known vulnerable dependencies are caught before deployment and all dependencies comply with the project's Apache-2.0 license.
+
+## References
+
+- Architecture: Section 6 (Security — supply chain, SBOM generation)
+
+## Acceptance Criteria
+
+- [ ] `.github/workflows/deps.yml` workflow:
+ - Triggers on: push to `main`, pull requests, weekly schedule
+ - **Python dependency scanning:**
+ - `pip-audit` — checks against OSV/PyPI advisories
+ - `safety` (optional) — checks against Safety DB
+ - **Node dependency scanning:**
+ - `npm audit --audit-level=high`
+ - **License scanning:**
+ - `pip-licenses --format=json --with-urls` — export Python license info
+ - `license-checker` (npm) — export Node license info
+ - Fail if any dependency uses GPL, AGPL, or another strong-copyleft license incompatible with Apache-2.0 (weak-copyleft MPL-2.0 remains on the allowlist below)
+ - **GitHub Dependabot:**
+ - `.github/dependabot.yml` configured for Python (pip), npm, GitHub Actions, Docker
+ - Weekly update schedule
+ - Grouped updates for minor/patch versions
+- [ ] SBOM generation (CycloneDX format) on each release
+- [ ] `make audit` target for local dependency scanning
+- [ ] Renovate or Dependabot auto-creates PRs for dependency updates
+
+## Technical Notes
+
+- `pip-audit` uses the OSV database — more comprehensive than `safety` for open-source
+- License allowlist: Apache-2.0, MIT, BSD-2-Clause, BSD-3-Clause, ISC, PSF, MPL-2.0
+- License denylist: GPL-2.0, GPL-3.0, AGPL-3.0, SSPL, EUPL
+- SBOM (Software Bill of Materials) is increasingly required for enterprise adoption
diff --git a/.github/issues/phase-00-foundation/017-exception-hierarchy.md b/.github/issues/phase-00-foundation/017-exception-hierarchy.md
new file mode 100644
index 00000000..2a8ea30c
--- /dev/null
+++ b/.github/issues/phase-00-foundation/017-exception-hierarchy.md
@@ -0,0 +1,59 @@
+---
+title: "Implement shared exception hierarchy"
+labels: [foundation, backend, cross-cutting]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Design and implement a structured exception hierarchy for the Python backend. Exceptions should be domain-aware, carry error codes, and map cleanly to HTTP status codes. This enables consistent error handling across all services and API endpoints.
+
+## References
+
+- API Spec: Section 3.3 (Error Response format)
+- Architecture: Section 3.4 (Domain Services)
+
+## Acceptance Criteria
+
+- [ ] `backend/src/ground_control/exceptions/__init__.py` with hierarchy:
+ ```
+ GroundControlError (base)
+ ├── ValidationError → 422
+ │   ├── SchemaValidationError
+ │   └── BusinessRuleError
+ ├── AuthenticationError → 401
+ │   ├── InvalidCredentialsError
+ │   ├── TokenExpiredError
+ │   └── MFARequiredError
+ ├── AuthorizationError → 403
+ │   ├── InsufficientPermissionsError
+ │   └── TenantAccessDeniedError
+ ├── NotFoundError → 404
+ │   └── EntityNotFoundError(entity_type, entity_id)
+ ├── ConflictError → 409
+ │   ├── DuplicateEntityError
+ │   └── OptimisticLockError
+ ├── RateLimitError → 429
+ ├── ExternalServiceError → 502
+ │   ├── StorageError
+ │   ├── SearchIndexError
+ │   └── PluginError
+ └── InternalError → 500
+- [ ] Each exception carries:
+ - `error_code: str` — machine-readable code (e.g., `"entity_not_found"`)
+ - `message: str` — human-readable message
+ - `details: list[dict]` — optional field-level details
+ - `status_code: int` — HTTP status code
+- [ ] Global exception handler in FastAPI that converts exceptions to API error response format
+- [ ] Exceptions are logged at appropriate levels (4xx → WARNING, 5xx → ERROR)
+- [ ] Sensitive information (stack traces, internal paths) stripped from production error responses
+- [ ] Unit tests for exception hierarchy and handler
+
+## Technical Notes
+
+- Use `@app.exception_handler(GroundControlError)` in FastAPI
+- Also handle `RequestValidationError` (Pydantic) and `HTTPException` (Starlette) for uniform output
+- Include `request_id` in error responses for correlation
+- Never expose raw database errors to clients
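+
+A minimal sketch of the base class and FastAPI wiring, assuming the error envelope from #020; names and fields may shift during implementation:
+
+```python
+# Sketch only: base exception + FastAPI handler (envelope fields assumed from #020).
+from fastapi import FastAPI, Request
+from fastapi.responses import JSONResponse
+
+
+class GroundControlError(Exception):
+    """Base error; subclasses override error_code and status_code."""
+
+    error_code: str = "internal_error"
+    status_code: int = 500
+
+    def __init__(self, message: str, details: list[dict] | None = None) -> None:
+        super().__init__(message)
+        self.message = message
+        self.details = details or []
+
+
+class EntityNotFoundError(GroundControlError):
+    error_code = "entity_not_found"
+    status_code = 404
+
+    def __init__(self, entity_type: str, entity_id: str) -> None:
+        super().__init__(f"{entity_type} {entity_id} not found")
+
+
+def register_exception_handlers(app: FastAPI) -> None:
+    @app.exception_handler(GroundControlError)
+    async def handle_gc_error(request: Request, exc: GroundControlError) -> JSONResponse:
+        # request_id is set by middleware (#018); None if the middleware hasn't run
+        return JSONResponse(
+            status_code=exc.status_code,
+            content={
+                "error": {
+                    "code": exc.error_code,
+                    "message": exc.message,
+                    "details": exc.details,
+                    "request_id": getattr(request.state, "request_id", None),
+                }
+            },
+        )
+```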
diff --git a/.github/issues/phase-00-foundation/018-structured-logging.md b/.github/issues/phase-00-foundation/018-structured-logging.md
new file mode 100644
index 00000000..5336cce0
--- /dev/null
+++ b/.github/issues/phase-00-foundation/018-structured-logging.md
@@ -0,0 +1,57 @@
+---
+title: "Implement structured logging framework"
+labels: [foundation, backend, cross-cutting, observability]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Set up a structured JSON logging framework using `structlog` that provides consistent, machine-parseable log output across all backend services. Every log entry should include request context (tenant, user, request ID) for correlation and filtering.
+
+## References
+
+- Deployment: Section 8.3 (Logging — structured JSON format)
+- Architecture: Section 6 (Security — audit logging)
+
+## Acceptance Criteria
+
+- [ ] `backend/src/ground_control/logging/` module:
+ - `setup.py` — configures structlog with processors
+ - `context.py` — context variable management (tenant_id, request_id, user_id)
+ - `middleware.py` — request logging middleware
+- [ ] Structlog configured with processors:
+ - Add log level
+ - Add timestamp (ISO 8601 UTC)
+ - Add service name
+ - Add context variables (tenant_id, request_id, user_id, actor_type)
+ - Exception formatting (exc_info → structured)
+ - JSON renderer (production) or colored console (development)
+- [ ] Log output format matches deployment spec:
+ ```json
+ {
+   "timestamp": "2026-03-07T15:30:00Z",
+   "level": "info",
+   "service": "gc-app",
+   "tenant_id": "...",
+   "request_id": "req_abc123",
+   "user_id": "...",
+   "message": "Risk created",
+   "event_data": { "risk_id": "...", "ref_id": "RISK-001" }
+ }
+ ```
+- [ ] Request logging middleware logs:
+ - Request start: method, path, user_agent
+ - Request end: method, path, status_code, duration_ms
+- [ ] Log levels configurable via environment variable (`LOG_LEVEL`)
+- [ ] Sensitive data never logged (passwords, tokens, secrets) — add scrubbing processor
+- [ ] `get_logger()` helper that returns a bound logger with current context
+- [ ] Unit tests for logging configuration and context binding
+
+## Technical Notes
+
+- Use `contextvars` for request-scoped context (works with asyncio)
+- `structlog` integrates with stdlib logging — configure both to output consistently
+- In development, use `structlog.dev.ConsoleRenderer` for human-readable output
+- In production, use `structlog.processors.JSONRenderer`
+- Add log sampling for high-volume events (e.g., health checks) to reduce noise
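+
+A possible configuration sketch using structlog's stock processors; the exact processor chain is an implementation decision:
+
+```python
+# Sketch: structlog setup; context fields are bound via contextvars in middleware.
+import logging
+
+import structlog
+
+
+def setup_logging(log_level: str = "info", json_output: bool = True) -> None:
+    renderer = (
+        structlog.processors.JSONRenderer()
+        if json_output
+        else structlog.dev.ConsoleRenderer()
+    )
+    structlog.configure(
+        processors=[
+            structlog.contextvars.merge_contextvars,  # tenant_id, request_id, user_id
+            structlog.processors.add_log_level,
+            structlog.processors.TimeStamper(fmt="iso", utc=True),
+            structlog.processors.format_exc_info,
+            renderer,
+        ],
+        wrapper_class=structlog.make_filtering_bound_logger(
+            getattr(logging, log_level.upper())
+        ),
+        cache_logger_on_first_use=True,
+    )
+
+
+# Request middleware would bind context once per request, e.g.:
+# structlog.contextvars.bind_contextvars(request_id=rid, tenant_id=tid, user_id=uid)
+```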
diff --git a/.github/issues/phase-00-foundation/019-config-management.md b/.github/issues/phase-00-foundation/019-config-management.md
new file mode 100644
index 00000000..109a055f
--- /dev/null
+++ b/.github/issues/phase-00-foundation/019-config-management.md
@@ -0,0 +1,67 @@
+---
+title: "Implement configuration management with pydantic-settings"
+labels: [foundation, backend, cross-cutting]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Create a centralized, typed configuration system using `pydantic-settings` that loads from environment variables (with `.env` fallback). All configuration is validated at startup — fail fast on misconfiguration.
+
+## References
+
+- Deployment: Section 2.5 (Environment Variables)
+- Architecture: Section 7 (Pydantic for validation)
+
+## Acceptance Criteria
+
+- [ ] `backend/src/ground_control/config.py` with `Settings` class:
+ ```python
+ from typing import Literal
+
+ from pydantic import AnyHttpUrl, PostgresDsn, RedisDsn, SecretStr
+ from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+ class Settings(BaseSettings):
+     # Core
+     secret_key: SecretStr
+     allowed_origins: list[str]
+     log_level: Literal["debug", "info", "warning", "error"] = "info"
+     environment: Literal["development", "staging", "production"] = "development"
+
+     # Database
+     database_url: PostgresDsn
+     db_pool_size: int = 20
+     db_pool_overflow: int = 10
+
+     # Redis
+     redis_url: RedisDsn
+
+     # Object Storage
+     s3_endpoint: AnyHttpUrl
+     s3_bucket: str = "gc-artifacts"
+     s3_access_key: SecretStr
+     s3_secret_key: SecretStr
+
+     # Search
+     search_url: AnyHttpUrl
+     search_key: SecretStr
+
+     # Auth
+     jwt_algorithm: str = "HS256"
+     access_token_expire_minutes: int = 60
+     refresh_token_expire_days: int = 30
+
+     # Multi-tenancy
+     multi_tenancy_mode: Literal["shared_schema", "schema_per_tenant", "database_per_tenant"] = "shared_schema"
+
+     model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8")
+ ```
+- [ ] Settings are a singleton, loaded once at startup
+- [ ] Validation errors produce clear messages indicating which env vars are missing/invalid
+- [ ] `SecretStr` used for all sensitive values (prevents accidental logging)
+- [ ] Settings are injectable via FastAPI dependency injection
+- [ ] Unit tests with mock environment variables
+
+## Technical Notes
+
+- Use `pydantic-settings` v2 (Pydantic v2 compatible)
+- Nested settings use env prefix: `SSO__PROVIDER`, `SMTP__HOST`, etc.
+- Never log settings values — only log which settings were loaded
+- Consider adding a `settings.display_safe()` method that redacts secrets for debug output
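+
+A small usage sketch; `lru_cache` is one common singleton pattern, not a mandate:
+
+```python
+# Sketch: load settings once and inject as a FastAPI dependency.
+# Assumes the Settings class sketched above is in scope.
+from functools import lru_cache
+
+from fastapi import Depends, FastAPI
+
+app = FastAPI()
+
+
+@lru_cache
+def get_settings() -> Settings:
+    return Settings()  # reads env vars / .env; raises clearly if invalid
+
+
+@app.get("/config-check")
+def config_check(settings: Settings = Depends(get_settings)) -> dict[str, str]:
+    # Report only which environment loaded, never secret values
+    return {"environment": settings.environment}
+```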
diff --git a/.github/issues/phase-00-foundation/020-base-schemas.md b/.github/issues/phase-00-foundation/020-base-schemas.md
new file mode 100644
index 00000000..8ced0988
--- /dev/null
+++ b/.github/issues/phase-00-foundation/020-base-schemas.md
@@ -0,0 +1,44 @@
+---
+title: "Define base Pydantic schemas and API response envelope"
+labels: [foundation, backend, api]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Create the base Pydantic schema classes and API response envelope that all endpoints will use. This ensures uniform request/response formats across the entire API, matching the specification in the API design doc.
+
+## References
+
+- API Spec: Section 3 (Response Format — single resource, collection, error)
+- API Spec: Section 1 (API Design Principles — consistent envelopes)
+
+## Acceptance Criteria
+
+- [ ] `backend/src/ground_control/schemas/base.py`:
+ - `BaseSchema` — base class with `model_config` (camelCase aliases, from_attributes)
+ - `TimestampMixin` — `created_at`, `updated_at` fields
+ - `TenantScopedMixin` — `tenant_id` field
+- [ ] `backend/src/ground_control/schemas/envelope.py`:
+ - `SingleResponse[T]` — `{"data": T}` wrapper (generic)
+ - `CollectionResponse[T]` — `{"data": [T], "meta": PaginationMeta, "links": PaginationLinks}`
+ - `PaginationMeta` — `total`, `page`, `per_page`, `total_pages`
+ - `PaginationLinks` — `self`, `next`, `prev`, `first`, `last`
+ - `ErrorResponse` — `{"error": {"code": str, "message": str, "details": list, "request_id": str}}`
+ - `ErrorDetail` — `{"field": str, "message": str, "code": str}`
+- [ ] `backend/src/ground_control/schemas/pagination.py`:
+ - `PaginationParams` — query parameter model (`page`, `per_page`, `sort`, `fields`)
+ - Validation: `page >= 1`, `1 <= per_page <= 100`
+- [ ] `backend/src/ground_control/schemas/filters.py`:
+ - Base filter schema with operator support (`[gte]`, `[lte]`, `[in]`, `[contains]`)
+- [ ] All schemas use strict Pydantic v2 mode
+- [ ] CamelCase serialization for API output, snake_case internally
+- [ ] Unit tests for serialization/deserialization of all base schemas
+
+## Technical Notes
+
+- Use Pydantic's `model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)`
+- Generic types: `SingleResponse[RiskRead]` produces `{"data": {...risk fields...}}`
+- The envelope pattern prevents JSON array root (security best practice)
+- Consider using Pydantic's `computed_field` for pagination link generation
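+
+A sketch of the generic envelope pieces, assuming Pydantic v2's `alias_generators.to_camel`:
+
+```python
+# Sketch: camelCase-aliased base schema and generic response envelopes.
+from typing import Generic, TypeVar
+
+from pydantic import BaseModel, ConfigDict
+from pydantic.alias_generators import to_camel
+
+T = TypeVar("T")
+
+
+class BaseSchema(BaseModel):
+    model_config = ConfigDict(
+        alias_generator=to_camel, populate_by_name=True, from_attributes=True
+    )
+
+
+class PaginationMeta(BaseSchema):
+    total: int
+    page: int
+    per_page: int  # serializes as perPage
+    total_pages: int
+
+
+class SingleResponse(BaseSchema, Generic[T]):
+    data: T
+
+
+class CollectionResponse(BaseSchema, Generic[T]):
+    data: list[T]
+    meta: PaginationMeta
+
+
+# Usage: SingleResponse[RiskRead](data=risk).model_dump(by_alias=True) → {"data": {...}}
+```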
diff --git a/.github/issues/phase-00-foundation/021-database-setup.md b/.github/issues/phase-00-foundation/021-database-setup.md
new file mode 100644
index 00000000..a054fb6a
--- /dev/null
+++ b/.github/issues/phase-00-foundation/021-database-setup.md
@@ -0,0 +1,45 @@
+---
+title: "Set up database connection and SQLAlchemy async engine"
+labels: [foundation, backend, database]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Configure SQLAlchemy 2.0 async engine, session management, and base model classes for the PostgreSQL database. This forms the persistence foundation for all domain entities.
+
+## References
+
+- Architecture: Section 3.7 (Data & Storage — PostgreSQL 16+)
+- Architecture: Section 7 (SQLAlchemy 2.0 + Alembic)
+- Data Model: All entity definitions
+
+## Acceptance Criteria
+
+- [ ] `backend/src/ground_control/infrastructure/database/`:
+ - `engine.py` — async engine creation with connection pooling (PgBouncer-compatible)
+ - `session.py` — async session factory, `get_db_session` dependency for FastAPI
+ - `base.py` — declarative base with common columns and mixins
+- [ ] Base model class with:
+ - `id: UUID` primary key (default `gen_random_uuid()`)
+ - `created_at: datetime` (server default `now()`)
+ - `updated_at: datetime` (auto-update on modification)
+ - `__tablename__` auto-generated from class name (snake_case)
+- [ ] Tenant-scoped mixin:
+ - `tenant_id: UUID` foreign key to tenants
+ - Index on `tenant_id` for all tenant-scoped tables
+- [ ] Session middleware that sets `app.current_tenant_id` for Row-Level Security
+- [ ] Connection pool settings configurable via Settings (#019):
+ - `pool_size`, `max_overflow`, `pool_timeout`, `pool_recycle`
+- [ ] Health check query: `SELECT 1`
+- [ ] Graceful shutdown: close all connections on app shutdown
+- [ ] Unit tests with async test session (use in-memory or test database)
+
+## Technical Notes
+
+- Use `create_async_engine` with `asyncpg` driver
+- Session pattern: `async with async_session() as session:` — auto-commit/rollback
+- Use `async_sessionmaker(expire_on_commit=False)` (the SQLAlchemy 2.0 replacement for `sessionmaker(class_=AsyncSession, ...)`) for the async session factory
+- Add `repr=False` to sensitive columns to prevent accidental logging
+- Design-by-contract: use `icontract.require` on session factory to validate connection params
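+
+A sketch of the engine and session dependency; the URL is hardcoded only for illustration (it comes from Settings (#019) in practice):
+
+```python
+# Sketch: async engine + per-request session dependency (SQLAlchemy 2.0 / asyncpg).
+from collections.abc import AsyncIterator
+
+from sqlalchemy.ext.asyncio import (
+    AsyncSession,
+    async_sessionmaker,
+    create_async_engine,
+)
+
+engine = create_async_engine(
+    "postgresql+asyncpg://gc:gc@localhost:5432/gc",  # placeholder; use Settings
+    pool_size=20,
+    max_overflow=10,
+    pool_pre_ping=True,
+)
+SessionFactory = async_sessionmaker(engine, expire_on_commit=False)
+
+
+async def get_db_session() -> AsyncIterator[AsyncSession]:
+    # FastAPI dependency: one session per request, rollback on error
+    async with SessionFactory() as session:
+        try:
+            yield session
+            await session.commit()
+        except Exception:
+            await session.rollback()
+            raise
+```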
diff --git a/.github/issues/phase-00-foundation/022-alembic-setup.md b/.github/issues/phase-00-foundation/022-alembic-setup.md
new file mode 100644
index 00000000..8ecd7150
--- /dev/null
+++ b/.github/issues/phase-00-foundation/022-alembic-setup.md
@@ -0,0 +1,45 @@
+---
+title: "Configure Alembic migration framework"
+labels: [foundation, backend, database]
+phase: 0
+priority: P0
+---
+
+## Description
+
+Set up Alembic for database schema migrations with async support, auto-generation, and a disciplined migration workflow. Every schema change must go through a migration — no manual DDL.
+
+## References
+
+- Data Model: Section 5 (Migration Strategy)
+- Architecture: Section 7 (SQLAlchemy 2.0 + Alembic)
+
+## Acceptance Criteria
+
+- [ ] `backend/alembic.ini` configured:
+ - Script location: `migrations`
+ - SQLAlchemy URL from settings (env var)
+ - File template with timestamp prefix
+- [ ] `backend/migrations/` directory:
+ - `env.py` — async migration runner using `asyncpg`
+ - `versions/` — migration scripts
+ - Target metadata imported from SQLAlchemy base
+- [ ] First migration: `001_initial_schema.py` — establishes the Alembic baseline (version table and migration infrastructure only; no application tables yet)
+- [ ] Migration workflow:
+ - `make migrate-create MSG="description"` — auto-generate migration
+ - `make migrate-up` — apply all pending migrations
+ - `make migrate-down` — rollback last migration
+ - `make migrate-status` — show current state
+- [ ] Migrations support:
+ - Forward (`upgrade`) and backward (`downgrade`) for every migration
+ - Data migrations (not just DDL)
+ - Zero-downtime patterns: add column → backfill → add constraint
+- [ ] CI runs migrations against a fresh database to verify they work from scratch
+- [ ] Migration naming: `{timestamp}_{description}.py`
+
+## Technical Notes
+
+- Use `run_async` wrapper in `env.py` for async migration support
+- Import all models in `env.py` so auto-generation detects them
+- Add `--autogenerate` safety: review every auto-generated migration before committing
+- Consider Alembic branch labels to avoid revision conflicts in parallel development
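+
+A sketch of the async runner in `migrations/env.py`, following the standard Alembic async template (the model import path is an assumption):
+
+```python
+import asyncio
+
+from alembic import context
+from sqlalchemy import pool
+from sqlalchemy.ext.asyncio import async_engine_from_config
+
+from ground_control.infrastructure.database.base import Base  # import ALL models here
+
+config = context.config
+target_metadata = Base.metadata
+
+
+def do_run_migrations(connection) -> None:
+    context.configure(connection=connection, target_metadata=target_metadata)
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+async def run_async_migrations() -> None:
+    """The run_async wrapper: bridges Alembic's sync API to asyncpg."""
+    connectable = async_engine_from_config(
+        config.get_section(config.config_ini_section, {}),
+        prefix="sqlalchemy.",
+        poolclass=pool.NullPool,
+    )
+    async with connectable.connect() as connection:
+        await connection.run_sync(do_run_migrations)
+    await connectable.dispose()
+
+
+asyncio.run(run_async_migrations())
+```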
diff --git a/.github/issues/phase-00-foundation/023-design-by-contract.md b/.github/issues/phase-00-foundation/023-design-by-contract.md
new file mode 100644
index 00000000..bb3a5df4
--- /dev/null
+++ b/.github/issues/phase-00-foundation/023-design-by-contract.md
@@ -0,0 +1,57 @@
+---
+title: "Set up design-by-contract and partial formal verification"
+labels: [foundation, backend, quality, verification]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Establish a partial formal verification strategy using design-by-contract (DbC) decorators and SMT-based verification tools. This provides stronger correctness guarantees than testing alone for critical business logic like risk scoring, permission checks, and state machine transitions.
+
+**Tools:**
+- **icontract** — Runtime contract checking (preconditions, postconditions, invariants) with informative violation messages
+- **CrossHair** — SMT solver-based concolic testing that finds counterexamples to contracts
+- **Hypothesis** — Property-based testing with CrossHair as optional backend
+- **deal** — Alternative DbC library with Z3-based formal verification support
+
+## References
+
+- PRD: Section 7 (Non-Functional Requirements — correctness, security)
+- Architecture: Section 3.4 (Domain Services — business logic)
+- [CrossHair docs](https://crosshair.readthedocs.io/)
+- [icontract GitHub](https://github.com/Parquery/icontract)
+- [deal docs](https://deal.readthedocs.io/)
+
+## Acceptance Criteria
+
+- [ ] Dependencies added: `icontract`, `crosshair-tool`, `hypothesis`, `deal`
+- [ ] `backend/src/ground_control/contracts/` module:
+ - `invariants.py` — shared invariants (e.g., score ranges, valid states)
+ - `README.md` — guide on when and how to use contracts
+- [ ] Contract patterns documented and applied to critical areas:
+ - **Risk scoring:** `@icontract.require(lambda likelihood: 1 <= likelihood <= 5)`
+ - **State machines:** `@icontract.require(lambda old, new: new in VALID_TRANSITIONS[old])`
+ - **Permission checks:** `@icontract.ensure(lambda result: result.tenant_id == current_tenant_id)`
+ - **Audit log:** `@icontract.ensure(lambda result: result.entry_hash is not None)`
+- [ ] CrossHair integration:
+ - `crosshair watch backend/src/ground_control/domain/` for IDE integration
+ - `make verify` target runs CrossHair on annotated modules
+ - CrossHair configured in `pyproject.toml` or `setup.cfg`
+- [ ] Hypothesis property-based tests:
+ - Test strategies for domain objects (risks, controls, etc.)
+ - `@given` tests for scoring, state transitions, permission logic
+ - CrossHair backend enabled for Hypothesis where applicable
+- [ ] ADR: `architecture/adrs/010-design-by-contract.md` documenting:
+ - Which code gets contracts (domain logic, NOT infrastructure)
+ - Performance impact and production toggle strategy
+ - Contract enforcement: development=strict, production=configurable
+
+## Technical Notes
+
+- Contracts add runtime overhead — use `ICONTRACT_SLOW` env var to disable in production or use `icontract.SLOW` for expensive checks
+- CrossHair is best at finding counterexamples (proving something CAN fail) rather than proving correctness
+- Focus contracts on pure functions and domain logic — avoid on I/O-heavy code
+- icontract integrates with CrossHair and FastAPI (via `fastapi-icontract`)
+- deal's Z3 solver can formally verify simple functions — use for critical math (scoring formulas)
+- Property-based tests with Hypothesis complement contracts by generating edge cases
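+
+A small sketch combining the contract pattern from the acceptance criteria with a Hypothesis property test (`compute_inherent_score` is an illustrative stand-in for the real scoring function):
+
+```python
+import icontract
+from hypothesis import given, strategies as st
+
+
+@icontract.require(lambda likelihood: 1 <= likelihood <= 5)
+@icontract.require(lambda impact: 1 <= impact <= 5)
+@icontract.ensure(lambda result: 1 <= result <= 25)
+def compute_inherent_score(likelihood: int, impact: int) -> int:
+    """Pure domain function — the ideal target for contracts and CrossHair."""
+    return likelihood * impact
+
+
+@given(st.integers(min_value=1, max_value=4), st.integers(min_value=1, max_value=5))
+def test_score_monotonic_in_likelihood(likelihood: int, impact: int) -> None:
+    # Raising likelihood must never lower the score.
+    assert compute_inherent_score(likelihood + 1, impact) >= compute_inherent_score(likelihood, impact)
+```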
diff --git a/.github/issues/phase-00-foundation/023b-formal-proofs-coq.md b/.github/issues/phase-00-foundation/023b-formal-proofs-coq.md
new file mode 100644
index 00000000..1a3f70be
--- /dev/null
+++ b/.github/issues/phase-00-foundation/023b-formal-proofs-coq.md
@@ -0,0 +1,55 @@
+---
+title: "Set up Coq/Rocq formal proof infrastructure and initial proof targets"
+labels: [foundation, quality, formal-methods]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Set up the Coq/Rocq proof assistant infrastructure and create initial proof skeletons for the critical invariants that testing alone cannot guarantee. Proofs model the domain logic — they verify properties of algorithms and state machines, not the Python code directly.
+
+## References
+
+- Coding Standards: Section 11 (Formal Methods)
+- Architecture: Section 6 (Security Architecture — audit log, RBAC/ABAC, tenant isolation)
+- Data Model: Section 2.15 (Audit Log — hash chain), Section 2.3 (Roles & Permissions)
+
+## Proof Targets
+
+### 1. Audit Log Integrity
+- Model the hash chain: each entry hashes (content + previous_hash).
+- Prove: any modification to a past entry is detectable (the chain breaks).
+- Prove: entries can only be appended, never inserted or reordered.
+
+### 2. RBAC/ABAC Policy Evaluation
+- Model the permission format: `resource:action:scope`.
+- Model role → permission assignment and user → role assignment.
+- Prove: a user without a matching permission cannot pass an authorization check.
+- Prove: no combination of valid role assignments can escalate beyond declared permissions.
+
+### 3. Entity State Machines
+- Model lifecycle states for:
+  - Finding: draft → open → remediation_in_progress → validation → closed
+  - Assessment Campaign: planning → active → review → finalized → archived
+  - Test Procedure: not_started → in_progress → completed → review → approved
+- Prove: only valid transitions are possible. No state can be reached that is not in the declared set.
+- Prove: terminal states (closed, finalized, approved) cannot transition to non-terminal states without explicit re-open.
+
+### 4. Tenant Isolation
+- Model a query as a function from (tenant_id, query_params) → result_set.
+- Prove: for any two distinct tenant_ids, the result sets are disjoint (no row appears in both).
+
+## Acceptance Criteria
+
+- [ ] `proofs/` directory created with subdirectories: `audit_log/`, `authorization/`, `state_machines/`, `tenant_isolation/`
+- [ ] `proofs/README.md` with: what the proofs cover, how to install Coq, how to verify all proofs (`make` or `coq_makefile`)
+- [ ] Coq project file (`_CoqProject`) and Makefile for building all proofs
+- [ ] Audit log hash chain: proof of append-only tamper detection compiles
+- [ ] State machine: at least one entity lifecycle (Finding) fully proved
+- [ ] CI step: `make -C proofs` runs in GitHub Actions and fails if any proof breaks
+- [ ] Each proof file documents which domain code it models (file path + function/class name)
+
+## Notes
+
+- Proofs model the logic; they don't extract to Python. The Python implementation must match the model — verified by review and testing.
+- Start with the simplest proof (state machine transitions) to validate the toolchain, then tackle audit log and authorization.
+- Use Coq stdlib where possible. Avoid heavy dependencies.
+- Development uses rocq-mcp (MCP server for Coq/Rocq) for interactive proof development. See #006b for setup.
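+
+A minimal Coq sketch of the Finding state machine target — the recommended starting point above (constructor names are illustrative, not the final proof structure):
+
+```coq
+Inductive finding_state : Type :=
+  | draft | open | remediation_in_progress | validation | closed.
+
+(* The declared transition relation; nothing outside it is reachable. *)
+Inductive step : finding_state -> finding_state -> Prop :=
+  | s_submit    : step draft open
+  | s_remediate : step open remediation_in_progress
+  | s_validate  : step remediation_in_progress validation
+  | s_close     : step validation closed.
+
+(* closed is terminal: it has no outgoing transition. *)
+Lemma closed_is_terminal : forall s, ~ step closed s.
+Proof. intros s H. inversion H. Qed.
+```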
diff --git a/.github/issues/phase-00-foundation/024-architecture-as-code.md b/.github/issues/phase-00-foundation/024-architecture-as-code.md
new file mode 100644
index 00000000..754c5133
--- /dev/null
+++ b/.github/issues/phase-00-foundation/024-architecture-as-code.md
@@ -0,0 +1,46 @@
+---
+title: "Implement architecture as code (C4 model + architecture tests)"
+labels: [foundation, architecture, quality]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Define the system architecture as code using C4 model diagrams (Structurizr DSL or PlantUML) and enforce architectural boundaries with import-linting tests. Architecture should be a living artifact that is verified on every CI build, not just a static document.
+
+## References
+
+- Architecture: All sections (system context, containers, components)
+- Issue #002 (ADR Framework)
+
+## Acceptance Criteria
+
+- [ ] `architecture/c4/` directory with architecture models:
+ - `workspace.dsl` (Structurizr DSL) or PlantUML files:
+ - **System Context:** Ground Control, users, external systems
+ - **Container:** API Server, Web UI, PostgreSQL, Redis, MinIO, Meilisearch, Worker
+ - **Component:** Domain services (Risk, Control, Assessment, etc.)
+ - Generated diagrams (PNG/SVG) committed for quick reference
+- [ ] Architecture tests using `import-linter`:
+ - `backend/.importlinter` configuration enforcing:
+ - `api` layer can import from `domain` and `schemas`, NOT from `infrastructure`
+ - `domain` layer has NO imports from `api` or `infrastructure`
+ - `infrastructure` layer can import from `domain` (implements interfaces)
+ - `schemas` layer has NO imports from `infrastructure`
+ - No circular dependencies between domain services
+ - These rules enforce Clean Architecture / Hexagonal boundaries
+- [ ] CI check: `make arch-test` runs import-linter and fails on violations
+- [ ] `.github/workflows/lint.yml` updated to include architecture check
+- [ ] Architecture diagrams auto-generated on changes (optional GitHub Action)
+
+## Technical Notes
+
+- Clean Architecture layers (outside → inside):
+ ```
+ API (routes, middleware) → Domain (services, models, events) ← Infrastructure (DB, S3, cache)
+ ```
+- `import-linter` is fast and runs as part of the lint step
+- Structurizr DSL can generate diagrams via Structurizr CLI or Lite — consider a GitHub Action
+- The C4 model should match ARCHITECTURE.md but be the source of truth for diagrams
+- Consider `pytest-archon` as an alternative to `import-linter` for more flexible rules
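+
+A sketch of the `.importlinter` rules using `forbidden` contracts (module paths assume the layout from #021/#041):
+
+```ini
+[importlinter]
+root_package = ground_control
+
+[importlinter:contract:domain-independence]
+name = Domain imports nothing from api or infrastructure
+type = forbidden
+source_modules =
+    ground_control.domain
+forbidden_modules =
+    ground_control.api
+    ground_control.infrastructure
+
+[importlinter:contract:api-no-infrastructure]
+name = API must not import infrastructure directly
+type = forbidden
+source_modules =
+    ground_control.api
+forbidden_modules =
+    ground_control.infrastructure
+```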
diff --git a/.github/issues/phase-00-foundation/025-policy-as-code.md b/.github/issues/phase-00-foundation/025-policy-as-code.md
new file mode 100644
index 00000000..31e25bfb
--- /dev/null
+++ b/.github/issues/phase-00-foundation/025-policy-as-code.md
@@ -0,0 +1,59 @@
+---
+title: "Implement policy as code framework for authorization"
+labels: [foundation, backend, security, cross-cutting]
+phase: 0
+priority: P1
+---
+
+## Description
+
+Establish a policy-as-code framework where authorization policies are defined as declarative, version-controlled, testable artifacts rather than hard-coded `if/else` logic. Policies govern who can do what on which resources under what conditions.
+
+**Approach:** Use Open Policy Agent (OPA) with Rego policies, or a lightweight Python-native policy engine with YAML-defined rules. The policy engine should support both RBAC and ABAC patterns described in the architecture.
+
+## References
+
+- Architecture: Section 4 (Authentication & Authorization — RBAC + ABAC)
+- Data Model: Section 2.3 (Role & Permission)
+- User Stories: US-7.2 (Manage Users and Roles)
+- Use Cases: UC-07 (Configure SSO and Provision Users)
+
+## Acceptance Criteria
+
+- [ ] `architecture/policies/` directory with policy definitions:
+ - `rbac.rego` (or `rbac.yaml`) — role-based access rules
+ - `abac.rego` (or `abac.yaml`) — attribute-based access rules
+ - `tenant_isolation.rego` — tenant boundary enforcement
+ - `agent_permissions.rego` — agent-specific restrictions
+- [ ] Policy engine module: `backend/src/ground_control/domain/auth/policy_engine.py`:
+ - `evaluate(subject, action, resource, context) → Decision`
+ - Decisions: `allow`, `deny` with reason
+ - Context includes: tenant_id, business_unit, role, resource attributes
+- [ ] Default policies implementing the authorization model:
+ ```
+ # Permission format: resource:action:scope
+ risks:read:* → Risk Manager, Auditor, CISO
+ risks:write:bu=engineering → Risk Manager (scoped to BU)
+ assessments:approve:campaign=* → Audit Manager
+ agents:execute:scope=testing → Agent role
+ audit_logs:read:* → Admin only
+ ```
+- [ ] Policy tests:
+ - Unit tests for each policy (test allow AND deny cases)
+ - `make policy-test` target
+ - Policies tested in CI
+- [ ] Policy change audit trail (policy files are version-controlled)
+- [ ] ADR: `architecture/adrs/011-policy-as-code.md`
+
+## Technical Notes
+
+- **Option A: OPA/Rego** — industry standard, powerful, but adds a service dependency
+ - Can embed via `opa-python-client` or `regorus` (Rust OPA engine with Python bindings)
+ - Rego policies are highly testable (`opa test`)
+- **Option B: Python-native** — simpler, fewer dependencies
+ - Define policies in YAML, evaluate with a custom engine
+ - Less expressive but sufficient for RBAC+ABAC patterns
+ - Easier to integrate with SQLAlchemy queries (generate WHERE clauses)
+- Recommended: Start with Python-native, migrate to OPA if complexity grows
+- Policies must be hot-reloadable (no restart to change policies)
+- The policy engine is used by authorization middleware (#058) and decorators
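+
+A sketch of the Option B matching core. It covers only the `resource:action:scope` wildcard match; the full `evaluate(subject, action, resource, context)` engine would layer ABAC attribute checks on top:
+
+```python
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True)
+class Decision:
+    allow: bool
+    reason: str
+
+
+def _part_ok(granted: str, requested: str) -> bool:
+    return granted == "*" or granted == requested
+
+
+def evaluate(granted_permissions: list[str], requested: str) -> Decision:
+    """Match a requested `resource:action:scope` against granted permissions."""
+    want = requested.split(":")
+    for perm in granted_permissions:
+        have = perm.split(":")
+        if len(have) == len(want) and all(map(_part_ok, have, want)):
+            return Decision(True, f"matched {perm}")
+    return Decision(False, "no matching permission")
+
+
+# A Risk Manager scoped to the engineering BU:
+assert evaluate(["risks:write:bu=engineering"], "risks:write:bu=engineering").allow
+assert not evaluate(["risks:read:*"], "risks:write:bu=engineering").allow
+```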
diff --git a/.github/issues/phase-01-data-model/026-tenant-model.md b/.github/issues/phase-01-data-model/026-tenant-model.md
new file mode 100644
index 00000000..96c0cec6
--- /dev/null
+++ b/.github/issues/phase-01-data-model/026-tenant-model.md
@@ -0,0 +1,37 @@
+---
+title: "Implement tenant model and row-level security"
+labels: [data-model, backend, multi-tenancy, security]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the tenant entity model and implement PostgreSQL Row-Level Security (RLS) policies for tenant isolation. This is the foundational isolation mechanism — every data access must be scoped to a tenant.
+
+## References
+
+- Data Model: Section 2.1 (Tenant)
+- Data Model: Section 4.1 (RLS policies)
+- Architecture: Section 8.2 (Multi-Tenancy Models)
+- User Stories: US-7.5 (Manage Taxonomy & Configuration — tenant settings)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy model: `Tenant` with fields per data model spec (id, name, slug, settings, status, timestamps)
+- [ ] Alembic migration creating `tenants` table
+- [ ] RLS policy on each tenant-scoped table: `CREATE POLICY tenant_isolation ON {table} USING (tenant_id = current_setting('app.current_tenant_id')::UUID)`
+- [ ] RLS enablement for all tenant-scoped tables (applied as tables are created)
+- [ ] Session-level tenant setting via `SET app.current_tenant_id = '{uuid}'`
+- [ ] Repository: `TenantRepository` with CRUD operations
+- [ ] Pydantic schemas: `TenantCreate`, `TenantRead`, `TenantUpdate`
+- [ ] Tenant slug validation (lowercase alphanumeric + hyphens, unique)
+- [ ] Contracts: `@icontract.ensure(lambda result: result.slug == result.slug.lower())`
+- [ ] Unit tests including RLS isolation verification (insert as tenant A, query as tenant B → empty)
+
+## Technical Notes
+
+- RLS is enforced at the database level — even raw SQL queries respect tenant boundaries
+- The middleware (#042) sets `app.current_tenant_id` at the start of each request
+- For schema-per-tenant mode, RLS is replaced by schema switching
+- Consider a `system` tenant for global resources (CCL, framework definitions)
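+
+A sketch of binding a session to a tenant for RLS; `set_config(..., true)` keeps the setting transaction-local and avoids interpolating the UUID into raw SQL:
+
+```python
+from sqlalchemy import text
+from sqlalchemy.ext.asyncio import AsyncSession
+
+
+async def bind_tenant(session: AsyncSession, tenant_id: str) -> None:
+    """Scope this transaction's queries to one tenant via the RLS policy."""
+    await session.execute(
+        text("SELECT set_config('app.current_tenant_id', :tid, true)"),
+        {"tid": tenant_id},
+    )
+```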
diff --git a/.github/issues/phase-01-data-model/027-user-model.md b/.github/issues/phase-01-data-model/027-user-model.md
new file mode 100644
index 00000000..37b5df5a
--- /dev/null
+++ b/.github/issues/phase-01-data-model/027-user-model.md
@@ -0,0 +1,35 @@
+---
+title: "Implement user model and repository"
+labels: [data-model, backend, auth]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the user entity model, repository, and schemas. Users are tenant-scoped and support multiple authentication providers (local, SAML, OIDC).
+
+## References
+
+- Data Model: Section 2.2 (User)
+- User Stories: US-7.2 (Manage Users and Roles)
+- PRD: Section 2 (Personas — P1 through P7)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy model: `User` with all fields from data model spec
+- [ ] Alembic migration creating `users` table with indexes
+- [ ] Repository: `UserRepository` with:
+ - `create(user_data) → User`
+ - `get_by_id(user_id) → User | None`
+ - `get_by_email(tenant_id, email) → User | None`
+ - `list(tenant_id, filters, pagination) → list[User], total`
+ - `update(user_id, updates) → User`
+ - `deactivate(user_id) → User` (soft-delete, preserves data)
+- [ ] Pydantic schemas: `UserCreate`, `UserRead`, `UserUpdate`, `UserSummary`
+- [ ] Email validation (format + uniqueness within tenant)
+- [ ] Password field NEVER included in `UserRead` schemas
+- [ ] `external_id` for IdP subject identifier (SAML/OIDC)
+- [ ] Unique constraint: `(tenant_id, email)`
+- [ ] Unit tests for all repository methods
+- [ ] Contracts: user email must be non-empty, status must be valid enum value
diff --git a/.github/issues/phase-01-data-model/028-role-permission-model.md b/.github/issues/phase-01-data-model/028-role-permission-model.md
new file mode 100644
index 00000000..53a783e8
--- /dev/null
+++ b/.github/issues/phase-01-data-model/028-role-permission-model.md
@@ -0,0 +1,42 @@
+---
+title: "Implement role and permission model"
+labels: [data-model, backend, auth, security]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the role, permission, and user-role assignment models. Roles define permission sets, users are assigned roles, and permissions follow the `resource:action:scope` format.
+
+## References
+
+- Data Model: Section 2.3 (Role & Permission, user_roles)
+- Architecture: Section 4 (Authorization Model — RBAC + ABAC)
+- User Stories: US-7.2 (Manage Users and Roles)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy models: `Role`, `UserRole`
+- [ ] Alembic migration creating `roles` and `user_roles` tables
+- [ ] System (built-in) roles seeded via migration:
+ - `admin` — full access
+ - `risk_manager` — risks:*, controls:read, assessments:read, reports:*
+ - `auditor` — assessments:*, controls:*, findings:*, artifacts:*
+ - `control_owner` — controls:read, artifacts:write (own controls), evidence_requests:respond
+ - `compliance_analyst` — controls:*, frameworks:*, ccl:*
+ - `viewer` — *:read
+ - `agent` — assessments:read, test_procedures:write, artifacts:write
+- [ ] Permission format: `resource:action:scope` stored as JSONB array
+- [ ] `UserRole` junction table with optional `scope` (JSONB for ABAC: `{"business_unit": "eng"}`)
+- [ ] Repository: `RoleRepository` with CRUD + permission checking
+- [ ] Pydantic schemas: `RoleCreate`, `RoleRead`, `PermissionSet`
+- [ ] `is_system` flag on built-in roles prevents deletion
+- [ ] Unit tests for permission resolution (user → roles → permissions)
+- [ ] Contracts: system roles cannot be deleted, role names unique per tenant
+
+## Technical Notes
+
+- Permissions are evaluated by the policy engine (#025), not by hardcoded if/else logic
+- `scope` on `UserRole` enables ABAC: same role can be scoped to different BUs
+- Consider caching resolved permissions in Redis (invalidate on role change)
diff --git a/.github/issues/phase-01-data-model/029-risk-entity.md b/.github/issues/phase-01-data-model/029-risk-entity.md
new file mode 100644
index 00000000..faf843ae
--- /dev/null
+++ b/.github/issues/phase-01-data-model/029-risk-entity.md
@@ -0,0 +1,46 @@
+---
+title: "Implement risk entity (model, schema, repository)"
+labels: [data-model, backend, risk-management]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the risk entity — the centerpiece of the risk management domain. Includes the SQLAlchemy model, Pydantic schemas, and repository with full CRUD and filtering support.
+
+## References
+
+- Data Model: Section 2.4 (Risk)
+- PRD: Section 4.1 (Risk Management)
+- User Stories: US-1.1 (Maintain Risk Register), US-1.2, US-1.3, US-1.4, US-1.5
+- Use Cases: UC-01 (Manage Risk Register), UC-02 (Run Risk Assessment)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy model: `Risk` with all fields from data model (ref_id, title, description, category, owner_id, inherent/residual scores, appetite, business_units, tags, custom_fields, etc.)
+- [ ] Computed columns: `inherent_score = inherent_likelihood * inherent_impact` (STORED)
+- [ ] Alembic migration with indexes (tenant_category, tenant_owner, tenant_status)
+- [ ] Repository: `RiskRepository` with:
+ - `create`, `get_by_id`, `get_by_ref_id`, `list` (with filters), `update`, `archive`
+ - Filtering: category, status, owner_id, score ranges, business_unit, tags
+ - Sorting: any field, default `-inherent_score`
+- [ ] Pydantic schemas:
+ - `RiskCreate` — validates likelihood/impact ranges (1-5 by default, configurable)
+ - `RiskRead` — includes computed scores, relationships
+ - `RiskUpdate` — partial update
+ - `RiskSummary` — lightweight for lists
+ - `RiskFilter` — filter parameters
+- [ ] `ref_id` auto-generation: `RISK-{sequence}` if not provided
+- [ ] Soft delete: `archive()` sets `status='archived'` and `archived_at`
+- [ ] Contracts:
+ - `@icontract.require(lambda likelihood: 1 <= likelihood <= 5)`
+ - `@icontract.require(lambda impact: 1 <= impact <= 5)`
+ - `@icontract.ensure(lambda result: result.inherent_score == result.inherent_likelihood * result.inherent_impact)`
+- [ ] Unit tests for model, schema validation, repository methods
+
+## Technical Notes
+
+- `custom_fields` (JSONB) allows tenant-specific fields without schema changes
+- `business_units` and `tags` are `ARRAY(TEXT)` for flexible categorization
+- Generated columns (`inherent_score`, `residual_score`) are computed by PostgreSQL — read-only in ORM
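+
+A simplified sketch of the generated-column mapping (the real model would use the UUID primary key and tenant mixin from #021):
+
+```python
+from sqlalchemy import Computed
+from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
+
+
+class Base(DeclarativeBase):
+    pass
+
+
+class Risk(Base):
+    __tablename__ = "risks"
+
+    id: Mapped[int] = mapped_column(primary_key=True)
+    inherent_likelihood: Mapped[int]
+    inherent_impact: Mapped[int]
+    # STORED generated column: PostgreSQL computes and persists the value;
+    # the ORM treats it as read-only (never set it in Python).
+    inherent_score: Mapped[int] = mapped_column(
+        Computed("inherent_likelihood * inherent_impact", persisted=True)
+    )
+```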
diff --git a/.github/issues/phase-01-data-model/030-control-entity.md b/.github/issues/phase-01-data-model/030-control-entity.md
new file mode 100644
index 00000000..bb1c9216
--- /dev/null
+++ b/.github/issues/phase-01-data-model/030-control-entity.md
@@ -0,0 +1,29 @@
+---
+title: "Implement control entity (model, schema, repository)"
+labels: [data-model, backend, control-management]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the control entity for the control catalog. Controls are reusable definitions linked to the Common Control Library and mapped to framework requirements.
+
+## References
+
+- Data Model: Section 2.5 (Control)
+- PRD: Section 4.2 (Control Management)
+- User Stories: US-2.1 (Maintain Control Catalog), US-2.2, US-2.3
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy model: `Control` with all fields (ref_id, ccl_entry_id, title, objective, description, control_type, control_nature, frequency, owner_id, effectiveness_rating, etc.)
+- [ ] Alembic migration with indexes
+- [ ] Enum validation: control_type ∈ {preventive, detective, corrective}, control_nature ∈ {manual, automated, it_dependent_manual}, frequency ∈ {continuous, daily, weekly, monthly, quarterly, annual, ad_hoc}
+- [ ] Repository: `ControlRepository` with CRUD + filtering by framework, type, nature, owner, effectiveness
+- [ ] Pydantic schemas: `ControlCreate`, `ControlRead`, `ControlUpdate`, `ControlSummary`, `ControlFilter`
+- [ ] `ref_id` pattern: `CTRL-{category_abbrev}-{sequence}` (e.g., `CTRL-AM-001`)
+- [ ] Foreign key to `ccl_entries` (optional — not all controls come from CCL)
+- [ ] Relationship to owner (User)
+- [ ] Unit tests for all repository methods
+- [ ] Contracts: control_type and control_nature must be valid enum values
diff --git a/.github/issues/phase-01-data-model/031-framework-entity.md b/.github/issues/phase-01-data-model/031-framework-entity.md
new file mode 100644
index 00000000..b8ec607b
--- /dev/null
+++ b/.github/issues/phase-01-data-model/031-framework-entity.md
@@ -0,0 +1,29 @@
+---
+title: "Implement framework and requirements entities"
+labels: [data-model, backend, compliance]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the framework and framework requirements entities, plus the control-framework mapping junction table. This enables cross-framework compliance mapping.
+
+## References
+
+- Data Model: Section 2.6 (Framework & Requirements, control_framework_mappings)
+- PRD: Section 3 (Frameworks & Standards), Section 4.2 (Control-to-Framework Mapping)
+- User Stories: US-2.2 (Map Controls Across Frameworks)
+- Use Cases: UC-05 (Cross-Framework Control Mapping)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy models: `Framework`, `FrameworkRequirement`, `ControlFrameworkMapping`
+- [ ] Alembic migration with all tables, indexes, and unique constraints
+- [ ] `FrameworkRequirement` supports hierarchy (`parent_id` self-referencing FK)
+- [ ] `ControlFrameworkMapping` with mapping_type, notes, agent suggestion fields
+- [ ] Repositories: `FrameworkRepository`, `FrameworkRequirementRepository`
+- [ ] Pydantic schemas for all three entities (Create, Read, Update)
+- [ ] Framework `tenant_id` is nullable (NULL = system/global framework)
+- [ ] Hierarchical requirement query (tree structure)
+- [ ] Unit tests including hierarchy traversal and many-to-many mapping
diff --git a/.github/issues/phase-01-data-model/032-ccl-entity.md b/.github/issues/phase-01-data-model/032-ccl-entity.md
new file mode 100644
index 00000000..c11bd741
--- /dev/null
+++ b/.github/issues/phase-01-data-model/032-ccl-entity.md
@@ -0,0 +1,27 @@
+---
+title: "Implement Common Control Library (CCL) entity"
+labels: [data-model, backend, compliance]
+phase: 1
+priority: P1
+---
+
+## Description
+
+Create the CCL entity and CCL-to-framework-requirement mapping. The CCL provides reusable, standardized control definitions that map across multiple frameworks.
+
+## References
+
+- Data Model: Section 2.7 (Common Control Library)
+- PRD: Section 6.2 (Common Control Library)
+- User Stories: US-2.3 (Use Common Control Library)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy models: `CCLEntry`, `CCLFrameworkMapping`
+- [ ] Alembic migration
+- [ ] CCL entries are global (not tenant-scoped) — shared across all tenants
+- [ ] `ref_id` pattern: `CC-{category}-{sequence}` (e.g., `CC-AM-001`)
+- [ ] Repository: `CCLRepository` with browse, search, and adopt operations
+- [ ] Pydantic schemas: `CCLEntryRead`, `CCLEntryCreate`, `CCLAdoptRequest`
+- [ ] "Adopt" operation: creates a tenant-scoped Control linked to the CCL entry
+- [ ] Unit tests
diff --git a/.github/issues/phase-01-data-model/033-assessment-entity.md b/.github/issues/phase-01-data-model/033-assessment-entity.md
new file mode 100644
index 00000000..985aa5df
--- /dev/null
+++ b/.github/issues/phase-01-data-model/033-assessment-entity.md
@@ -0,0 +1,29 @@
+---
+title: "Implement assessment campaign entity"
+labels: [data-model, backend, assessment]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the assessment campaign entity. Campaigns are time-boxed cycles for evaluating controls (e.g., "Q1 2026 SOX ITGC Testing").
+
+## References
+
+- Data Model: Section 2.8 (Assessment Campaign)
+- PRD: Section 4.3 (Assessment & Testing)
+- User Stories: US-3.1 (Plan Assessment Campaign)
+- Use Cases: UC-03 (Execute Test Procedures)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy model: `AssessmentCampaign` with all fields (name, campaign_type, status, period dates, scope_filter, etc.)
+- [ ] Status enum: planning, active, review, finalized, archived
+- [ ] Alembic migration
+- [ ] Repository: `AssessmentCampaignRepository` with CRUD + status transitions
+- [ ] Pydantic schemas: `CampaignCreate`, `CampaignRead`, `CampaignUpdate`, `CampaignProgress`
+- [ ] `scope_filter` (JSONB) defines which controls/systems are in scope
+- [ ] Status transition validation (e.g., can't go from `planning` to `finalized`)
+- [ ] Contracts: `period_end >= period_start`, valid status transitions
+- [ ] Unit tests
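+
+An illustrative transition table and guard; the exact allowed edges (e.g., whether `review` may return to `active`) are a design decision for this issue:
+
+```python
+VALID_TRANSITIONS: dict[str, set[str]] = {
+    "planning": {"active"},
+    "active": {"review"},
+    "review": {"active", "finalized"},
+    "finalized": {"archived"},
+    "archived": set(),
+}
+
+
+def validate_transition(old: str, new: str) -> None:
+    """Rejects jumps like planning -> finalized."""
+    if new not in VALID_TRANSITIONS.get(old, set()):
+        raise ValueError(f"invalid campaign status transition: {old} -> {new}")
+```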
diff --git a/.github/issues/phase-01-data-model/034-test-procedure-entity.md b/.github/issues/phase-01-data-model/034-test-procedure-entity.md
new file mode 100644
index 00000000..d40562a1
--- /dev/null
+++ b/.github/issues/phase-01-data-model/034-test-procedure-entity.md
@@ -0,0 +1,30 @@
+---
+title: "Implement test procedure and test steps entities"
+labels: [data-model, backend, assessment]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the test procedure and test step entities. Test procedures belong to assessment campaigns and link to controls. Steps are the ordered test actions within a procedure.
+
+## References
+
+- Data Model: Section 2.9 (Test Procedure & Steps)
+- PRD: Section 4.3 (Test Procedures, Workpapers, Sampling)
+- User Stories: US-3.2 (Execute Test Procedures), US-3.5 (Agent-Performed Testing)
+- Use Cases: UC-03 (Execute Test Procedures), UC-06 (Agent-Performed Testing)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy models: `TestProcedure`, `TestStep`
+- [ ] TestProcedure: campaign_id, control_id, status, assigned_to, reviewer_id, conclusion, agent fields, sampling fields
+- [ ] TestStep: procedure_id, step_number, instruction, expected_result, actual_result, conclusion, notes
+- [ ] Alembic migration with unique constraint on (procedure_id, step_number)
+- [ ] Repositories for both entities
+- [ ] Pydantic schemas including agent provenance fields
+- [ ] Status transitions: not_started → in_progress → completed → review → approved
+- [ ] Step conclusion enum: pass, fail, na
+- [ ] Contracts: step_number must be positive, valid status transitions
+- [ ] Unit tests
diff --git a/.github/issues/phase-01-data-model/035-artifact-entity.md b/.github/issues/phase-01-data-model/035-artifact-entity.md
new file mode 100644
index 00000000..99f873f6
--- /dev/null
+++ b/.github/issues/phase-01-data-model/035-artifact-entity.md
@@ -0,0 +1,33 @@
+---
+title: "Implement artifact (evidence) entity and storage abstraction"
+labels: [data-model, backend, evidence]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the artifact entity, artifact linking (polymorphic many-to-many), and the storage abstraction layer for S3-compatible object storage.
+
+## References
+
+- Data Model: Section 2.10 (Artifact), artifact_links
+- PRD: Section 4.4 (Evidence & Artifact Management)
+- User Stories: US-4.1 (Upload and Manage Artifacts), US-4.2 (Link Evidence)
+- Architecture: Section 3.7 (Object Store — S3/MinIO)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy models: `Artifact`, `ArtifactLink`
+- [ ] Artifact: filename, content_type, size_bytes, storage_key, sha256_hash, version, encryption fields, retention
+- [ ] ArtifactLink: polymorphic link (artifact_id, entity_type, entity_id, context_note)
+- [ ] Alembic migration with indexes on hash and entity links
+- [ ] Storage abstraction: `backend/src/ground_control/infrastructure/storage/`:
+ - `interface.py` — `ObjectStorage` protocol (upload, download, delete, presigned_url)
+ - `s3.py` — S3/MinIO implementation using `boto3`
+ - `local.py` — local filesystem implementation (development fallback)
+- [ ] Repository: `ArtifactRepository` with CRUD, versioning, linking
+- [ ] Pydantic schemas: `ArtifactCreate`, `ArtifactRead`, `ArtifactLinkCreate`
+- [ ] SHA-256 hash verification on upload
+- [ ] Version tracking: new upload creates version, previous retained
+- [ ] Unit tests with mocked storage
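+
+A sketch of the `ObjectStorage` protocol from the storage abstraction criteria above (method signatures are assumptions to be settled in `interface.py`):
+
+```python
+from typing import Protocol
+
+
+class ObjectStorage(Protocol):
+    async def upload(self, key: str, data: bytes, content_type: str) -> None: ...
+    async def download(self, key: str) -> bytes: ...
+    async def delete(self, key: str) -> None: ...
+    async def presigned_url(self, key: str, expires_seconds: int = 3600) -> str: ...
+```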
diff --git a/.github/issues/phase-01-data-model/036-finding-entity.md b/.github/issues/phase-01-data-model/036-finding-entity.md
new file mode 100644
index 00000000..1b2372ad
--- /dev/null
+++ b/.github/issues/phase-01-data-model/036-finding-entity.md
@@ -0,0 +1,28 @@
+---
+title: "Implement finding entity"
+labels: [data-model, backend, findings]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the finding entity for documenting control deficiencies identified during assessments.
+
+## References
+
+- Data Model: Section 2.12 (Finding)
+- PRD: Section 4.5 (Findings & Issues)
+- User Stories: US-5.1 (Record Findings)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy model: `Finding` with all fields (ref_id, campaign_id, control_id, procedure_id, title, description, root_cause, risk_rating, classification, status, owner_id, due_date, agent_produced)
+- [ ] Status enum: draft, open, remediation_in_progress, validation, closed
+- [ ] Classification enum: deficiency, significant_deficiency, material_weakness
+- [ ] Alembic migration
+- [ ] Repository: `FindingRepository` with CRUD, status transitions, filtering
+- [ ] Pydantic schemas: `FindingCreate`, `FindingRead`, `FindingUpdate`, `FindingFilter`
+- [ ] `ref_id` auto-generation: `FIND-{sequence}`
+- [ ] Status transition contracts (e.g., can't close without going through validation)
+- [ ] Unit tests
diff --git a/.github/issues/phase-01-data-model/037-remediation-entity.md b/.github/issues/phase-01-data-model/037-remediation-entity.md
new file mode 100644
index 00000000..6f59b8e5
--- /dev/null
+++ b/.github/issues/phase-01-data-model/037-remediation-entity.md
@@ -0,0 +1,26 @@
+---
+title: "Implement remediation plan and actions entities"
+labels: [data-model, backend, findings]
+phase: 1
+priority: P1
+---
+
+## Description
+
+Create remediation plan and action entities for tracking finding resolution.
+
+## References
+
+- Data Model: Section 2.13 (Remediation Plan, Remediation Actions)
+- User Stories: US-5.2 (Manage Remediation), US-5.3 (Validate Remediation)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy models: `RemediationPlan`, `RemediationAction`
+- [ ] Plan: finding_id, description, owner_id, target_date, status
+- [ ] Action: plan_id, description, owner_id, due_date, status, sort_order
+- [ ] Alembic migration
+- [ ] Repository with CRUD and status management
+- [ ] Pydantic schemas
+- [ ] Status tracking: planned → in_progress → completed → validated
+- [ ] Unit tests
diff --git a/.github/issues/phase-01-data-model/038-taxonomy-entity.md b/.github/issues/phase-01-data-model/038-taxonomy-entity.md
new file mode 100644
index 00000000..7e9d9299
--- /dev/null
+++ b/.github/issues/phase-01-data-model/038-taxonomy-entity.md
@@ -0,0 +1,32 @@
+---
+title: "Implement taxonomy configuration entity"
+labels: [data-model, backend, configuration]
+phase: 1
+priority: P1
+---
+
+## Description
+
+Create the taxonomy configuration entity that stores configurable enumerations (risk categories, control types, likelihood/impact scales, rating scales, etc.) per tenant.
+
+## References
+
+- Data Model: Section 2.17 (Taxonomy Configuration)
+- PRD: Section 6.1 (Shared Taxonomy)
+- User Stories: US-7.5 (Manage Taxonomy & Configuration)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy model: `TaxonomyCategory` with fields (taxonomy_type, value, label, description, color, sort_order, is_active)
+- [ ] Alembic migration with unique constraint on (tenant_id, taxonomy_type, value)
+- [ ] Seed migration with default taxonomy values:
+ - Risk categories: Access Management, Change Management, Operations, Data Protection, Third Party, Business Continuity
+ - Control types: Preventive, Detective, Corrective
+ - Control nature: Manual, Automated, IT-Dependent Manual
+ - Likelihood: 1-5 scale with labels (Rare, Unlikely, Possible, Likely, Almost Certain)
+ - Impact: 1-5 scale with labels (Negligible, Minor, Moderate, Major, Severe)
+ - Effectiveness: Effective, Needs Improvement, Ineffective
+- [ ] Repository: `TaxonomyRepository` with CRUD, ordering, activation/deactivation
+- [ ] Pydantic schemas with validation against active taxonomy values
+- [ ] Cache-friendly: taxonomy rarely changes, should be cached (#019 Redis config)
+- [ ] Unit tests
diff --git a/.github/issues/phase-01-data-model/039-audit-log-entity.md b/.github/issues/phase-01-data-model/039-audit-log-entity.md
new file mode 100644
index 00000000..ed44d170
--- /dev/null
+++ b/.github/issues/phase-01-data-model/039-audit-log-entity.md
@@ -0,0 +1,47 @@
+---
+title: "Implement audit log entity with append-only enforcement"
+labels: [data-model, backend, security, audit]
+phase: 1
+priority: P0
+---
+
+## Description
+
+Create the immutable audit log entity with hash chaining for tamper detection. Every state change in the system must be recorded. The audit log table must enforce append-only semantics — no UPDATE or DELETE allowed.
+
+## References
+
+- Data Model: Section 2.15 (Audit Log)
+- Architecture: Section 6.3 (Audit Log Architecture)
+- User Stories: US-7.4 (View Audit Logs)
+- PRD: Section 7 (Non-Functional — Immutable audit log)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy model: `AuditLogEntry` with all fields (tenant_id, timestamp, actor_id, actor_type, action, resource_type, resource_id, changes, ip_address, user_agent, previous_hash, entry_hash)
+- [ ] Alembic migration with:
+ - Indexes on (tenant_id, timestamp), (resource_type, resource_id), (actor_id, timestamp)
+ - **Trigger or rule to PREVENT UPDATE and DELETE** on audit_log table
+ - `CREATE RULE no_update AS ON UPDATE TO audit_log DO INSTEAD NOTHING;`
+ - `CREATE RULE no_delete AS ON DELETE TO audit_log DO INSTEAD NOTHING;`
+- [ ] Hash chaining implementation:
+ - Each entry's `entry_hash` = SHA-256(entry_data + previous_hash)
+ - Enables tamper detection by verifying the chain
+- [ ] Audit logging service: `AuditLogger`:
+ - `log(actor, action, resource_type, resource_id, changes, request_context)`
+ - Automatically captures IP, user_agent, tenant_id from request context
+ - Computes hash chain
+- [ ] Actor types: `user`, `agent`, `system`
+- [ ] Action types: `create`, `update`, `delete`, `login`, `logout`, `approve`, `reject`, `archive`
+- [ ] `changes` field captures old/new values: `{"field": {"old": "x", "new": "y"}}`
+- [ ] Read-only repository for querying logs (no mutation methods)
+- [ ] Contracts: `@icontract.ensure(lambda result: result.entry_hash is not None)`
+- [ ] Unit tests including tamper detection verification
+
+## Technical Notes
+
+- The audit log table should never have RLS disabled — always tenant-scoped reads
+- Consider partitioning by month for performance (large tables)
+- Hash chain verification: `verify_chain(start_id, end_id) → bool`
+- For SIEM forwarding, add a background job that publishes new entries via webhook/syslog
+- The `changes` field should use a diff helper that compares old and new model instances
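+
+A sketch of the hash-chain computation and verification; the canonical entry serialization is an assumption and must be fixed project-wide for hashes to be reproducible:
+
+```python
+import hashlib
+import json
+
+
+def compute_entry_hash(entry_data: dict, previous_hash: str) -> str:
+    """entry_hash = SHA-256(canonical entry JSON + previous_hash)."""
+    canonical = json.dumps(entry_data, sort_keys=True, separators=(",", ":"))
+    return hashlib.sha256((canonical + previous_hash).encode()).hexdigest()
+
+
+def verify_chain(entries: list[dict]) -> bool:
+    """Recompute every link; a tampered entry breaks all later hashes."""
+    previous = "0" * 64  # genesis value (an assumption)
+    for entry in entries:
+        if entry["entry_hash"] != compute_entry_hash(entry["data"], previous):
+            return False
+        previous = entry["entry_hash"]
+    return True
+```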
diff --git a/.github/issues/phase-01-data-model/040-comment-notification-entities.md b/.github/issues/phase-01-data-model/040-comment-notification-entities.md
new file mode 100644
index 00000000..482b77e0
--- /dev/null
+++ b/.github/issues/phase-01-data-model/040-comment-notification-entities.md
@@ -0,0 +1,34 @@
+---
+title: "Implement comment and notification entities"
+labels: [data-model, backend, collaboration]
+phase: 1
+priority: P1
+---
+
+## Description
+
+Create the comment (threaded, polymorphic) and notification entities for platform collaboration features.
+
+## References
+
+- Data Model: Section 2.19 (Notification & Comment)
+- PRD: Section 4.7 (Workflow & Collaboration)
+- User Stories: US-3.4 (Review and Approve Workpapers — review notes)
+
+## Acceptance Criteria
+
+- [ ] SQLAlchemy models: `Comment`, `Notification`
+- [ ] Comment: polymorphic (entity_type + entity_id), threaded (parent_id), tenant-scoped
+- [ ] Notification: user-targeted, typed, entity-linked, read status
+- [ ] Notification types enum: assignment, deadline, review_request, evidence_request, mention, system
+- [ ] Alembic migration with indexes on (entity_type, entity_id) and (user_id, is_read)
+- [ ] Repositories for both entities
+- [ ] Pydantic schemas
+- [ ] Comment supports @-mention parsing (extract mentioned user IDs from body)
+- [ ] Unit tests
+
+## Technical Notes
+
+- Comments and notifications are high-volume — indexes are critical for query performance
+- @-mention format: `@[user_id]` in comment body, parsed to create notifications
+- Notifications should be insertable in bulk (e.g., notify all reviewers)
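+
+A sketch of the @-mention parser, assuming user IDs are UUID strings:
+
+```python
+import re
+
+# Matches the `@[user_id]` format, where user_id is a UUID string.
+MENTION_RE = re.compile(r"@\[([0-9a-fA-F-]{36})\]")
+
+
+def extract_mentions(body: str) -> list[str]:
+    """Return the user IDs mentioned in a comment body."""
+    return MENTION_RE.findall(body)
+```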
diff --git a/.github/issues/phase-02-api-foundation/041-fastapi-scaffold.md b/.github/issues/phase-02-api-foundation/041-fastapi-scaffold.md
new file mode 100644
index 00000000..4dd010b0
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/041-fastapi-scaffold.md
@@ -0,0 +1,41 @@
+---
+title: "Scaffold FastAPI application with middleware stack"
+labels: [api, backend, foundation]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Create the FastAPI application entry point with the full middleware stack, lifespan management, and dependency injection setup. This is the HTTP server scaffold that all API endpoints will be mounted on.
+
+## References
+
+- Architecture: Section 3.1 (API Gateway), Section 3.2 (REST API Server)
+- API Spec: Section 1 (API Design Principles)
+- Issue #017 (Exception Hierarchy), #018 (Logging), #019 (Config)
+
+## Acceptance Criteria
+
+- [ ] `backend/src/ground_control/main.py` — FastAPI app factory:
+ - `create_app() → FastAPI` function (testable, configurable)
+ - Lifespan handler: startup (DB pool, Redis, search), shutdown (cleanup)
+ - Middleware stack (ordered):
+ 1. CORS middleware (configurable origins from settings)
+ 2. Request ID middleware (generates/extracts `X-Request-ID`)
+ 3. Tenant context middleware (extracts `X-Tenant-ID`)
+ 4. Authentication middleware (validates JWT/API key)
+ 5. Request logging middleware (log request/response)
+ 6. Rate limiting middleware
+ - Global exception handlers (#017)
+ - OpenAPI configuration (title, version, description, servers)
+- [ ] `backend/src/ground_control/dependencies.py` — FastAPI dependencies:
+ - `get_db` — async database session
+ - `get_current_user` — extract authenticated user from token
+ - `get_current_tenant` — extract tenant context
+ - `get_settings` — application settings
+- [ ] Health endpoints: `/health`, `/health/ready`, `/health/live`
+- [ ] OpenAPI spec served at `/api/v1/openapi.json`
+- [ ] Swagger UI at `/api/v1/docs` (disabled in production)
+- [ ] `uvicorn` configuration for development and production
+- [ ] Integration test: app starts, health check returns 200
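+
+A minimal sketch of the app factory and lifespan wiring (middleware and routers from later issues omitted):
+
+```python
+from contextlib import asynccontextmanager
+
+from fastapi import FastAPI
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    # startup: open DB pool (#021), Redis, search client
+    yield
+    # shutdown: dispose pools, close clients
+
+
+def create_app() -> FastAPI:
+    app = FastAPI(
+        title="Ground Control API",
+        version="0.1.0",
+        openapi_url="/api/v1/openapi.json",
+        docs_url="/api/v1/docs",  # set to None in production
+        lifespan=lifespan,
+    )
+
+    @app.get("/health")
+    async def health() -> dict[str, str]:
+        return {"status": "ok"}
+
+    return app
+```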
diff --git a/.github/issues/phase-02-api-foundation/042-request-middleware.md b/.github/issues/phase-02-api-foundation/042-request-middleware.md
new file mode 100644
index 00000000..53bcb934
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/042-request-middleware.md
@@ -0,0 +1,35 @@
+---
+title: "Implement request ID, correlation, and tenant context middleware"
+labels: [api, backend, cross-cutting, observability]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement middleware components that establish request context (request ID, tenant, correlation ID) for every inbound request. This context flows through logging, audit trails, and error responses.
+
+## References
+
+- Architecture: Section 3.1 (API Gateway — request routing)
+- Issue #018 (Structured Logging — context variables)
+- API Spec: Section 2.2 (X-Tenant-ID header)
+
+## Acceptance Criteria
+
+- [ ] **Request ID middleware:**
+ - Extracts `X-Request-ID` header or generates UUID
+ - Sets in `contextvars` for logging
+ - Returns `X-Request-ID` in response headers
+- [ ] **Tenant context middleware:**
+ - Extracts `X-Tenant-ID` from header (required for multi-tenant, optional for single-tenant)
+ - Validates tenant exists and is active
+ - Sets PostgreSQL session variable: `SET app.current_tenant_id = '{uuid}'`
+ - Sets in `contextvars` for logging and downstream access
+ - Returns 400 if tenant ID missing (multi-tenant mode)
+ - Returns 404 if tenant not found or suspended
+- [ ] **Correlation ID middleware:**
+ - Extracts or generates `X-Correlation-ID` for cross-service tracing
+ - Propagated to outbound HTTP calls
+- [ ] All context variables accessible via `get_request_context()` helper
+- [ ] Unit tests for each middleware (valid, missing, invalid inputs)
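+
+A sketch of the request ID middleware; the tenant and correlation middleware follow the same shape (contextvar names are assumptions):
+
+```python
+import contextvars
+import uuid
+
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.requests import Request
+
+# Read by structlog processors (#018) and audit logging (#039).
+request_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("request_id", default="-")
+
+
+class RequestIDMiddleware(BaseHTTPMiddleware):
+    async def dispatch(self, request: Request, call_next):
+        rid = request.headers.get("X-Request-ID") or str(uuid.uuid4())
+        request_id_var.set(rid)
+        response = await call_next(request)
+        response.headers["X-Request-ID"] = rid
+        return response
+```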
diff --git a/.github/issues/phase-02-api-foundation/043-api-versioning.md b/.github/issues/phase-02-api-foundation/043-api-versioning.md
new file mode 100644
index 00000000..295cd7c1
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/043-api-versioning.md
@@ -0,0 +1,45 @@
+---
+title: "Set up API versioning and router structure"
+labels: [api, backend, architecture]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Establish the API versioning scheme and router organization. All endpoints live under `/api/v1/` with a clean, modular router structure.
+
+## References
+
+- API Spec: Section 4 (Core API Endpoints — all resource paths)
+- API Spec: Section 10 (API Versioning & Deprecation)
+
+## Acceptance Criteria
+
+- [ ] Router hierarchy:
+ ```
+ /api/v1/
+ ├── /auth/ → auth_router
+ ├── /risks/ → risk_router
+ ├── /controls/ → control_router
+ ├── /frameworks/ → framework_router
+ ├── /assessments/ → assessment_router
+ ├── /test-procedures/ → test_procedure_router
+ ├── /artifacts/ → artifact_router
+ ├── /evidence-requests/ → evidence_request_router
+ ├── /findings/ → finding_router
+ ├── /agents/ → agent_router
+ ├── /ccl/ → ccl_router
+ ├── /taxonomy/ → taxonomy_router
+ ├── /reports/ → report_router
+ ├── /audit-logs/ → audit_log_router
+ ├── /search/ → search_router
+ ├── /webhooks/ → webhook_router
+ └── /users/ → user_router
+ ```
+- [ ] Each router in its own module: `backend/src/ground_control/api/v1/{resource}.py`
+- [ ] Version prefix applied at mount time, not repeated in each route
+- [ ] Deprecation middleware: `Sunset` header on deprecated endpoints
+- [ ] OpenAPI tags match router structure for organized Swagger docs
+- [ ] `__init__.py` in api/v1 that assembles all routers
+- [ ] Placeholder routers (empty, returning 501) for endpoints not yet implemented
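+
+A sketch of the router assembly with 501 placeholders (resource list abbreviated):
+
+```python
+from fastapi import APIRouter, HTTPException
+
+api_v1 = APIRouter(prefix="/api/v1")
+
+
+def placeholder_router(name: str) -> APIRouter:
+    """Returns 501 until the real router lands; swapped out per phase."""
+    router = APIRouter()
+
+    @router.get("")
+    async def not_implemented() -> None:
+        raise HTTPException(status_code=501, detail=f"{name} is not implemented yet")
+
+    return router
+
+
+for resource in ("risks", "controls", "frameworks", "assessments"):  # abbreviated
+    api_v1.include_router(placeholder_router(resource), prefix=f"/{resource}", tags=[resource])
+```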
diff --git a/.github/issues/phase-02-api-foundation/044-risk-api.md b/.github/issues/phase-02-api-foundation/044-risk-api.md
new file mode 100644
index 00000000..da0250a4
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/044-risk-api.md
@@ -0,0 +1,39 @@
+---
+title: "Implement risk API endpoints"
+labels: [api, backend, risk-management]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement the full REST API for risk management, including CRUD, filtering, pagination, and sub-resource endpoints.
+
+## References
+
+- API Spec: Section 4.1 (Risks — all endpoints)
+- User Stories: US-1.1 (Maintain Risk Register)
+- Use Cases: UC-01 (Manage Risk Register)
+- Issue #029 (Risk Entity)
+
+## Acceptance Criteria
+
+- [ ] Endpoints per API spec:
+ - `GET /api/v1/risks` — list with filtering, pagination, sorting, field selection, includes
+ - `POST /api/v1/risks` — create (validates against taxonomy)
+ - `GET /api/v1/risks/{id}` — get by ID with optional includes
+ - `PUT /api/v1/risks/{id}` — full replace
+ - `PATCH /api/v1/risks/{id}` — partial update
+ - `DELETE /api/v1/risks/{id}` — archive (soft delete)
+ - `GET /api/v1/risks/{id}/treatments` — list treatment plans
+ - `POST /api/v1/risks/{id}/treatments` — create treatment plan
+ - `GET /api/v1/risks/{id}/controls` — linked controls
+ - `GET /api/v1/risks/{id}/artifacts` — linked evidence
+ - `GET /api/v1/risks/{id}/audit-log` — audit history
+- [ ] Risk domain service: `RiskService` orchestrates repository + audit logging + events
+- [ ] All mutations logged to audit trail
+- [ ] Response format matches envelope spec (#020)
+- [ ] Filter parameters per API spec (category, status, score ranges, owner, etc.)
+- [ ] Authorization checked per endpoint (uses dependencies from #041)
+- [ ] Integration tests with test database
+- [ ] Input validation produces 422 with field-level errors
diff --git a/.github/issues/phase-02-api-foundation/045-control-api.md b/.github/issues/phase-02-api-foundation/045-control-api.md
new file mode 100644
index 00000000..dd224310
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/045-control-api.md
@@ -0,0 +1,30 @@
+---
+title: "Implement control API endpoints"
+labels: [api, backend, control-management]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement the REST API for control management, including framework mapping sub-endpoints and AI suggestion endpoint.
+
+## References
+
+- API Spec: Section 4.2 (Controls — all endpoints)
+- User Stories: US-2.1, US-2.2 (Maintain Control Catalog, Map Controls)
+- Issue #030 (Control Entity), #031 (Framework Entity)
+
+## Acceptance Criteria
+
+- [ ] Endpoints per API spec:
+ - CRUD: `GET/POST/PUT/PATCH/DELETE /api/v1/controls[/{id}]`
+ - `GET /api/v1/controls/{id}/mappings` — framework mappings
+ - `POST /api/v1/controls/{id}/mappings` — add mapping
+ - `DELETE /api/v1/controls/{id}/mappings/{mapping_id}` — remove
+ - `POST /api/v1/controls/{id}/suggest-mappings` — AI suggestion (stub, implemented in Phase 7)
+ - `GET /api/v1/controls/{id}/test-history` — historical test results
+- [ ] Control domain service: `ControlService`
+- [ ] All mutations audit-logged
+- [ ] Response envelope format
+- [ ] Integration tests
diff --git a/.github/issues/phase-02-api-foundation/046-assessment-api.md b/.github/issues/phase-02-api-foundation/046-assessment-api.md
new file mode 100644
index 00000000..ff14bc01
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/046-assessment-api.md
@@ -0,0 +1,27 @@
+---
+title: "Implement assessment and test procedure API endpoints"
+labels: [api, backend, assessment]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement the REST API for assessment campaigns, test procedures, and test steps.
+
+## References
+
+- API Spec: Section 4.4 (Assessment Campaigns), Section 4.5 (Test Procedures)
+- User Stories: US-3.1, US-3.2, US-3.4 (Plan Campaign, Execute Tests, Review Workpapers)
+- Use Cases: UC-03 (Execute Test Procedures)
+
+## Acceptance Criteria
+
+- [ ] Assessment campaign endpoints: CRUD, finalize, progress, test-procedures listing
+- [ ] Test procedure endpoints: get, update, submit results, steps CRUD, submit-for-review, approve, reject
+- [ ] Campaign finalization locks all child test procedures
+- [ ] Progress endpoint returns completion percentage, overdue counts
+- [ ] Result submission accepts both human and agent payloads
+- [ ] Domain services: `AssessmentService`, `TestProcedureService`
+- [ ] All mutations audit-logged
+- [ ] Integration tests
diff --git a/.github/issues/phase-02-api-foundation/047-artifact-api.md b/.github/issues/phase-02-api-foundation/047-artifact-api.md
new file mode 100644
index 00000000..a5ef7402
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/047-artifact-api.md
@@ -0,0 +1,31 @@
+---
+title: "Implement artifact/evidence API endpoints"
+labels: [api, backend, evidence]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement the REST API for artifact management including upload (pre-signed URL flow), download, versioning, and linking.
+
+## References
+
+- API Spec: Section 4.6 (Artifacts — all endpoints, upload flow)
+- User Stories: US-4.1 (Upload Artifacts), US-4.2 (Link Evidence), US-4.4 (Evidence Lineage)
+
+## Acceptance Criteria
+
+- [ ] Upload flow:
+ - `POST /api/v1/artifacts/upload-url` → returns pre-signed S3 URL
+ - Client uploads directly to S3
+ - `POST /api/v1/artifacts` — registers artifact, verifies hash
+- [ ] CRUD endpoints per API spec
+- [ ] Versioning: `GET /{id}/versions`, `POST /{id}/versions`
+- [ ] Linking: `GET /{id}/links`, `POST /{id}/links`, `DELETE /{id}/links/{link_id}`
+- [ ] Lineage: `GET /{id}/lineage` — full chain of custody timeline
+- [ ] Download: `GET /{id}/download` → redirect to pre-signed URL
+- [ ] File size validation (configurable max, default 500MB)
+- [ ] SHA-256 hash verification on registration
+- [ ] Domain service: `EvidenceService`
+- [ ] Integration tests with mocked S3
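+
+A sketch of step 1 of the upload flow with boto3 (bucket name and MinIO endpoint are illustrative):
+
+```python
+import boto3
+
+s3 = boto3.client("s3", endpoint_url="http://localhost:9000")  # MinIO in development
+
+
+def create_upload_url(bucket: str, storage_key: str, expires: int = 900) -> str:
+    """Step 1: client PUTs the file directly to the returned URL."""
+    return s3.generate_presigned_url(
+        "put_object",
+        Params={"Bucket": bucket, "Key": storage_key},
+        ExpiresIn=expires,
+    )
+```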
diff --git a/.github/issues/phase-02-api-foundation/048-finding-api.md b/.github/issues/phase-02-api-foundation/048-finding-api.md
new file mode 100644
index 00000000..a42b88a9
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/048-finding-api.md
@@ -0,0 +1,24 @@
+---
+title: "Implement finding and remediation API endpoints"
+labels: [api, backend, findings]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement the REST API for findings, remediation plans, and remediation validation.
+
+## References
+
+- API Spec: Section 4.8 (Findings — all endpoints)
+- User Stories: US-5.1, US-5.2, US-5.3 (Record Findings, Manage Remediation, Validate)
+
+## Acceptance Criteria
+
+- [ ] Finding endpoints: CRUD, remediation sub-resources, validate, close
+- [ ] Remediation plan: create, update, list actions
+- [ ] Finding closure requires validation step (enforced by state machine)
+- [ ] Domain service: `FindingService`
+- [ ] All mutations audit-logged
+- [ ] Integration tests
diff --git a/.github/issues/phase-02-api-foundation/049-taxonomy-audit-api.md b/.github/issues/phase-02-api-foundation/049-taxonomy-audit-api.md
new file mode 100644
index 00000000..46cb593b
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/049-taxonomy-audit-api.md
@@ -0,0 +1,24 @@
+---
+title: "Implement taxonomy, audit log, and search API endpoints"
+labels: [api, backend, configuration, audit]
+phase: 2
+priority: P1
+---
+
+## Description
+
+Implement the REST API for taxonomy configuration, audit log querying, and full-text search.
+
+## References
+
+- API Spec: Section 4.11 (Taxonomy), 4.13 (Audit Logs), 4.14 (Search)
+- User Stories: US-7.4 (View Audit Logs), US-7.5 (Manage Taxonomy)
+
+## Acceptance Criteria
+
+- [ ] Taxonomy: list types, list values, add value, update value
+- [ ] Audit logs: filterable query (date range, actor, action, entity), export (CSV/JSON)
+- [ ] Audit log queries are read-only (no mutation endpoints)
+- [ ] Search: `GET /api/v1/search?q=...&type=...` — full-text across entities
+- [ ] Search delegates to Meilisearch (stub if not yet integrated)
+- [ ] Integration tests
diff --git a/.github/issues/phase-02-api-foundation/050-evidence-request-api.md b/.github/issues/phase-02-api-foundation/050-evidence-request-api.md
new file mode 100644
index 00000000..409a8e48
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/050-evidence-request-api.md
@@ -0,0 +1,25 @@
+---
+title: "Implement evidence request API endpoints"
+labels: [api, backend, evidence]
+phase: 2
+priority: P1
+---
+
+## Description
+
+Implement the REST API for evidence requests — the workflow where auditors request evidence from control owners.
+
+## References
+
+- API Spec: Section 4.7 (Evidence Requests)
+- User Stories: US-3.3 (Collect Evidence via Requests)
+- Use Cases: UC-04 (Collect Evidence)
+
+## Acceptance Criteria
+
+- [ ] Endpoints: list, create, update, submit, accept, reject
+- [ ] Submit endpoint allows control owner to upload evidence and link it
+- [ ] Accept/reject by auditor with comments
+- [ ] Overdue detection (status auto-updated based on due_date)
+- [ ] Domain service: `EvidenceRequestService`
+- [ ] Integration tests
diff --git a/.github/issues/phase-02-api-foundation/051-local-auth.md b/.github/issues/phase-02-api-foundation/051-local-auth.md
new file mode 100644
index 00000000..3721122b
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/051-local-auth.md
@@ -0,0 +1,31 @@
+---
+title: "Implement local authentication (Argon2id + login flow)"
+labels: [api, backend, auth, security]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement local (username/password) authentication with Argon2id password hashing, login/logout endpoints, and password management.
+
+## References
+
+- Architecture: Section 4 (Authentication — local path)
+- Architecture: Section 6.2 (Data Protection — Argon2id hashing)
+- API Spec: Section 2 (Authentication — token endpoint)
+- User Stories: US-7.1 (Configure SSO — local as fallback)
+
+## Acceptance Criteria
+
+- [ ] Password hashing using Argon2id (via `passlib`)
+- [ ] Login endpoint: `POST /api/v1/auth/login` — email + password → JWT tokens
+- [ ] Registration endpoint: `POST /api/v1/auth/register` (admin-only or first-user bootstrap)
+- [ ] Password change: `POST /api/v1/auth/change-password`
+- [ ] Password reset flow: request → email token → reset
+- [ ] Password validation: minimum 12 chars, complexity configurable
+- [ ] Account lockout after N failed attempts (configurable, default 5)
+- [ ] Login events audit-logged (success and failure)
+- [ ] Rate limiting on auth endpoints (stricter than general API)
+- [ ] Unit tests for hashing, login flow, lockout
+- [ ] Contracts: password never stored in plaintext, never returned in API responses
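+
+A sketch of the hashing helpers with passlib (requires the `passlib[argon2]` extra; passlib's argon2 handler uses the id variant by default):
+
+```python
+from passlib.context import CryptContext
+
+pwd_context = CryptContext(schemes=["argon2"], deprecated="auto")
+
+
+def hash_password(plain: str) -> str:
+    return pwd_context.hash(plain)
+
+
+def verify_password(plain: str, hashed: str) -> bool:
+    return pwd_context.verify(plain, hashed)
+```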
diff --git a/.github/issues/phase-02-api-foundation/052-jwt-management.md b/.github/issues/phase-02-api-foundation/052-jwt-management.md
new file mode 100644
index 00000000..f0f7fdfc
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/052-jwt-management.md
@@ -0,0 +1,37 @@
+---
+title: "Implement JWT token management (access + refresh)"
+labels: [api, backend, auth, security]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement JWT access and refresh token issuance, validation, and rotation. All authenticated API access requires a valid JWT.
+
+## References
+
+- Architecture: Section 4 (Authentication — JWT Access Token)
+- API Spec: Section 2 (Authentication — token usage, bearer header)
+
+## Acceptance Criteria
+
+- [ ] Token service: `TokenService`:
+ - `create_access_token(user_id, tenant_id, roles, scopes) → str`
+ - `create_refresh_token(user_id) → str`
+ - `validate_token(token) → TokenPayload`
+ - `refresh(refresh_token) → (access_token, refresh_token)` — rotation
+ - `revoke(token)` — adds to denylist
+- [ ] Access token payload: `sub` (user_id), `tenant_id`, `roles`, `scopes`, `exp`, `iat`, `jti`
+- [ ] Access token lifetime: configurable (default 60 min)
+- [ ] Refresh token lifetime: configurable (default 30 days)
+- [ ] Refresh token rotation: old refresh token invalidated on use
+- [ ] Token denylist in Redis (for revocation, with TTL = token max lifetime)
+- [ ] `get_current_user` dependency extracts and validates JWT from `Authorization: Bearer` header
+- [ ] Unit tests for token creation, validation, expiry, revocation, rotation
+
+## Technical Notes
+
+- Use `python-jose[cryptography]` for JWT encoding/decoding
+- Support both HS256 (shared secret) and RS256 (asymmetric) — configurable
+- Token denylist is checked on every request — must be fast (Redis GET)
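+
+A short sketch of token creation and validation with `python-jose` (HS256 path shown; RS256 swaps the shared secret for a key pair). The settings constants are illustrative:
+
+```python
+from datetime import datetime, timedelta, timezone
+from uuid import uuid4
+
+from jose import JWTError, jwt
+
+SECRET = "change-me"            # loaded from settings in practice
+ALGORITHM = "HS256"
+ACCESS_TTL = timedelta(minutes=60)
+
+def create_access_token(user_id: str, tenant_id: str,
+                        roles: list[str], scopes: list[str]) -> str:
+    now = datetime.now(timezone.utc)
+    claims = {
+        "sub": user_id,
+        "tenant_id": tenant_id,
+        "roles": roles,
+        "scopes": scopes,
+        "iat": int(now.timestamp()),
+        "exp": int((now + ACCESS_TTL).timestamp()),
+        "jti": str(uuid4()),    # denylist key used for revocation
+    }
+    return jwt.encode(claims, SECRET, algorithm=ALGORITHM)
+
+def validate_token(token: str) -> dict:
+    try:
+        return jwt.decode(token, SECRET, algorithms=[ALGORITHM])
+    except JWTError as exc:     # expired, bad signature, or malformed
+        raise ValueError("invalid token") from exc
+```
+
+The real `validate_token` would additionally check the `jti` claim against the Redis denylist before accepting the token.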
diff --git a/.github/issues/phase-02-api-foundation/053-oidc-auth.md b/.github/issues/phase-02-api-foundation/053-oidc-auth.md
new file mode 100644
index 00000000..14670a06
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/053-oidc-auth.md
@@ -0,0 +1,32 @@
+---
+title: "Implement OIDC authentication integration"
+labels: [api, backend, auth, sso]
+phase: 2
+priority: P0
+---
+
+## Description
+
+Implement OpenID Connect (OIDC) authentication using Authorization Code flow with PKCE. Supports corporate IdPs (Okta, Azure AD/Entra ID, Google).
+
+## References
+
+- Architecture: Section 4 (Authentication — OIDC)
+- Deployment: Section 5.2 (OIDC Configuration)
+- PRD: Section 8.1 (Authentication — OIDC)
+- User Stories: US-7.1 (Configure SSO)
+- Use Cases: UC-07 (Configure SSO)
+
+## Acceptance Criteria
+
+- [ ] OIDC endpoints:
+ - `GET /api/v1/auth/oidc/authorize` — initiates OIDC flow (redirect to IdP)
+ - `GET /api/v1/auth/oidc/callback` — handles IdP callback, exchanges code for tokens
+- [ ] OIDC configuration stored per tenant (issuer, client_id, client_secret, scopes)
+- [ ] ID token validation: issuer, audience, expiry, nonce, signature
+- [ ] User provisioning: JIT (Just-In-Time) — create user on first login if not exists
+- [ ] Claim mapping: email, display_name, groups → roles
+- [ ] PKCE support for public clients (SPA)
+- [ ] OIDC discovery: auto-fetch `.well-known/openid-configuration`
+- [ ] Admin endpoint to configure OIDC settings
+- [ ] Integration tests with mocked OIDC provider
diff --git a/.github/issues/phase-02-api-foundation/054-api-key-auth.md b/.github/issues/phase-02-api-foundation/054-api-key-auth.md
new file mode 100644
index 00000000..a76fe6e6
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/054-api-key-auth.md
@@ -0,0 +1,29 @@
+---
+title: "Implement API key authentication"
+labels: [api, backend, auth]
+phase: 2
+priority: P1
+---
+
+## Description
+
+Implement API key-based authentication for service accounts and simple integrations. API keys are long-lived and provide an alternative to OAuth2 for scripts and CI tools.
+
+## References
+
+- API Spec: Section 2.3 (API Keys)
+- PRD: Section 8.1 (Authentication — API Keys)
+
+## Acceptance Criteria
+
+- [ ] API key format: `gc_live_` prefix + 32 random bytes (base64)
+- [ ] Key storage: Argon2id hash in database (never store plaintext)
+- [ ] `Authorization: ApiKey <key>` header support
+- [ ] Key management endpoints (admin):
+ - `POST /api/v1/auth/api-keys` — create (returns key once, hash stored)
+ - `GET /api/v1/auth/api-keys` — list (metadata only, no key values)
+ - `DELETE /api/v1/auth/api-keys/{id}` — revoke
+- [ ] Keys scoped to a tenant and a set of permissions
+- [ ] Key usage logged in audit trail
+- [ ] Rate limiting per key
+- [ ] Unit tests
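+
+One possible shape for key generation and storage per the format above. Because Argon2id hashes are salted, a production lookup would also store a short non-secret key identifier to locate the row before verifying:
+
+```python
+import base64
+import secrets
+
+from passlib.context import CryptContext
+
+ctx = CryptContext(schemes=["argon2"])
+
+def generate_api_key() -> tuple[str, str]:
+    """Return (plaintext_key, stored_hash); the plaintext is shown only once."""
+    raw = secrets.token_bytes(32)                       # 32 random bytes
+    key = "gc_live_" + base64.urlsafe_b64encode(raw).decode().rstrip("=")
+    return key, ctx.hash(key)                           # only the hash is stored
+```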
diff --git a/.github/issues/phase-02-api-foundation/055-openapi-validation.md b/.github/issues/phase-02-api-foundation/055-openapi-validation.md
new file mode 100644
index 00000000..affaf734
--- /dev/null
+++ b/.github/issues/phase-02-api-foundation/055-openapi-validation.md
@@ -0,0 +1,26 @@
+---
+title: "Set up OpenAPI specification validation and documentation"
+labels: [api, backend, documentation, quality]
+phase: 2
+priority: P1
+---
+
+## Description
+
+Ensure the auto-generated OpenAPI 3.1 specification is complete, accurate, and validated in CI. The spec is the contract between backend and frontend/SDK development.
+
+## References
+
+- API Spec: Section 1 (Discoverable — OpenAPI 3.1 at /api/v1/openapi.json)
+- Architecture: Section 3.2 (OpenAPI 3.1 specification)
+
+## Acceptance Criteria
+
+- [ ] OpenAPI spec auto-generated by FastAPI at `/api/v1/openapi.json`
+- [ ] Spec includes: all endpoints, request/response schemas, auth schemes, error codes
+- [ ] CI validation: `spectral lint` validates the generated spec against OpenAPI standards
+- [ ] Spec snapshot test: generated spec compared to committed version (detect unintended changes)
+- [ ] API documentation generated from spec (Redoc or Scalar at `/api/v1/docs`)
+- [ ] `make openapi-export` — exports spec to `docs/api/openapi.json`
+- [ ] Response examples included in schemas for documentation
+- [ ] Integration test: every endpoint returns responses matching its declared schema
diff --git a/.github/issues/phase-03-auth-identity/056-rbac-engine.md b/.github/issues/phase-03-auth-identity/056-rbac-engine.md
new file mode 100644
index 00000000..bdbcca52
--- /dev/null
+++ b/.github/issues/phase-03-auth-identity/056-rbac-engine.md
@@ -0,0 +1,31 @@
+---
+title: "Implement RBAC engine with permission resolution"
+labels: [backend, auth, security]
+phase: 3
+priority: P0
+---
+
+## Description
+
+Build the role-based access control engine that resolves a user's effective permissions from their role assignments and evaluates access decisions.
+
+## References
+
+- Architecture: Section 4 (Authorization — RBAC)
+- Data Model: Section 2.3 (Role & Permission)
+- Issue #028 (Role & Permission Model), #025 (Policy as Code)
+- User Stories: US-7.2
+
+## Acceptance Criteria
+
+- [ ] `PermissionResolver` service:
+ - `get_effective_permissions(user_id) → set[Permission]` — resolves all permissions from all assigned roles
+ - `has_permission(user_id, resource, action, scope) → bool`
+ - Caches resolved permissions in Redis (invalidate on role/assignment change)
+- [ ] Permission format parsing: `"risks:read:*"` → `Permission(resource="risks", action="read", scope="*")`
+- [ ] Wildcard support: `*` matches all (e.g., `risks:*:*` = full access to risks)
+- [ ] Scope evaluation: `bu=engineering` matches user's business unit
+- [ ] System roles immutable; custom roles fully configurable
+- [ ] Contracts: permission strings must match `resource:action[:scope]` pattern
+- [ ] Property-based tests (Hypothesis): random permission sets always resolve deterministically
+- [ ] Unit tests for resolution, wildcard matching, scope evaluation
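+
+A small sketch of the parsing and wildcard matching described above; the dataclass shape is illustrative:
+
+```python
+from dataclasses import dataclass
+
+@dataclass(frozen=True)
+class Permission:
+    resource: str
+    action: str
+    scope: str = "*"
+
+def parse_permission(raw: str) -> Permission:
+    parts = raw.split(":")
+    if len(parts) not in (2, 3):
+        raise ValueError(f"expected resource:action[:scope], got {raw!r}")
+    return Permission(*parts)
+
+def matches(granted: Permission, resource: str, action: str, scope: str) -> bool:
+    # "*" in any position of the granted permission matches anything requested.
+    return all(
+        have in ("*", want)
+        for have, want in (
+            (granted.resource, resource),
+            (granted.action, action),
+            (granted.scope, scope),
+        )
+    )
+```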
diff --git a/.github/issues/phase-03-auth-identity/057-abac-engine.md b/.github/issues/phase-03-auth-identity/057-abac-engine.md
new file mode 100644
index 00000000..413d38fc
--- /dev/null
+++ b/.github/issues/phase-03-auth-identity/057-abac-engine.md
@@ -0,0 +1,29 @@
+---
+title: "Implement ABAC policy engine"
+labels: [backend, auth, security]
+phase: 3
+priority: P1
+---
+
+## Description
+
+Build the attribute-based access control engine that evaluates fine-grained access policies based on subject, resource, action, and context attributes. Integrates with the policy-as-code framework (#025).
+
+## References
+
+- Architecture: Section 4 (Authorization — ABAC)
+- Issue #025 (Policy as Code Framework)
+
+## Acceptance Criteria
+
+- [ ] ABAC policy evaluator implementing policies from #025:
+ - Tenant isolation (always enforced)
+ - Business unit scoping
+ - Assessment scoping (auditor sees only assigned work)
+ - Data classification restrictions
+- [ ] Policy inputs: subject attributes (roles, BU, tenant), resource attributes (owner, classification, tenant), action, environment (time, IP)
+- [ ] Composable with RBAC: RBAC provides base permissions, ABAC further restricts
+- [ ] Decision: `allow` or `deny(reason)`
+- [ ] Policy evaluation is fast (< 1ms per decision — critical path)
+- [ ] Hot-reloadable policies (no restart required)
+- [ ] Unit tests with comprehensive allow/deny scenarios
diff --git a/.github/issues/phase-03-auth-identity/058-auth-middleware.md b/.github/issues/phase-03-auth-identity/058-auth-middleware.md
new file mode 100644
index 00000000..56af31e1
--- /dev/null
+++ b/.github/issues/phase-03-auth-identity/058-auth-middleware.md
@@ -0,0 +1,31 @@
+---
+title: "Implement authorization middleware and decorators"
+labels: [backend, auth, security, api]
+phase: 3
+priority: P0
+---
+
+## Description
+
+Create FastAPI middleware and route decorators that enforce authorization on every endpoint. Connect the RBAC/ABAC engines to the API layer.
+
+## References
+
+- Issue #056 (RBAC Engine), #057 (ABAC Engine)
+- API Spec: Section 2 (Authentication — bearer tokens)
+
+## Acceptance Criteria
+
+- [ ] `require_permission(resource, action)` decorator for route handlers:
+ ```python
+ @router.post("/risks")
+ @require_permission("risks", "write")
+ async def create_risk(...): ...
+ ```
+- [ ] `require_any_permission(...)` and `require_all_permissions(...)` variants
+- [ ] `require_role(role_name)` shorthand decorator
+- [ ] Authorization middleware evaluates RBAC + ABAC for every request
+- [ ] 403 response with clear error message when denied
+- [ ] Authorization decisions audit-logged for sensitive operations
+- [ ] Bypass for system/internal calls (e.g., background jobs)
+- [ ] Unit and integration tests for all permission scenarios
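+
+One way the decorator could look, assuming the handler declares a `current_user` parameter supplied by the `get_current_user` dependency (#052) and that the user object wraps the #056 resolver:
+
+```python
+import functools
+
+from fastapi import HTTPException
+
+def require_permission(resource: str, action: str):
+    def decorator(handler):
+        @functools.wraps(handler)       # preserves the signature FastAPI inspects
+        async def wrapper(*args, current_user, **kwargs):
+            if not current_user.has_permission(resource, action):
+                raise HTTPException(
+                    status_code=403,
+                    detail=f"Missing permission: {resource}:{action}",
+                )
+            return await handler(*args, current_user=current_user, **kwargs)
+        return wrapper
+    return decorator
+```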
diff --git a/.github/issues/phase-03-auth-identity/059-saml-sso.md b/.github/issues/phase-03-auth-identity/059-saml-sso.md
new file mode 100644
index 00000000..d6e33c78
--- /dev/null
+++ b/.github/issues/phase-03-auth-identity/059-saml-sso.md
@@ -0,0 +1,32 @@
+---
+title: "Implement SAML 2.0 SSO integration"
+labels: [backend, auth, sso, enterprise]
+phase: 3
+priority: P1
+---
+
+## Description
+
+Implement SAML 2.0 Service Provider (SP) for enterprise SSO with corporate IdPs (Okta, Azure AD, ADFS, Ping Identity).
+
+## References
+
+- Deployment: Section 5.1 (SAML 2.0 configuration)
+- PRD: Section 8.1 (SAML 2.0 — SP/IdP initiated)
+- User Stories: US-7.1 (Configure SSO)
+- Use Cases: UC-07
+
+## Acceptance Criteria
+
+- [ ] SAML endpoints:
+ - `GET /api/v1/auth/saml/metadata` — SP metadata XML
+ - `POST /api/v1/auth/saml/acs` — Assertion Consumer Service
+ - `GET /api/v1/auth/saml/slo` — Single Logout
+- [ ] SP-initiated and IdP-initiated SSO flows
+- [ ] Attribute mapping: email, display_name, groups (configurable)
+- [ ] Signed requests (RSA-SHA256)
+- [ ] JIT user provisioning on first SAML login
+- [ ] Group-to-role mapping configurable per tenant
+- [ ] SSO enforcement option (disable local login)
+- [ ] Admin UI endpoint to configure SAML settings
+- [ ] Integration tests with `python3-saml` or `pysaml2`
diff --git a/.github/issues/phase-03-auth-identity/060-oauth2-client-credentials.md b/.github/issues/phase-03-auth-identity/060-oauth2-client-credentials.md
new file mode 100644
index 00000000..6d3a28c4
--- /dev/null
+++ b/.github/issues/phase-03-auth-identity/060-oauth2-client-credentials.md
@@ -0,0 +1,28 @@
+---
+title: "Implement OAuth2 client credentials flow for agents"
+labels: [backend, auth, agents]
+phase: 3
+priority: P0
+---
+
+## Description
+
+Implement OAuth2 client credentials grant for machine-to-machine authentication, primarily used by AI agents.
+
+## References
+
+- API Spec: Section 2.1 (Token Endpoint — client_credentials)
+- Architecture: Section 4 (Agent authentication)
+- User Stories: US-8.1 (Register an Agent)
+
+## Acceptance Criteria
+
+- [ ] Token endpoint: `POST /api/v1/auth/token` with `grant_type=client_credentials`
+- [ ] Client authentication: client_id + client_secret (from agent registration)
+- [ ] Scope validation: requested scopes must be subset of agent's allowed_scopes
+- [ ] Returns JWT access token with agent identity claims
+- [ ] Token includes `actor_type: "agent"` to distinguish from human tokens
+- [ ] Client secret hashed with Argon2id in database
+- [ ] Rate limiting per client_id
+- [ ] Audit logging of token issuance
+- [ ] Unit tests
diff --git a/.github/issues/phase-03-auth-identity/061-scim-provisioning.md b/.github/issues/phase-03-auth-identity/061-scim-provisioning.md
new file mode 100644
index 00000000..5c9d0dd3
--- /dev/null
+++ b/.github/issues/phase-03-auth-identity/061-scim-provisioning.md
@@ -0,0 +1,31 @@
+---
+title: "Implement SCIM 2.0 provisioning endpoints"
+labels: [backend, auth, sso, enterprise]
+phase: 3
+priority: P1
+---
+
+## Description
+
+Implement SCIM 2.0 endpoints for automated user and group provisioning/deprovisioning from identity providers.
+
+## References
+
+- Deployment: Section 5.3 (SCIM 2.0 Provisioning)
+- PRD: Section 8.1 (SCIM 2.0)
+- User Stories: US-7.2 (SCIM provisioning syncs users)
+
+## Acceptance Criteria
+
+- [ ] SCIM 2.0 endpoints at `/api/v1/scim/v2/`:
+ - `/Users` — CRUD + List + Filter
+ - `/Groups` — CRUD + List
+ - `/Schemas` — schema discovery
+ - `/ServiceProviderConfig` — capabilities
+- [ ] Bearer token authentication for SCIM endpoint
+- [ ] User create → creates Ground Control user with `auth_provider: "scim"`
+- [ ] User deactivate → deactivates Ground Control user (preserves data)
+- [ ] Group sync → maps to Ground Control roles
+- [ ] Attribute mapping: userName=email, displayName, active
+- [ ] Patch support (SCIM PATCH operations)
+- [ ] Integration tests with sample SCIM payloads
diff --git a/.github/issues/phase-03-auth-identity/062-mfa-support.md b/.github/issues/phase-03-auth-identity/062-mfa-support.md
new file mode 100644
index 00000000..63074448
--- /dev/null
+++ b/.github/issues/phase-03-auth-identity/062-mfa-support.md
@@ -0,0 +1,26 @@
+---
+title: "Implement MFA support (TOTP + WebAuthn)"
+labels: [backend, auth, security]
+phase: 3
+priority: P2
+---
+
+## Description
+
+Add multi-factor authentication support for local accounts using TOTP (authenticator apps) and WebAuthn (hardware security keys, biometrics).
+
+## References
+
+- PRD: Section 8.1 (Authentication — MFA: TOTP and WebAuthn)
+- Deployment: Section 9 (Security Hardening — Enable MFA)
+
+## Acceptance Criteria
+
+- [ ] TOTP enrollment: generate secret, display QR code, verify first code
+- [ ] TOTP verification on login (after password)
+- [ ] Recovery codes (one-time use, generated at enrollment)
+- [ ] WebAuthn registration and authentication (passkeys)
+- [ ] MFA can be required per-tenant (admin setting)
+- [ ] MFA bypass for SSO users (IdP handles MFA)
+- [ ] User model: `mfa_enabled`, `mfa_method`, `mfa_secret` (encrypted)
+- [ ] Unit tests
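+
+A rough sketch of TOTP enrollment and verification, assuming the `pyotp` library; QR rendering and encrypting `mfa_secret` at rest are out of scope here:
+
+```python
+import pyotp
+
+def enroll_totp(email: str) -> tuple[str, str]:
+    """Return (secret, provisioning_uri); the URI is rendered as the QR code."""
+    secret = pyotp.random_base32()
+    uri = pyotp.TOTP(secret).provisioning_uri(name=email, issuer_name="Ground Control")
+    return secret, uri
+
+def verify_totp(secret: str, code: str) -> bool:
+    # valid_window=1 tolerates one 30-second step of clock drift
+    return pyotp.TOTP(secret).verify(code, valid_window=1)
+```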
diff --git a/.github/issues/phase-03-auth-identity/063-user-role-management-api.md b/.github/issues/phase-03-auth-identity/063-user-role-management-api.md
new file mode 100644
index 00000000..66dcd0d4
--- /dev/null
+++ b/.github/issues/phase-03-auth-identity/063-user-role-management-api.md
@@ -0,0 +1,36 @@
+---
+title: "Implement user and role management API"
+labels: [api, backend, auth, admin]
+phase: 3
+priority: P0
+---
+
+## Description
+
+Build admin API endpoints for managing users, roles, and role assignments.
+
+## References
+
+- API Spec: Section 4.9 (user management endpoints implied rather than explicitly specified)
+- User Stories: US-7.2 (Manage Users and Roles)
+
+## Acceptance Criteria
+
+- [ ] User endpoints:
+ - `GET /api/v1/users` — list (filterable, paginated)
+ - `POST /api/v1/users` — create (admin or SCIM)
+ - `GET /api/v1/users/{id}` — get user details
+ - `PATCH /api/v1/users/{id}` — update
+ - `POST /api/v1/users/{id}/deactivate` — deactivate
+ - `POST /api/v1/users/{id}/reactivate` — reactivate
+ - `GET /api/v1/users/{id}/roles` — list role assignments
+ - `POST /api/v1/users/{id}/roles` — assign role
+ - `DELETE /api/v1/users/{id}/roles/{role_id}` — unassign role
+- [ ] Role endpoints:
+ - `GET /api/v1/roles` — list
+ - `POST /api/v1/roles` — create custom role
+ - `PATCH /api/v1/roles/{id}` — update permissions
+ - `DELETE /api/v1/roles/{id}` — delete (only custom, not system)
+- [ ] All mutations require admin role
+- [ ] All mutations audit-logged
+- [ ] Integration tests
diff --git a/.github/issues/phase-04-business-logic/064-risk-scoring-engine.md b/.github/issues/phase-04-business-logic/064-risk-scoring-engine.md
new file mode 100644
index 00000000..66fe82ef
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/064-risk-scoring-engine.md
@@ -0,0 +1,36 @@
+---
+title: "Implement risk scoring engine with pluggable models"
+labels: [backend, risk-management, domain-logic]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build the risk scoring engine that calculates inherent and residual risk scores using configurable scoring methodologies. The engine must be pluggable to support different models (matrix, FAIR quantitative, custom).
+
+## References
+
+- PRD: Section 4.1 (Risk Scoring Engine — pluggable, 5x5 default, FAIR via plugin)
+- User Stories: US-1.2 (update likelihood/impact scores with justification)
+- Data Model: Section 2.4 (inherent/residual scores)
+
+## Acceptance Criteria
+
+- [ ] Scoring engine with strategy pattern:
+ - `ScoringStrategy` protocol/interface
+ - `MatrixScoringStrategy` — default 5x5 matrix (likelihood × impact)
+ - `WeightedScoringStrategy` — weighted multi-factor scoring
+ - Factory: `get_scoring_strategy(tenant_settings) → ScoringStrategy`
+- [ ] Score calculation includes:
+ - Inherent score (without controls)
+ - Residual score (after control effectiveness considered)
+ - Score delta and trend (compared to prior assessment)
+- [ ] Risk appetite evaluation: flag risks exceeding threshold
+- [ ] Heat map data generation from scored risks
+- [ ] Contracts (formally verified with CrossHair/deal):
+ - `@icontract.ensure(lambda result: 1 <= result.score <= 25)` (for 5x5)
+ - Score is monotonically related to inputs
+ - Residual ≤ inherent (when controls are effective)
+- [ ] Hypothesis property-based tests for scoring edge cases
+- [ ] Unit tests for all scoring strategies
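+
+An illustrative cut of the strategy seam with the default matrix model; the contract decorators mirror the criteria above:
+
+```python
+from typing import Protocol
+
+import icontract
+
+class ScoringStrategy(Protocol):
+    def score(self, likelihood: int, impact: int) -> int: ...
+
+class MatrixScoringStrategy:
+    @icontract.require(lambda likelihood, impact: 1 <= likelihood <= 5 and 1 <= impact <= 5)
+    @icontract.ensure(lambda result: 1 <= result <= 25)
+    def score(self, likelihood: int, impact: int) -> int:
+        # Monotone in both inputs: raising either factor never lowers the score.
+        return likelihood * impact
+```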
diff --git a/.github/issues/phase-04-business-logic/065-risk-campaign-workflow.md b/.github/issues/phase-04-business-logic/065-risk-campaign-workflow.md
new file mode 100644
index 00000000..537cf3bc
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/065-risk-campaign-workflow.md
@@ -0,0 +1,28 @@
+---
+title: "Implement risk assessment campaign workflow"
+labels: [backend, risk-management, domain-logic]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build the business logic for running risk assessment campaigns: creation, assignment, scoring, review, and finalization.
+
+## References
+
+- User Stories: US-1.2 (Conduct Risk Assessment Campaign)
+- Use Cases: UC-02 (Run Risk Assessment Campaign)
+
+## Acceptance Criteria
+
+- [ ] Campaign lifecycle: `create → populate_risks → assign → assess → review → finalize`
+- [ ] Scope-based risk population (filter by category, BU, status)
+- [ ] Assignment of individual risks to assessors with notifications
+- [ ] Assessor updates: likelihood, impact, justification, evidence links
+- [ ] Campaign progress tracking (% complete, overdue items)
+- [ ] Finalization locks all assessments, generates comparison report
+- [ ] Cannot finalize with incomplete assessments (validation check)
+- [ ] Prior period comparison (current vs previous campaign scores)
+- [ ] Domain events: `campaign.created`, `campaign.finalized`, `assessment.updated`
+- [ ] Integration tests for full campaign lifecycle
diff --git a/.github/issues/phase-04-business-logic/066-risk-treatment.md b/.github/issues/phase-04-business-logic/066-risk-treatment.md
new file mode 100644
index 00000000..7da05a6b
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/066-risk-treatment.md
@@ -0,0 +1,26 @@
+---
+title: "Implement risk treatment plan management"
+labels: [backend, risk-management, domain-logic]
+phase: 4
+priority: P1
+---
+
+## Description
+
+Build the service for managing risk treatment plans (accept, mitigate, transfer, avoid) with linked action items.
+
+## References
+
+- User Stories: US-1.4 (Track Risk Treatment Plans)
+- Data Model: Section 2.16 (Risk Treatment, Treatment Actions)
+
+## Acceptance Criteria
+
+- [ ] Treatment plan CRUD linked to risks
+- [ ] Treatment types: accept, mitigate, transfer, avoid
+- [ ] Action items with owners, due dates, status tracking
+- [ ] Completion of all actions triggers residual risk re-assessment prompt
+- [ ] Overdue action notifications
+- [ ] Treatment plan history is auditable
+- [ ] Domain events: `treatment.created`, `treatment.completed`
+- [ ] Unit tests
diff --git a/.github/issues/phase-04-business-logic/067-risk-control-linkage.md b/.github/issues/phase-04-business-logic/067-risk-control-linkage.md
new file mode 100644
index 00000000..14a70236
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/067-risk-control-linkage.md
@@ -0,0 +1,24 @@
+---
+title: "Implement risk-control linkage"
+labels: [backend, domain-logic]
+phase: 4
+priority: P1
+---
+
+## Description
+
+Build the many-to-many relationship between risks and controls, enabling residual risk calculation based on control effectiveness.
+
+## References
+
+- Data Model: Section 3 (Risk *--* Control via risk_control_mappings)
+- PRD: Section 4.1 (Controls linked to risks for residual scoring)
+
+## Acceptance Criteria
+
+- [ ] Junction table: `risk_control_mappings` (risk_id, control_id, mapping_notes)
+- [ ] Alembic migration
+- [ ] API endpoints to link/unlink controls from risks
+- [ ] Residual risk recalculation when control effectiveness changes
+- [ ] View: "controls mitigating this risk" and "risks this control mitigates"
+- [ ] Unit tests
diff --git a/.github/issues/phase-04-business-logic/068-control-catalog-service.md b/.github/issues/phase-04-business-logic/068-control-catalog-service.md
new file mode 100644
index 00000000..a9bc6d56
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/068-control-catalog-service.md
@@ -0,0 +1,27 @@
+---
+title: "Implement control catalog management service"
+labels: [backend, control-management, domain-logic]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build the domain service for control catalog management, including CRUD, framework mapping, CCL adoption, and effectiveness tracking.
+
+## References
+
+- User Stories: US-2.1 (Maintain Control Catalog), US-2.3 (Use CCL)
+- PRD: Section 4.2 (Control Management)
+
+## Acceptance Criteria
+
+- [ ] `ControlService`:
+ - CRUD with taxonomy validation
+ - Framework mapping management (add, remove, list)
+ - CCL adoption (creates tenant control linked to CCL entry, inherits mappings)
+ - Effectiveness update based on latest test results
+ - Version history (control changes tracked with diff)
+- [ ] Auto-update effectiveness_rating when test procedures complete
+- [ ] Domain events: `control.created`, `control.updated`, `control.effectiveness_changed`
+- [ ] Integration tests for CCL adoption flow
diff --git a/.github/issues/phase-04-business-logic/069-framework-mapping-engine.md b/.github/issues/phase-04-business-logic/069-framework-mapping-engine.md
new file mode 100644
index 00000000..098d2645
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/069-framework-mapping-engine.md
@@ -0,0 +1,29 @@
+---
+title: "Implement control-framework mapping engine and gap analysis"
+labels: [backend, compliance, domain-logic]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build the engine that manages cross-framework control mappings and performs gap analysis (framework requirements without mapped controls).
+
+## References
+
+- User Stories: US-2.2 (Map Controls Across Frameworks)
+- Use Cases: UC-05 (Cross-Framework Control Mapping)
+- PRD: Section 6.2 (CCL — "test once, comply many")
+
+## Acceptance Criteria
+
+- [ ] Mapping service:
+ - Add/remove control-to-requirement mappings
+ - Bulk mapping via CCL suggested mappings
+ - Coverage matrix: which requirements are covered per framework
+ - Gap analysis: requirements without any mapped controls
+- [ ] Coverage percentage per framework
+- [ ] Duplicate and circular mapping detection
+- [ ] Agent suggestion integration point (stub for Phase 7)
+- [ ] Contracts: no duplicate (control_id, requirement_id) pairs
+- [ ] Unit tests including coverage calculation
diff --git a/.github/issues/phase-04-business-logic/070-assessment-state-machine.md b/.github/issues/phase-04-business-logic/070-assessment-state-machine.md
new file mode 100644
index 00000000..1a91d3cc
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/070-assessment-state-machine.md
@@ -0,0 +1,33 @@
+---
+title: "Implement assessment campaign state machine"
+labels: [backend, assessment, domain-logic]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build a generic, configurable state machine for assessment campaigns and test procedures. State transitions are validated, logged, and can trigger domain events.
+
+## References
+
+- PRD: Section 4.3 (Assessment Campaigns — lifecycle states)
+- Data Model: Section 2.8 (status enum), Section 2.9 (test procedure status)
+- Issue #023 (Design-by-Contract — state machine contracts)
+
+## Acceptance Criteria
+
+- [ ] Generic `StateMachine` class:
+ - Configurable states and valid transitions
+ - Transition guards (callable conditions)
+ - Transition actions (side effects on transition)
+ - Audit logging of every transition
+- [ ] Campaign state machine: planning → active → review → finalized → archived
+- [ ] Test procedure state machine: not_started → in_progress → completed → review → approved
+- [ ] Finding state machine: draft → open → remediation_in_progress → validation → closed
+- [ ] Contracts (formally verified):
+ - `@icontract.require(lambda new_state: new_state in VALID_TRANSITIONS[current_state])`
+ - No state can transition to itself (unless explicitly allowed)
+ - Terminal states (archived, closed) cannot transition further
+- [ ] CrossHair verification of state machine invariants
+- [ ] Unit tests for all valid and invalid transitions
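+
+A compact sketch of the generic transition check; the table shown is the campaign machine from above, and guards, actions, and audit hooks are elided:
+
+```python
+CAMPAIGN_TRANSITIONS: dict[str, set[str]] = {
+    "planning": {"active"},
+    "active": {"review"},
+    "review": {"finalized"},
+    "finalized": {"archived"},
+    "archived": set(),          # terminal state: no outgoing transitions
+}
+
+class StateMachine:
+    def __init__(self, transitions: dict[str, set[str]], initial: str):
+        self.transitions = transitions
+        self.state = initial
+
+    def transition(self, new_state: str) -> None:
+        allowed = self.transitions.get(self.state, set())
+        if new_state not in allowed:
+            raise ValueError(f"illegal transition: {self.state} -> {new_state}")
+        self.state = new_state  # guards, actions, and audit logging hook in here
+```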
diff --git a/.github/issues/phase-04-business-logic/071-test-execution-engine.md b/.github/issues/phase-04-business-logic/071-test-execution-engine.md
new file mode 100644
index 00000000..18139099
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/071-test-execution-engine.md
@@ -0,0 +1,30 @@
+---
+title: "Implement test procedure execution engine"
+labels: [backend, assessment, domain-logic]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build the service that manages test procedure execution — recording step results, calculating progress, and routing completed procedures for review.
+
+## References
+
+- User Stories: US-3.2 (Execute Test Procedures)
+- Use Cases: UC-03 (Execute Test Procedures)
+
+## Acceptance Criteria
+
+- [ ] `TestExecutionService`:
+ - Record step result (actual_result, conclusion, evidence_ids, notes)
+ - Calculate procedure progress (% steps completed)
+ - Mark procedure complete (validates all steps have conclusions)
+ - Submit for review (transitions state, notifies reviewer)
+ - Roll up to campaign progress
+- [ ] Step conclusion validation: pass, fail, na
+- [ ] Overall procedure conclusion derived from step results (any fail → ineffective)
+- [ ] Evidence linking for individual steps
+- [ ] Agent result submission (structured payload → step results)
+- [ ] Domain events: `test.step_completed`, `test.procedure_completed`, `test.submitted_for_review`
+- [ ] Integration tests
diff --git a/.github/issues/phase-04-business-logic/072-sampling.md b/.github/issues/phase-04-business-logic/072-sampling.md
new file mode 100644
index 00000000..b907e07a
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/072-sampling.md
@@ -0,0 +1,26 @@
+---
+title: "Implement sampling methodology"
+labels: [backend, assessment, domain-logic]
+phase: 4
+priority: P2
+---
+
+## Description
+
+Build configurable sampling calculators for audit testing (statistical, judgmental, haphazard).
+
+## References
+
+- PRD: Section 4.3 (Sampling — configurable methodologies)
+- Data Model: Section 2.9 (population_size, sample_size, sampling_method)
+
+## Acceptance Criteria
+
+- [ ] `SamplingCalculator` with strategies:
+ - Statistical: confidence level + tolerable deviation → sample size
+ - Judgmental: auditor-determined with justification
+  - Haphazard: non-statistical, arbitrary selection from the population (documented justification)
+- [ ] Sample size recommendations based on population, frequency, and risk
+- [ ] Random sample selection from population (seeded for reproducibility)
+- [ ] Contracts: sample_size ≤ population_size, sample_size > 0
+- [ ] Unit tests with known statistical tables
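+
+An illustrative sketch of seeded selection plus a zero-expected-error attribute-sampling size (confidence factor divided by tolerable deviation rate, a common audit heuristic). The factor table is illustrative; the authoritative statistical tables should govern:
+
+```python
+import math
+import random
+
+CONFIDENCE_FACTORS = {0.90: 2.3, 0.95: 3.0}     # illustrative zero-error factors
+
+def statistical_sample_size(confidence: float, tolerable_rate: float) -> int:
+    return math.ceil(CONFIDENCE_FACTORS[confidence] / tolerable_rate)
+
+def select_sample(population: list, size: int, seed: int) -> list:
+    # Seeding makes the selection reproducible for re-performance.
+    assert 0 < size <= len(population)
+    return random.Random(seed).sample(population, size)
+```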
diff --git a/.github/issues/phase-04-business-logic/073-evidence-upload-storage.md b/.github/issues/phase-04-business-logic/073-evidence-upload-storage.md
new file mode 100644
index 00000000..91541926
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/073-evidence-upload-storage.md
@@ -0,0 +1,28 @@
+---
+title: "Implement evidence upload and S3 storage integration"
+labels: [backend, evidence, infrastructure]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build the complete evidence upload flow: pre-signed URL generation, S3 storage, hash verification, and metadata registration.
+
+## References
+
+- API Spec: Section 4.6 (Upload flow — pre-signed URL pattern)
+- PRD: Section 4.4 (Artifact Store — encryption, hashing)
+- Issue #035 (Artifact Entity)
+
+## Acceptance Criteria
+
+- [ ] Pre-signed URL generation for direct S3 upload (bypass API server)
+- [ ] Storage key pattern: `{tenant_id}/{year}/{month}/{artifact_id}/{version}/{filename}`
+- [ ] SHA-256 hash verification: client-provided hash compared to S3 object hash
+- [ ] Server-side encryption: AES-256 (SSE-S3 or SSE-KMS)
+- [ ] File size enforcement (configurable per tenant, default 500MB)
+- [ ] Content-type validation (configurable allowed types)
+- [ ] Virus scanning integration point (stub for future ClamAV plugin)
+- [ ] Download via pre-signed URL (time-limited, 15 min default)
+- [ ] Integration tests with MinIO (local S3-compatible)
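+
+A rough cut of the pre-signed URL helpers with boto3; the bucket name and MinIO endpoint are illustrative, and the key follows the pattern above:
+
+```python
+import boto3
+
+s3 = boto3.client("s3", endpoint_url="http://localhost:9000")   # MinIO in dev
+
+def presign_upload(tenant_id: str, year: int, month: int, artifact_id: str,
+                   version: int, filename: str, bucket: str = "gc-evidence") -> str:
+    key = f"{tenant_id}/{year}/{month:02d}/{artifact_id}/{version}/{filename}"
+    return s3.generate_presigned_url(
+        "put_object", Params={"Bucket": bucket, "Key": key}, ExpiresIn=900
+    )
+
+def presign_download(bucket: str, key: str) -> str:
+    # 15-minute expiry per the default above
+    return s3.generate_presigned_url(
+        "get_object", Params={"Bucket": bucket, "Key": key}, ExpiresIn=900
+    )
+```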
diff --git a/.github/issues/phase-04-business-logic/074-evidence-linking.md b/.github/issues/phase-04-business-logic/074-evidence-linking.md
new file mode 100644
index 00000000..521f4cd0
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/074-evidence-linking.md
@@ -0,0 +1,28 @@
+---
+title: "Implement evidence linking and evidence requests"
+labels: [backend, evidence, domain-logic]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build the polymorphic evidence linking system and evidence request lifecycle.
+
+## References
+
+- User Stories: US-4.2 (Link Evidence), US-3.3 (Collect Evidence via Requests)
+- Use Cases: UC-04 (Collect Evidence)
+- Data Model: Section 2.10 (artifact_links), Section 2.11 (evidence_requests)
+
+## Acceptance Criteria
+
+- [ ] Polymorphic linking: attach artifact to any entity (risk, control, test_step, finding)
+- [ ] Context notes on links (why this evidence is relevant)
+- [ ] Unlinking preserves audit trail
+- [ ] Evidence request lifecycle: pending → submitted → accepted/rejected → overdue
+- [ ] Overdue detection (background job or query-time calculation)
+- [ ] Submission auto-links artifacts to the request's control/procedure
+- [ ] Escalation notifications for overdue requests
+- [ ] Domain events: `evidence.linked`, `evidence_request.submitted`, `evidence_request.overdue`
+- [ ] Unit tests
diff --git a/.github/issues/phase-04-business-logic/075-evidence-lineage.md b/.github/issues/phase-04-business-logic/075-evidence-lineage.md
new file mode 100644
index 00000000..8aae1538
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/075-evidence-lineage.md
@@ -0,0 +1,24 @@
+---
+title: "Implement evidence lineage and chain of custody"
+labels: [backend, evidence, domain-logic, audit]
+phase: 4
+priority: P1
+---
+
+## Description
+
+Build the evidence lineage system that tracks the full chain of custody for every artifact.
+
+## References
+
+- User Stories: US-4.4 (Evidence Lineage and Chain of Custody)
+- PRD: Section 4.4 (Evidence Lineage — full chain)
+
+## Acceptance Criteria
+
+- [ ] Lineage timeline: uploaded → linked → reviewed → approved (with actors and timestamps)
+- [ ] Hash verification: confirm artifact integrity at any point
+- [ ] Chain of custody report generation for a set of artifacts
+- [ ] Events aggregated from audit log entries related to the artifact
+- [ ] API endpoint: `GET /artifacts/{id}/lineage` returns ordered timeline
+- [ ] Unit tests
diff --git a/.github/issues/phase-04-business-logic/076-finding-lifecycle.md b/.github/issues/phase-04-business-logic/076-finding-lifecycle.md
new file mode 100644
index 00000000..267d3d47
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/076-finding-lifecycle.md
@@ -0,0 +1,30 @@
+---
+title: "Implement finding lifecycle and deficiency classification"
+labels: [backend, findings, domain-logic]
+phase: 4
+priority: P0
+---
+
+## Description
+
+Build the complete finding lifecycle management including deficiency classification, remediation tracking, and validation.
+
+## References
+
+- User Stories: US-5.1, US-5.2, US-5.3 (Record, Remediate, Validate Findings)
+- PRD: Section 4.5 (Findings & Issues)
+
+## Acceptance Criteria
+
+- [ ] `FindingService`:
+ - Create finding linked to control, procedure, campaign
+ - Deficiency classification: deficiency, significant_deficiency, material_weakness
+ - Lifecycle: draft → open → remediation_in_progress → validation → closed
+ - Remediation plan creation and tracking
+ - Validation testing (re-test after remediation)
+ - Closure with evidence of resolution
+- [ ] Duplicate finding detection (suggest potential matches by control + description similarity)
+- [ ] Finding aggregation across campaigns (unified issues view)
+- [ ] Closed findings update control effectiveness rating
+- [ ] Domain events: `finding.opened`, `finding.closed`, `finding.overdue`
+- [ ] Integration tests for full lifecycle
diff --git a/.github/issues/phase-04-business-logic/077-bulk-import-export.md b/.github/issues/phase-04-business-logic/077-bulk-import-export.md
new file mode 100644
index 00000000..a6fe996a
--- /dev/null
+++ b/.github/issues/phase-04-business-logic/077-bulk-import-export.md
@@ -0,0 +1,29 @@
+---
+title: "Implement bulk import/export (CSV, JSON)"
+labels: [backend, domain-logic, data-migration]
+phase: 4
+priority: P1
+---
+
+## Description
+
+Build bulk import and export capabilities for risks, controls, and findings from CSV and JSON formats.
+
+## References
+
+- User Stories: US-1.1 (Bulk import risks from CSV or JSON)
+- Use Cases: UC-01 (extension 3a — CSV/JSON import)
+
+## Acceptance Criteria
+
+- [ ] Import service for risks, controls, findings:
+ - Parse CSV/JSON with configurable field mapping
+ - Validate each row against schemas and taxonomy
+ - Report row-level errors (don't fail entire import on one bad row)
+ - Return summary: imported, skipped, errors
+- [ ] Export service: query → CSV, JSON, Excel formats
+- [ ] API endpoints: `POST /api/v1/{entity}/import`, `GET /api/v1/{entity}/export`
+- [ ] Import commit modes: all-or-nothing (single transaction) or partial commit (valid rows kept)
+- [ ] Background processing for large imports (> 1000 rows)
+- [ ] Import templates downloadable (CSV with headers)
+- [ ] Unit tests with sample data files
diff --git a/.github/issues/phase-05-events-workflow/078-event-bus.md b/.github/issues/phase-05-events-workflow/078-event-bus.md
new file mode 100644
index 00000000..4b93f838
--- /dev/null
+++ b/.github/issues/phase-05-events-workflow/078-event-bus.md
@@ -0,0 +1,36 @@
+---
+title: "Implement domain event bus (in-process + async)"
+labels: [backend, architecture, events]
+phase: 5
+priority: P0
+---
+
+## Description
+
+Build the internal event bus that decouples domain services. Support both synchronous (in-process) and asynchronous (Redis-backed queue) event handling.
+
+## References
+
+- Architecture: Section 3.5 (Event Bus — events table, sync/async)
+- Architecture: Section 7 (ARQ or Celery for background jobs)
+
+## Acceptance Criteria
+
+- [ ] `backend/src/ground_control/events/`:
+ - `bus.py` — `EventBus` class: `publish(event)`, `subscribe(event_type, handler)`
+ - `types.py` — domain event base class with `event_type`, `timestamp`, `tenant_id`, `actor_id`, `data`
+ - `handlers.py` — handler registration and dispatch
+- [ ] Event types per architecture spec:
+ - `risk.created`, `risk.score_changed`
+ - `control.updated`
+ - `assessment.completed`
+ - `test_procedure.result_submitted`
+ - `artifact.uploaded`
+ - `finding.opened`, `finding.closed`
+ - `agent.result_submitted`
+ - `plugin.installed`
+- [ ] Sync handlers: execute in-process (for simple reactions like cache invalidation)
+- [ ] Async handlers: enqueue to Redis/Valkey streams for background processing
+- [ ] Event replay capability (for rebuilding state or debugging)
+- [ ] Event publishing is transactional (publish only on commit, not on rollback)
+- [ ] Unit tests for publish/subscribe, sync/async dispatch
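+
+A minimal synchronous core as a sketch; async handlers would enqueue to Redis streams instead of running inline, and transactional publish would buffer events until commit:
+
+```python
+from collections import defaultdict
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import Any, Callable
+
+@dataclass
+class DomainEvent:
+    event_type: str
+    tenant_id: str
+    actor_id: str
+    data: dict[str, Any]
+    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+class EventBus:
+    def __init__(self) -> None:
+        self._handlers: dict[str, list[Callable[[DomainEvent], None]]] = defaultdict(list)
+
+    def subscribe(self, event_type: str, handler: Callable[[DomainEvent], None]) -> None:
+        self._handlers[event_type].append(handler)
+
+    def publish(self, event: DomainEvent) -> None:
+        for handler in self._handlers[event.event_type]:
+            handler(event)      # sync path; handler failures must not drop the event
+```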
diff --git a/.github/issues/phase-05-events-workflow/079-background-jobs.md b/.github/issues/phase-05-events-workflow/079-background-jobs.md
new file mode 100644
index 00000000..c554fca7
--- /dev/null
+++ b/.github/issues/phase-05-events-workflow/079-background-jobs.md
@@ -0,0 +1,36 @@
+---
+title: "Implement background job processing"
+labels: [backend, infrastructure, events]
+phase: 5
+priority: P0
+---
+
+## Description
+
+Set up background job processing for async tasks: report generation, email delivery, evidence collection, search indexing, overdue detection.
+
+## References
+
+- Architecture: Section 7 (ARQ or Celery)
+- Deployment: Section 2.3 (worker service)
+
+## Acceptance Criteria
+
+- [ ] Job framework setup (ARQ recommended — lightweight, Redis-backed, async-native):
+ - Worker process: `gc-worker` command
+ - Job registration and discovery
+ - Retry with exponential backoff
+ - Dead letter queue for failed jobs
+ - Job status tracking
+- [ ] Job types:
+ - `send_email` — email notifications
+ - `generate_report` — PDF/PPTX generation
+ - `sync_search_index` — update Meilisearch
+ - `check_overdue` — find overdue evidence requests, treatments, remediations
+ - `collect_evidence` — run evidence collection plugins
+- [ ] Scheduled jobs (cron-like):
+ - Overdue check: every hour
+ - Search index sync: every 5 minutes (or event-driven)
+- [ ] Job monitoring: queue depth, processing time, failure rate (Prometheus metrics)
+- [ ] Graceful shutdown: finish current job before stopping
+- [ ] Unit tests with mock Redis
diff --git a/.github/issues/phase-05-events-workflow/080-workflow-engine.md b/.github/issues/phase-05-events-workflow/080-workflow-engine.md
new file mode 100644
index 00000000..134b030b
--- /dev/null
+++ b/.github/issues/phase-05-events-workflow/080-workflow-engine.md
@@ -0,0 +1,29 @@
+---
+title: "Implement workflow engine (configurable state machines)"
+labels: [backend, domain-logic, workflow]
+phase: 5
+priority: P0
+---
+
+## Description
+
+Build a generic, configurable workflow engine for review and approval chains. Used for workpaper review, finding validation, evidence acceptance, and custom workflows.
+
+## References
+
+- PRD: Section 4.7 (Review Workflows — multi-level chains)
+- User Stories: US-3.4 (Review and Approve Workpapers)
+
+## Acceptance Criteria
+
+- [ ] `WorkflowEngine`:
+ - Configurable review chains: preparer → reviewer → approver
+ - Multi-level chains (1, 2, or 3 levels, configurable per entity type)
+ - Transition actions: approve, reject (with comments), request_changes
+ - Automatic routing to next reviewer on approval
+ - Lock entity on final approval (prevent further edits)
+- [ ] Workflow configuration per tenant and entity type
+- [ ] Review status tracking on campaigns dashboard
+- [ ] Rejection returns to preparer with reviewer comments
+- [ ] Domain events: `workflow.submitted`, `workflow.approved`, `workflow.rejected`
+- [ ] Unit tests for all workflow paths
diff --git a/.github/issues/phase-05-events-workflow/081-notification-system.md b/.github/issues/phase-05-events-workflow/081-notification-system.md
new file mode 100644
index 00000000..646c653e
--- /dev/null
+++ b/.github/issues/phase-05-events-workflow/081-notification-system.md
@@ -0,0 +1,32 @@
+---
+title: "Implement notification system (in-app + email)"
+labels: [backend, collaboration, notifications]
+phase: 5
+priority: P1
+---
+
+## Description
+
+Build the notification system that delivers in-app notifications and email alerts based on domain events and workflow actions.
+
+## References
+
+- PRD: Section 4.7 (Notifications — in-app, email, Slack/Teams)
+- Data Model: Section 2.19 (Notification entity)
+
+## Acceptance Criteria
+
+- [ ] In-app notifications:
+ - Created from domain events (assignment, deadline, review request, etc.)
+ - Stored in notifications table
+ - API: list, mark-read, mark-all-read, count unread
+ - Real-time delivery via SSE (Server-Sent Events) or WebSocket (optional)
+- [ ] Email notifications:
+ - SMTP integration (configurable via settings)
+ - HTML email templates (Jinja2)
+ - Template types: assignment, evidence request, overdue, review, finding
+ - Configurable per-user preferences (opt-in/out per event type)
+ - Queued as background jobs
+- [ ] Notification preferences: user can configure which events trigger email vs. in-app only
+- [ ] Bulk notification creation (e.g., notify all reviewers)
+- [ ] Unit tests with mock SMTP
diff --git a/.github/issues/phase-05-events-workflow/082-webhook-system.md b/.github/issues/phase-05-events-workflow/082-webhook-system.md
new file mode 100644
index 00000000..627f3665
--- /dev/null
+++ b/.github/issues/phase-05-events-workflow/082-webhook-system.md
@@ -0,0 +1,29 @@
+---
+title: "Implement outbound webhook system"
+labels: [backend, integrations, events]
+phase: 5
+priority: P1
+---
+
+## Description
+
+Build the outbound webhook system for notifying external systems of domain events.
+
+## References
+
+- API Spec: Section 6 (Webhook Events — subscription, payload, HMAC signing)
+
+## Acceptance Criteria
+
+- [ ] Webhook subscription management:
+ - `POST /api/v1/webhooks` — create subscription (url, events, secret)
+ - `GET /api/v1/webhooks` — list
+ - `DELETE /api/v1/webhooks/{id}` — remove
+- [ ] Webhook delivery:
+ - Payload matches spec format (id, type, timestamp, tenant_id, data)
+ - HMAC-SHA256 signing with shared secret (`X-GC-Signature` header)
+ - Retry with exponential backoff on failure (3 attempts)
+ - Timeout: 10 seconds per delivery
+- [ ] Delivery logging: status, response code, retry count
+- [ ] Webhook testing: `POST /api/v1/webhooks/{id}/test` sends test event
+- [ ] Unit tests
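+
+A small sketch of computing the signature; the canonical-JSON choice is an assumption, and the receiver recomputes the digest and compares with `hmac.compare_digest`:
+
+```python
+import hashlib
+import hmac
+import json
+
+def sign_payload(payload: dict, secret: str) -> str:
+    """Value for the X-GC-Signature header on an outbound delivery."""
+    body = json.dumps(payload, sort_keys=True, separators=(",", ":")).encode()
+    return hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
+```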
diff --git a/.github/issues/phase-05-events-workflow/083-slack-teams.md b/.github/issues/phase-05-events-workflow/083-slack-teams.md
new file mode 100644
index 00000000..d72b2ef7
--- /dev/null
+++ b/.github/issues/phase-05-events-workflow/083-slack-teams.md
@@ -0,0 +1,25 @@
+---
+title: "Implement Slack and Microsoft Teams notification integration"
+labels: [backend, integrations, notifications]
+phase: 5
+priority: P2
+---
+
+## Description
+
+Build Slack and Teams webhook integrations for sending notifications to team channels.
+
+## References
+
+- PRD: Section 4.7 (Notifications — Slack/Teams webhooks)
+- PRD: Section 8.2 (Communication — Slack, Microsoft Teams)
+
+## Acceptance Criteria
+
+- [ ] Slack incoming webhook integration (configurable per tenant)
+- [ ] Teams incoming webhook integration
+- [ ] Message formatting: rich cards with entity details and action links
+- [ ] Configurable: which events trigger channel notifications
+- [ ] Channel routing: different events → different channels
+- [ ] Admin configuration endpoint
+- [ ] Unit tests with mock webhook endpoints
diff --git a/.github/issues/phase-05-events-workflow/084-task-assignment.md b/.github/issues/phase-05-events-workflow/084-task-assignment.md
new file mode 100644
index 00000000..c6a1dde2
--- /dev/null
+++ b/.github/issues/phase-05-events-workflow/084-task-assignment.md
@@ -0,0 +1,25 @@
+---
+title: "Implement task assignment and SLA tracking"
+labels: [backend, collaboration, domain-logic]
+phase: 5
+priority: P1
+---
+
+## Description
+
+Build the task assignment system with due dates and SLA tracking for all assignable work items (test procedures, evidence requests, remediation actions).
+
+## References
+
+- PRD: Section 4.7 (Task Assignment — due dates, SLA tracking)
+
+## Acceptance Criteria
+
+  - `assigned_to` (user or group), `due_date`, `priority`, `status`
+ - Assigned_to (user or group), due_date, priority, status
+ - SLA configuration per task type (e.g., evidence requests: 5 business days)
+- [ ] Task list API: `GET /api/v1/users/{id}/tasks` — all assigned items across entity types
+- [ ] Overdue detection and escalation
+- [ ] SLA breach notifications (approaching and breached)
+- [ ] Dashboard data: tasks by status, overdue counts, SLA compliance percentage
+- [ ] Unit tests
diff --git a/.github/issues/phase-06-frameworks/085-framework-loader.md b/.github/issues/phase-06-frameworks/085-framework-loader.md
new file mode 100644
index 00000000..18e4a4fe
--- /dev/null
+++ b/.github/issues/phase-06-frameworks/085-framework-loader.md
@@ -0,0 +1,43 @@
+---
+title: "Implement framework plugin loader (YAML/JSON definitions)"
+labels: [backend, compliance, plugins]
+phase: 6
+priority: P0
+---
+
+## Description
+
+Build the framework loading system that parses YAML/JSON framework definitions and imports them into the database. This is the mechanism by which compliance frameworks (SOX, SOC 2, ISO 27001, etc.) are loaded into Ground Control.
+
+## References
+
+- API Spec: Section 8.6 (Framework Plugin Example — YAML format)
+- PRD: Section 3 (Frameworks & Standards Supported)
+- Architecture: Section 3.6 (Plugin Runtime — Framework Plugins)
+
+## Acceptance Criteria
+
+- [ ] Framework definition YAML schema:
+ ```yaml
+ framework:
+ name: "Framework Name"
+ version: "1.0"
+ description: "..."
+ requirements:
+ - ref_id: "1"
+ title: "..."
+ children:
+ - ref_id: "1.1"
+ title: "..."
+ ccl_mappings:
+ - requirement: "1.1"
+ ccl_entries: ["CC-XX-001"]
+ ```
+- [ ] Loader service: `FrameworkLoader.load(yaml_path) → Framework`
+ - Parses YAML/JSON, validates schema
+ - Creates framework and requirements (hierarchical)
+ - Creates CCL mappings if provided
+ - Idempotent: re-loading updates without duplicating
+- [ ] Version management: track framework versions, support updates
+- [ ] CLI command: `gc-admin load-framework path/to/framework.yaml`
+- [ ] Unit tests with sample framework definitions
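+
+A possible shape for the parse step, assuming PyYAML; persistence and the idempotent upsert are elided:
+
+```python
+import yaml
+
+def parse_framework(path: str) -> dict:
+    with open(path) as fh:
+        fw = yaml.safe_load(fh)["framework"]
+    if not fw.get("name") or not fw.get("version"):
+        raise ValueError("framework name and version are required")
+
+    def walk(reqs, parent=None):
+        # Flatten the requirement tree while remembering each node's parent.
+        for req in reqs or []:
+            yield {"ref_id": req["ref_id"], "title": req["title"], "parent": parent}
+            yield from walk(req.get("children"), parent=req["ref_id"])
+
+    return {
+        "name": fw["name"],
+        "version": fw["version"],
+        "requirements": list(walk(fw.get("requirements"))),
+        "ccl_mappings": fw.get("ccl_mappings", []),
+    }
+```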
diff --git a/.github/issues/phase-06-frameworks/086-sox-framework.md b/.github/issues/phase-06-frameworks/086-sox-framework.md
new file mode 100644
index 00000000..226c61a5
--- /dev/null
+++ b/.github/issues/phase-06-frameworks/086-sox-framework.md
@@ -0,0 +1,27 @@
+---
+title: "Create SOX ITGC framework definition"
+labels: [compliance, frameworks, content]
+phase: 6
+priority: P0
+---
+
+## Description
+
+Create the SOX IT General Controls framework definition with control objectives, test procedures, and CCL mappings.
+
+## References
+
+- PRD: Section 3 (SOX ITGC — Full coverage)
+- PRD: Section 4.3 (Assessment — SOX ITGC testing)
+
+## Acceptance Criteria
+
+- [ ] `plugins/frameworks/sox-itgc.yaml` with:
+ - SOX ITGC domains: Access to Programs and Data, Program Changes, Program Development, Computer Operations
+ - Control objectives per domain
+ - Sub-objectives and testing guidance
+ - CCL mappings for each objective
+- [ ] Walkthrough guidance for each control area
+- [ ] Deficiency classification guidance (deficiency → significant deficiency → material weakness)
+- [ ] Loaded via framework loader (#085)
+- [ ] Verified against authoritative SOX ITGC references
diff --git a/.github/issues/phase-06-frameworks/087-soc2-framework.md b/.github/issues/phase-06-frameworks/087-soc2-framework.md
new file mode 100644
index 00000000..df574f4f
--- /dev/null
+++ b/.github/issues/phase-06-frameworks/087-soc2-framework.md
@@ -0,0 +1,24 @@
+---
+title: "Create SOC 2 Trust Services framework definition"
+labels: [compliance, frameworks, content]
+phase: 6
+priority: P0
+---
+
+## Description
+
+Create the SOC 2 Trust Services Criteria framework definition covering all five categories.
+
+## References
+
+- PRD: Section 3 (SOC 2 — Full, all five categories)
+
+## Acceptance Criteria
+
+- [ ] `plugins/frameworks/soc2-tsc.yaml` with:
+ - Categories: Security (CC), Availability (A), Processing Integrity (PI), Confidentiality (C), Privacy (P)
+ - All Common Criteria (CC1-CC9) with points of focus
+ - Additional criteria per category
+ - CCL mappings
+- [ ] Loaded via framework loader
+- [ ] Coverage matrix generation verified
diff --git a/.github/issues/phase-06-frameworks/088-iso27001-framework.md b/.github/issues/phase-06-frameworks/088-iso27001-framework.md
new file mode 100644
index 00000000..cede2eeb
--- /dev/null
+++ b/.github/issues/phase-06-frameworks/088-iso27001-framework.md
@@ -0,0 +1,22 @@
+---
+title: "Create ISO 27001:2022 framework definition"
+labels: [compliance, frameworks, content]
+phase: 6
+priority: P1
+---
+
+## Description
+
+Create the ISO 27001:2022 framework with Annex A controls and Statement of Applicability support.
+
+## References
+
+- PRD: Section 3 (ISO 27001:2022 — Full, Annex A controls, SoA generation)
+
+## Acceptance Criteria
+
+- [ ] `plugins/frameworks/iso27001-2022.yaml` with all Annex A controls (93 controls, 4 themes)
+- [ ] Themes: Organizational, People, Physical, Technological
+- [ ] CCL mappings for cross-framework coverage
+- [ ] Statement of Applicability (SoA) generation support
+- [ ] Loaded via framework loader
diff --git a/.github/issues/phase-06-frameworks/089-nist-frameworks.md b/.github/issues/phase-06-frameworks/089-nist-frameworks.md
new file mode 100644
index 00000000..219a9a1b
--- /dev/null
+++ b/.github/issues/phase-06-frameworks/089-nist-frameworks.md
@@ -0,0 +1,26 @@
+---
+title: "Create NIST CSF 2.0 and SP 800-53 Rev 5 framework definitions"
+labels: [compliance, frameworks, content]
+phase: 6
+priority: P1
+---
+
+## Description
+
+Create NIST Cybersecurity Framework 2.0 and NIST SP 800-53 Rev 5 framework definitions.
+
+## References
+
+- PRD: Section 3 (NIST CSF 2.0 — Full, NIST 800-53 — Full with enhancements)
+
+## Acceptance Criteria
+
+- [ ] `plugins/frameworks/nist-csf-2.0.yaml`:
+ - Six functions: Govern, Identify, Protect, Detect, Respond, Recover
+ - Categories and subcategories
+ - CCL mappings
+- [ ] `plugins/frameworks/nist-800-53-r5.yaml`:
+ - All 20 control families
+ - Base controls and enhancements
+ - CCL mappings
+- [ ] Both loaded via framework loader
diff --git a/.github/issues/phase-06-frameworks/090-additional-frameworks.md b/.github/issues/phase-06-frameworks/090-additional-frameworks.md
new file mode 100644
index 00000000..c6d7fa5d
--- /dev/null
+++ b/.github/issues/phase-06-frameworks/090-additional-frameworks.md
@@ -0,0 +1,24 @@
+---
+title: "Create PCI-DSS v4.0, CIS Controls v8, and supplemental framework definitions"
+labels: [compliance, frameworks, content]
+phase: 6
+priority: P2
+---
+
+## Description
+
+Create additional framework definitions for PCI-DSS, CIS Controls, COBIT, GDPR, and HIPAA.
+
+## References
+
+- PRD: Section 3 (PCI-DSS v4.0, CIS Controls v8, COBIT 2019, GDPR, HIPAA)
+
+## Acceptance Criteria
+
+- [ ] `plugins/frameworks/pci-dss-v4.yaml` — 12 requirements with sub-requirements
+- [ ] `plugins/frameworks/cis-controls-v8.yaml` — safeguards mapped to implementation groups
+- [ ] `plugins/frameworks/cobit-2019.yaml` — governance and management objectives
+- [ ] `plugins/frameworks/gdpr-article32.yaml` — data protection controls overlay
+- [ ] `plugins/frameworks/hipaa-security.yaml` — administrative, physical, technical safeguards
+- [ ] All with CCL mappings where applicable
+- [ ] All loaded via framework loader
diff --git a/.github/issues/phase-06-frameworks/091-ccl-seed-data.md b/.github/issues/phase-06-frameworks/091-ccl-seed-data.md
new file mode 100644
index 00000000..35fb40b8
--- /dev/null
+++ b/.github/issues/phase-06-frameworks/091-ccl-seed-data.md
@@ -0,0 +1,29 @@
+---
+title: "Create Common Control Library seed data"
+labels: [compliance, content, data]
+phase: 6
+priority: P0
+---
+
+## Description
+
+Create the initial seed data for the Common Control Library with standardized control definitions mapped across all supported frameworks.
+
+## References
+
+- PRD: Section 6.2 (Common Control Library — CCL is the heart of reusability)
+- Data Model: Section 2.7 (CCL Entries)
+
+## Acceptance Criteria
+
+- [ ] CCL seed data covering major control domains:
+ - Access Management (CC-AM-001 through CC-AM-010)
+ - Change Management (CC-CM-001 through CC-CM-008)
+ - Operations (CC-OP-001 through CC-OP-008)
+ - Data Protection (CC-DP-001 through CC-DP-006)
+ - Incident Management (CC-IM-001 through CC-IM-005)
+ - Business Continuity (CC-BC-001 through CC-BC-004)
+- [ ] Each entry includes: ref_id, title, description, category, control_type, control_nature
+- [ ] Cross-framework mappings for each entry (SOX, SOC 2, ISO 27001, NIST at minimum)
+- [ ] Seed data loadable via CLI: `gc-admin seed-ccl`
+- [ ] Data format: YAML for human review, loaded programmatically
diff --git a/.github/issues/phase-06-frameworks/092-template-library.md b/.github/issues/phase-06-frameworks/092-template-library.md
new file mode 100644
index 00000000..230e23a9
--- /dev/null
+++ b/.github/issues/phase-06-frameworks/092-template-library.md
@@ -0,0 +1,27 @@
+---
+title: "Implement template library (assessments, test procedures, workpapers)"
+labels: [backend, compliance, content]
+phase: 6
+priority: P1
+---
+
+## Description
+
+Build the template library system for reusable assessment structures, test procedures, and workpaper formats.
+
+## References
+
+- PRD: Section 6.3 (Template Library — assessment, test procedure, workpaper, finding, report templates)
+- User Stories: US-3.1 (Campaign generates workpapers from templates)
+
+## Acceptance Criteria
+
+- [ ] Template entity model: name, type, version, content (JSONB), tags, tenant_id (nullable for global)
+- [ ] Template types: assessment, test_procedure, workpaper, finding, report
+- [ ] Template instantiation: creates concrete entity from template
+- [ ] Version management for templates
+- [ ] Shareable between tenants (if tenant_id = NULL → global)
+- [ ] Import/export templates as YAML/JSON
+- [ ] API endpoints: list, get, create, update, instantiate
+- [ ] Seed templates for SOX ITGC and SOC 2 assessments
+- [ ] Unit tests
diff --git a/.github/issues/phase-07-agents/093-agent-registration.md b/.github/issues/phase-07-agents/093-agent-registration.md
new file mode 100644
index 00000000..138228f3
--- /dev/null
+++ b/.github/issues/phase-07-agents/093-agent-registration.md
@@ -0,0 +1,29 @@
+---
+title: "Implement agent registration and lifecycle management"
+labels: [backend, agents, api]
+phase: 7
+priority: P0
+---
+
+## Description
+
+Build the agent registration system — registering AI agents with credentials, permissions, and lifecycle management.
+
+## References
+
+- Data Model: Section 2.14 (Agent Registration)
+- PRD: Section 5 (Agent-First Design)
+- User Stories: US-8.1 (Register an Agent)
+- API Spec: Section 4.9 (Agents)
+
+## Acceptance Criteria
+
+- [ ] Agent registration: `POST /api/v1/agents` creates agent with name, description, owner, allowed_scopes
+- [ ] OAuth2 client credentials generated (client_id + client_secret)
+- [ ] Client secret returned once (on creation), stored as Argon2id hash
+- [ ] Agent assigned a role that limits permissions
+- [ ] Agent lifecycle: active, suspended, revoked
+- [ ] Agent CRUD endpoints per API spec
+- [ ] Agent credential rotation: `POST /api/v1/agents/{id}/rotate-secret`
+- [ ] All agent registration/modification audit-logged
+- [ ] Unit tests
diff --git a/.github/issues/phase-07-agents/094-agent-assignments.md b/.github/issues/phase-07-agents/094-agent-assignments.md
new file mode 100644
index 00000000..3318f8ab
--- /dev/null
+++ b/.github/issues/phase-07-agents/094-agent-assignments.md
@@ -0,0 +1,31 @@
+---
+title: "Implement agent assignment retrieval and result submission"
+labels: [backend, agents, api]
+phase: 7
+priority: P0
+---
+
+## Description
+
+Build the APIs for agents to retrieve their assignments and submit structured test results.
+
+## References
+
+- API Spec: Section 4.9 (Agents — assignments, history)
+- User Stories: US-8.2 (Agent Retrieves Assignments), US-8.3 (Agent Submits Results)
+- Use Cases: UC-06 (Agent-Performed Testing)
+
+## Acceptance Criteria
+
+- [ ] `GET /api/v1/agents/{id}/assignments` — pending test procedures assigned to agent
+ - Includes full context: control description, test steps, prior results, linked evidence
+ - Filterable by campaign, priority, due date
+- [ ] `POST /api/v1/test-procedures/{id}/results` — submit agent results:
+ - Per-step results (actual_result, conclusion, evidence_ids)
+ - Overall conclusion and confidence score
+ - Provenance metadata (model, version, input_hash)
+- [ ] Results automatically flagged as `agent_produced = true`
+- [ ] Submission triggers notification to assigned human reviewer
+- [ ] Invalid/incomplete payloads return descriptive 422 errors
+- [ ] `GET /api/v1/agents/{id}/history` — agent action history
+- [ ] Integration tests
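+
+## Implementation Sketch
+
+An illustrative submission call with `httpx`; the payload shape follows the criteria above, and any field or value not listed there is an assumption.
+
+```python
+import httpx
+
+procedure_id = "..."  # assigned test procedure
+token = "..."  # OAuth2 access token from the client credentials flow
+
+payload = {
+    "steps": [
+        {
+            "step_id": "...",
+            "actual_result": "All 25 sampled accounts had MFA enforced.",
+            "conclusion": "pass",
+            "evidence_ids": ["..."],
+        }
+    ],
+    "conclusion": "effective",
+    "confidence": 0.92,
+    "provenance": {"model": "...", "version": "...", "input_hash": "..."},
+}
+
+resp = httpx.post(
+    f"https://gc.example.com/api/v1/test-procedures/{procedure_id}/results",
+    json=payload,
+    headers={"Authorization": f"Bearer {token}"},
+)
+resp.raise_for_status()  # malformed payloads come back as descriptive 422 errors
+```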
diff --git a/.github/issues/phase-07-agents/095-agent-provenance.md b/.github/issues/phase-07-agents/095-agent-provenance.md
new file mode 100644
index 00000000..586f6ff7
--- /dev/null
+++ b/.github/issues/phase-07-agents/095-agent-provenance.md
@@ -0,0 +1,28 @@
+---
+title: "Implement agent provenance tracking"
+labels: [backend, agents, audit]
+phase: 7
+priority: P0
+---
+
+## Description
+
+Build the provenance tracking system that records full context for every agent action, enabling reproducibility and trust verification.
+
+## References
+
+- PRD: Section 5.3 (Agent Provenance — identity, model, input hash, confidence, review status)
+
+## Acceptance Criteria
+
+- [ ] Provenance record for every agent action:
+ - Agent identity (registered agent_id, human owner)
+ - Model/version used (e.g., "claude-opus-4-6")
+ - Input context hash (SHA-256 of inputs for reproducibility)
+ - Confidence score (0.0 - 1.0)
+ - Human review status: pending, approved, rejected
+ - Timestamp and request context
+- [ ] Provenance stored as JSONB on test procedures and other agent-touched entities
+- [ ] Provenance query API: filter by agent, model, confidence, review status
+- [ ] Dashboard data: agent performance metrics (approval rate, confidence distribution)
+- [ ] Unit tests
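+
+## Implementation Sketch
+
+A sketch of the provenance record and input hashing, assuming Pydantic; field names mirror the list above, while the JSON canonicalization choice is an assumption.
+
+```python
+import hashlib
+import json
+from datetime import datetime, timezone
+
+from pydantic import BaseModel, Field
+
+
+def input_context_hash(inputs: dict) -> str:
+    """SHA-256 over canonicalized inputs so a run can be reproduced and verified."""
+    canonical = json.dumps(inputs, sort_keys=True, separators=(",", ":"))
+    return hashlib.sha256(canonical.encode()).hexdigest()
+
+
+class ProvenanceRecord(BaseModel):
+    agent_id: str
+    owner_id: str  # the registered agent's human owner
+    model: str  # model/version used for the action
+    input_hash: str
+    confidence: float = Field(ge=0.0, le=1.0)
+    review_status: str = "pending"  # pending | approved | rejected
+    recorded_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+```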
diff --git a/.github/issues/phase-07-agents/096-agent-sdk-python.md b/.github/issues/phase-07-agents/096-agent-sdk-python.md
new file mode 100644
index 00000000..9a7e243d
--- /dev/null
+++ b/.github/issues/phase-07-agents/096-agent-sdk-python.md
@@ -0,0 +1,34 @@
+---
+title: "Build Agent SDK — Python package"
+labels: [sdk, agents, python]
+phase: 7
+priority: P1
+---
+
+## Description
+
+Build the Python Agent SDK as a standalone package (`ground-control-agent-sdk`) for building AI agents that interact with Ground Control.
+
+## References
+
+- API Spec: Section 9.1 (Python Agent SDK)
+- PRD: Section 10 (v0.4 — Agent SDK Python)
+
+## Acceptance Criteria
+
+- [ ] Package: `sdks/python/` with `pyproject.toml`
+- [ ] Published as: `ground-control-agent-sdk` on PyPI
+- [ ] `AgentClient` class with methods:
+ - `authenticate()` — OAuth2 client credentials
+ - `get_assignments(status, campaign_type)` — retrieve pending work
+ - `get_test_procedure(id)` — get full procedure context
+ - `submit_results(procedure_id, steps, conclusion, confidence, notes)` — submit results
+ - `upload_artifact(file_data, filename, tags)` — upload evidence
+ - `link_artifact(artifact_id, entity_type, entity_id)` — link evidence
+- [ ] Async-native (asyncio/httpx)
+- [ ] Automatic token refresh
+- [ ] Structured error handling (typed exceptions)
+- [ ] Type hints on all public methods (PEP 561 compliant)
+- [ ] Examples directory with sample agent scripts
+- [ ] Unit tests
+- [ ] Documentation (docstrings + README)
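+
+## Implementation Sketch
+
+A hypothetical usage sketch of the planned `AgentClient`; the method names come from the criteria above, while the import path, URLs, and data shapes are assumptions.
+
+```python
+import asyncio
+
+from ground_control_agent_sdk import AgentClient  # hypothetical import path
+
+
+async def main() -> None:
+    client = AgentClient(
+        base_url="https://gc.example.com",
+        client_id="agent_...",
+        client_secret="...",
+    )
+    await client.authenticate()  # OAuth2 client credentials flow
+    for assignment in await client.get_assignments(status="pending"):
+        procedure = await client.get_test_procedure(assignment.procedure_id)
+        # ... run the agent's testing logic against `procedure` ...
+        await client.submit_results(
+            procedure_id=procedure.id,
+            steps=[...],  # per-step results
+            conclusion="effective",
+            confidence=0.9,
+            notes="Automated test run.",
+        )
+
+
+asyncio.run(main())
+```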
diff --git a/.github/issues/phase-07-agents/097-agent-sdk-typescript.md b/.github/issues/phase-07-agents/097-agent-sdk-typescript.md
new file mode 100644
index 00000000..f9b9bc96
--- /dev/null
+++ b/.github/issues/phase-07-agents/097-agent-sdk-typescript.md
@@ -0,0 +1,25 @@
+---
+title: "Build Agent SDK — TypeScript package"
+labels: [sdk, agents, typescript]
+phase: 7
+priority: P1
+---
+
+## Description
+
+Build the TypeScript Agent SDK as an npm package (`@ground-control/agent-sdk`).
+
+## References
+
+- API Spec: Section 9.2 (TypeScript Agent SDK)
+- PRD: Section 10 (v0.4 — Agent SDK TypeScript)
+
+## Acceptance Criteria
+
+- [ ] Package: `sdks/typescript/` with `package.json`, `tsconfig.json`
+- [ ] Published as: `@ground-control/agent-sdk`
+- [ ] `AgentClient` class matching Python SDK functionality
+- [ ] Full TypeScript types for all request/response payloads
+- [ ] Works in Node.js (ESM and CJS)
+- [ ] Unit tests (vitest)
+- [ ] README with examples
diff --git a/.github/issues/phase-07-agents/098-automated-evidence-collection.md b/.github/issues/phase-07-agents/098-automated-evidence-collection.md
new file mode 100644
index 00000000..5ee1d5ca
--- /dev/null
+++ b/.github/issues/phase-07-agents/098-automated-evidence-collection.md
@@ -0,0 +1,31 @@
+---
+title: "Implement automated evidence collection plugin interface"
+labels: [backend, agents, evidence, plugins]
+phase: 7
+priority: P1
+---
+
+## Description
+
+Build the interface and scheduling system for automated evidence collection plugins that pull evidence from external systems.
+
+## References
+
+- User Stories: US-4.3 (Automated Evidence Collection)
+- Use Cases: UC-04 (extension 2a — automated collection)
+- API Spec: Section 8 (Plugin Architecture — Evidence Collector type)
+
+## Acceptance Criteria
+
+- [ ] Evidence collector plugin interface:
+ - `collect(control_id, config) → list[Artifact]`
+ - Configuration schema (JSONB) per collector
+ - Scheduling: cron-like or event-triggered
+- [ ] Collection run logging: status, timestamp, artifact count, errors
+- [ ] Auto-linking: collected artifacts linked to configured controls
+- [ ] Failed collection alerts
+- [ ] Built-in collectors (stubs for Phase 10 plugin implementation):
+ - AWS Config snapshot
+ - Jira query (extract tickets matching criteria)
+ - GitHub PR list (change management evidence)
+- [ ] Unit tests with mock source systems
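+
+## Implementation Sketch
+
+A sketch of the collector interface as a structural type; `Artifact` is a stand-in for the domain entity, and the GitHub collector body is a stub pending Phase 10, per the criteria above.
+
+```python
+from typing import Any, Protocol
+
+
+class Artifact:
+    """Stand-in for the domain artifact entity."""
+
+
+class EvidenceCollector(Protocol):
+    """Interface every evidence collector plugin implements."""
+
+    def collect(self, control_id: str, config: dict[str, Any]) -> list[Artifact]:
+        """Pull evidence from the source system and return artifacts to store."""
+        ...
+
+
+class GitHubPRCollector:
+    """Illustrative built-in collector: change-management evidence from PRs."""
+
+    def collect(self, control_id: str, config: dict[str, Any]) -> list[Artifact]:
+        # query the GitHub API for merged PRs matching config["query"],
+        # serialize each to an artifact, and return them for auto-linking
+        raise NotImplementedError  # stub until Phase 10
+```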
diff --git a/.github/issues/phase-07-agents/099-agent-mapping-suggestions.md b/.github/issues/phase-07-agents/099-agent-mapping-suggestions.md
new file mode 100644
index 00000000..3f045bc8
--- /dev/null
+++ b/.github/issues/phase-07-agents/099-agent-mapping-suggestions.md
@@ -0,0 +1,25 @@
+---
+title: "Implement AI-powered control mapping suggestions"
+labels: [backend, agents, compliance, ai]
+phase: 7
+priority: P2
+---
+
+## Description
+
+Build the endpoint through which AI agents analyze control descriptions and suggest framework mappings with confidence scores.
+
+## References
+
+- User Stories: US-8.4 (Agent Suggests Control Mappings)
+- Use Cases: UC-05 (extension 2a-2b — AI suggestions)
+
+## Acceptance Criteria
+
+- [ ] `POST /api/v1/controls/{id}/suggest-mappings` triggers agent analysis
+- [ ] Response: ranked suggestions with requirement_id and confidence score
+- [ ] Suggestions stored as pending (not auto-applied)
+- [ ] Analyst can approve/reject each suggestion via API
+- [ ] Approved suggestions create actual control-framework mappings
+- [ ] Suggestion history retained for agent performance tracking
+- [ ] Unit tests
diff --git a/.github/issues/phase-08-reporting-search/100-search-integration.md b/.github/issues/phase-08-reporting-search/100-search-integration.md
new file mode 100644
index 00000000..725b2c5c
--- /dev/null
+++ b/.github/issues/phase-08-reporting-search/100-search-integration.md
@@ -0,0 +1,28 @@
+---
+title: "Implement search integration (Meilisearch) and indexing"
+labels: [backend, search, infrastructure]
+phase: 8
+priority: P0
+---
+
+## Description
+
+Integrate Meilisearch for full-text search across risks, controls, findings, artifacts, and framework requirements.
+
+## References
+
+- Architecture: Section 3.7 (Search Index — Meilisearch)
+- Data Model: Section 4.3 (Search Index — indexed entities)
+- API Spec: Section 4.14 (Search endpoint)
+
+## Acceptance Criteria
+
+- [ ] Meilisearch client setup in `backend/src/ground_control/infrastructure/search/`
+- [ ] Index configuration for: risks, controls, findings, artifacts, framework_requirements
+- [ ] Searchable fields per entity (title, description, ref_id, tags)
+- [ ] Filterable attributes (tenant_id, status, category, type)
+- [ ] Index sync: event-driven (on create/update/delete) + full rebuild command
+- [ ] Search API: `GET /api/v1/search?q=...&type=...` with typo-tolerant results
+- [ ] Tenant isolation in search (filter by tenant_id)
+- [ ] `gc-admin reindex-search` CLI command for full rebuild
+- [ ] Unit tests with embedded Meilisearch or mock
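+
+## Implementation Sketch
+
+A tenant-isolated search sketch using the official `meilisearch` Python client; index and attribute names follow the criteria above, and the URL/key are placeholders.
+
+```python
+import meilisearch
+
+client = meilisearch.Client("http://localhost:7700", "masterKey")
+index = client.index("controls")
+
+# attributes must be declared filterable before tenant scoping works
+index.update_filterable_attributes(["tenant_id", "status", "type"])
+
+tenant_id = "..."  # resolved from the authenticated request, never from the client
+results = index.search(
+    "acess reviw",  # typo-tolerant: still matches "access review"
+    {"filter": f'tenant_id = "{tenant_id}"'},
+)
+```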
diff --git a/.github/issues/phase-08-reporting-search/101-dashboard-apis.md b/.github/issues/phase-08-reporting-search/101-dashboard-apis.md
new file mode 100644
index 00000000..20538fe3
--- /dev/null
+++ b/.github/issues/phase-08-reporting-search/101-dashboard-apis.md
@@ -0,0 +1,28 @@
+---
+title: "Implement dashboard data aggregation APIs"
+labels: [backend, reporting, api]
+phase: 8
+priority: P0
+---
+
+## Description
+
+Build the aggregation APIs that power executive dashboards with pre-computed data.
+
+## References
+
+- API Spec: Section 4.12 (Reports — dashboard endpoints)
+- User Stories: US-1.5 (View Risk Dashboard), US-6.1 (Generate Executive Reports)
+
+## Acceptance Criteria
+
+- [ ] Dashboard endpoints:
+ - `GET /api/v1/reports/dashboards/risk-posture` — heat map data, top risks, trends, appetite breaches
+ - `GET /api/v1/reports/dashboards/control-health` — effectiveness distribution, test coverage, aging
+ - `GET /api/v1/reports/dashboards/assessment-progress` — campaign status, completion %, overdue
+ - `GET /api/v1/reports/dashboards/findings-summary` — open findings, severity distribution, aging
+- [ ] All dashboards filterable by: business_unit, date range, framework
+- [ ] Response caching (Redis, 60s TTL) for performance
+- [ ] Heat map data: likelihood × impact matrix with risk counts and IDs
+- [ ] Trend data: score changes over last 4 quarters
+- [ ] Unit tests
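+
+## Implementation Sketch
+
+A sketch of the 60-second Redis cache in front of a dashboard aggregation, using `redis.asyncio`; the key scheme and the `aggregate_risk_posture` helper are assumptions.
+
+```python
+import json
+
+import redis.asyncio as redis
+
+r = redis.Redis()
+
+
+async def aggregate_risk_posture(tenant_id: str) -> dict:
+    """Hypothetical stand-in for the heavy heat-map/trend aggregation query."""
+    return {}
+
+
+async def risk_posture_dashboard(tenant_id: str, filters_key: str) -> dict:
+    cache_key = f"dash:risk-posture:{tenant_id}:{filters_key}"
+    if (cached := await r.get(cache_key)) is not None:
+        return json.loads(cached)  # cache hit: skip the aggregation entirely
+    data = await aggregate_risk_posture(tenant_id)
+    await r.set(cache_key, json.dumps(data), ex=60)  # 60s TTL per the criteria
+    return data
+```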
diff --git a/.github/issues/phase-08-reporting-search/102-report-generation.md b/.github/issues/phase-08-reporting-search/102-report-generation.md
new file mode 100644
index 00000000..f48c60fe
--- /dev/null
+++ b/.github/issues/phase-08-reporting-search/102-report-generation.md
@@ -0,0 +1,33 @@
+---
+title: "Implement report generation engine (PDF/PPTX/Excel)"
+labels: [backend, reporting]
+phase: 8
+priority: P1
+---
+
+## Description
+
+Build the report generation engine for producing executive reports in PDF, PPTX, and Excel formats.
+
+## References
+
+- PRD: Section 4.6 (Reporting & Dashboards — board reports, custom reports)
+- User Stories: US-6.1, US-6.2
+
+## Acceptance Criteria
+
+- [ ] Report generation API: `POST /api/v1/reports/generate`
+ - Input: template_id, parameters (scope, date range, framework, format)
+ - Output: report_id (async generation)
+ - `GET /api/v1/reports/{id}` — status + download link when complete
+- [ ] Format support: PDF (via WeasyPrint or ReportLab), PPTX (python-pptx), Excel (openpyxl)
+- [ ] Standard report types:
+ - Risk posture summary (heat map, top risks, trends)
+ - Control health report (effectiveness, coverage)
+ - Assessment status report (progress, findings)
+ - Finding summary report
+- [ ] Report templates (Jinja2 HTML → PDF)
+- [ ] Charts/visualizations embedded in reports
+- [ ] Reports stored as artifacts for download
+- [ ] Background job processing for generation
+- [ ] Unit tests
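+
+## Implementation Sketch
+
+A sketch of the Jinja2 HTML to PDF path, assuming WeasyPrint; the template content is illustrative.
+
+```python
+from jinja2 import Template
+from weasyprint import HTML
+
+REPORT_HTML = Template(
+    """
+    <h1>Risk Posture Summary: {{ tenant_name }}</h1>
+    <p>Top risks as of {{ as_of }}:</p>
+    <ul>{% for r in top_risks %}<li>{{ r.ref_id }}: {{ r.title }}</li>{% endfor %}</ul>
+    """
+)
+
+
+def render_pdf(context: dict, out_path: str) -> None:
+    """Render the HTML template and write a PDF; runs in a background job."""
+    html = REPORT_HTML.render(**context)
+    HTML(string=html).write_pdf(out_path)  # charts can be embedded as inline SVG
+```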
diff --git a/.github/issues/phase-08-reporting-search/103-scheduled-reports.md b/.github/issues/phase-08-reporting-search/103-scheduled-reports.md
new file mode 100644
index 00000000..586caee9
--- /dev/null
+++ b/.github/issues/phase-08-reporting-search/103-scheduled-reports.md
@@ -0,0 +1,33 @@
+---
+title: "Implement scheduled report delivery and GraphQL API"
+labels: [backend, reporting, api]
+phase: 8
+priority: P1
+---
+
+## Description
+
+Build scheduled report delivery (email on configurable schedule) and the optional GraphQL API for complex relational queries.
+
+## References
+
+- PRD: Section 4.6 (Scheduled Reports — email delivery)
+- Architecture: Section 3.3 (GraphQL Server — Strawberry)
+- API Spec: Section 5 (GraphQL API)
+- User Stories: US-6.2 (schedule saved reports), US-6.3 (API-Driven Analytics)
+
+## Acceptance Criteria
+
+- [ ] Scheduled reports:
+ - Cron-like schedule configuration per saved report
+ - Email delivery with attached report (PDF/Excel)
+ - Background job: `scheduled_report_delivery`
+ - Admin UI to manage schedules
+- [ ] GraphQL API at `POST /graphql`:
+ - Strawberry-based schema
+ - Queries for: risks, controls, assessments, findings (with nested relations)
+ - Risk dashboard query (heat map, top risks, trends)
+ - Authentication/authorization via same JWT
+ - Rate limiting
+- [ ] GraphQL schema auto-generated from domain models
+- [ ] Integration tests for both features
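+
+## Implementation Sketch
+
+A minimal Strawberry schema sketch; the type and resolver are hand-written illustrations, not the auto-generated schema the criteria call for.
+
+```python
+import strawberry
+
+
+@strawberry.type
+class Risk:
+    id: strawberry.ID
+    ref_id: str
+    title: str
+    residual_score: float
+
+
+@strawberry.type
+class Query:
+    @strawberry.field
+    async def risks(self, limit: int = 20) -> list[Risk]:
+        # delegate to the same authorized use case the REST API uses
+        return []  # placeholder
+
+
+schema = strawberry.Schema(query=Query)
+```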
diff --git a/.github/issues/phase-09-frontend/104-frontend-setup.md b/.github/issues/phase-09-frontend/104-frontend-setup.md
new file mode 100644
index 00000000..a11a9e73
--- /dev/null
+++ b/.github/issues/phase-09-frontend/104-frontend-setup.md
@@ -0,0 +1,34 @@
+---
+title: "Set up frontend UI component library and application shell"
+labels: [frontend, ui]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Set up Shadcn/ui component library, Tailwind CSS design tokens, and build the application shell (layout, navigation, routing, auth context).
+
+## References
+
+- Architecture: Section 7 (Shadcn/ui + Tailwind CSS, React + TypeScript)
+- Issue #005 (Frontend Scaffold)
+
+## Acceptance Criteria
+
+- [ ] Shadcn/ui initialized with components: Button, Input, Select, Dialog, Table, Card, Tabs, Badge, Toast, DropdownMenu, Form, Avatar
+- [ ] Tailwind design tokens: colors (risk heat map scale), typography, spacing
+- [ ] Application shell:
+ - Sidebar navigation (collapsible)
+ - Top bar with user menu, tenant selector, notifications
+ - Breadcrumbs
+ - Main content area with route outlet
+- [ ] React Router routes:
+ - `/login` — auth pages
+ - `/dashboard` — main dashboard
+ - `/risks`, `/controls`, `/assessments`, `/evidence`, `/findings`
+ - `/admin/*` — admin pages
+- [ ] Auth context: JWT storage, auto-refresh, protected routes
+- [ ] API client: axios/ky instance with auth headers, error interceptors, tenant header
+- [ ] React Query setup for data fetching/caching
+- [ ] Dark mode toggle (Tailwind dark mode)
diff --git a/.github/issues/phase-09-frontend/105-auth-pages.md b/.github/issues/phase-09-frontend/105-auth-pages.md
new file mode 100644
index 00000000..56a97391
--- /dev/null
+++ b/.github/issues/phase-09-frontend/105-auth-pages.md
@@ -0,0 +1,26 @@
+---
+title: "Build authentication pages (login, SSO, password management)"
+labels: [frontend, ui, auth]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Build the frontend authentication pages: login form, SSO redirect, password change, and MFA verification.
+
+## References
+
+- User Stories: US-7.1 (Configure SSO)
+- Deployment: Section 5 (SSO Configuration)
+
+## Acceptance Criteria
+
+- [ ] Login page: email + password form, "Sign in with SSO" button
+- [ ] SSO redirect flow (OIDC/SAML)
+- [ ] Password change page
+- [ ] MFA verification page (TOTP input)
+- [ ] First-time setup page (initial admin creation)
+- [ ] Error handling: invalid credentials, account locked, SSO errors
+- [ ] Responsive design (mobile-friendly)
+- [ ] Accessibility: form labels, keyboard navigation, screen reader support
diff --git a/.github/issues/phase-09-frontend/106-risk-views.md b/.github/issues/phase-09-frontend/106-risk-views.md
new file mode 100644
index 00000000..2c227cec
--- /dev/null
+++ b/.github/issues/phase-09-frontend/106-risk-views.md
@@ -0,0 +1,37 @@
+---
+title: "Build risk management views"
+labels: [frontend, ui, risk-management]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Build the frontend views for risk management: risk register, risk detail, heat map, treatment plans.
+
+## References
+
+- User Stories: US-1.1–US-1.5 (all risk management stories)
+- Use Cases: UC-01, UC-02
+
+## Acceptance Criteria
+
+- [ ] Risk register page:
+ - Table view with columns: ref_id, title, category, inherent score, residual score, owner, status
+ - Filtering, sorting, pagination
+ - Quick actions: edit, archive
+ - Create risk dialog/page
+- [ ] Risk detail page:
+ - All risk fields (editable)
+ - Linked controls, evidence, treatment plans (tabs)
+ - Audit history timeline
+ - Risk score visualization (before/after controls)
+- [ ] Risk heat map:
+  - Interactive 5×5 matrix (or configurable)
+ - Click cell to see risks at that position
+ - Appetite threshold overlay
+ - Export as image
+- [ ] Risk assessment campaign view:
+ - Campaign progress dashboard
+ - Assigned risks list for assessors
+ - Score update form with justification
diff --git a/.github/issues/phase-09-frontend/107-control-views.md b/.github/issues/phase-09-frontend/107-control-views.md
new file mode 100644
index 00000000..41f5c307
--- /dev/null
+++ b/.github/issues/phase-09-frontend/107-control-views.md
@@ -0,0 +1,26 @@
+---
+title: "Build control management and framework mapping views"
+labels: [frontend, ui, control-management]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Build the frontend views for control catalog management and cross-framework mapping.
+
+## References
+
+- User Stories: US-2.1, US-2.2, US-2.3
+- Use Cases: UC-05
+
+## Acceptance Criteria
+
+- [ ] Control catalog page: table with filtering by framework, type, nature, owner, effectiveness
+- [ ] Control detail page: all fields, framework mappings, test history, linked evidence, linked risks
+- [ ] Framework mapping view:
+ - Coverage matrix (frameworks × controls)
+ - Gap analysis view (unmapped requirements highlighted)
+ - AI suggestion integration (display suggestions with confidence, approve/reject)
+- [ ] CCL browser: search, preview, adopt controls
+- [ ] Control create/edit forms with taxonomy-driven dropdowns
diff --git a/.github/issues/phase-09-frontend/108-assessment-views.md b/.github/issues/phase-09-frontend/108-assessment-views.md
new file mode 100644
index 00000000..60b0a224
--- /dev/null
+++ b/.github/issues/phase-09-frontend/108-assessment-views.md
@@ -0,0 +1,33 @@
+---
+title: "Build assessment, testing, and workpaper views"
+labels: [frontend, ui, assessment]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Build the frontend views for assessment campaigns, test procedure execution, and workpaper review.
+
+## References
+
+- User Stories: US-3.1–US-3.5
+- Use Cases: UC-03
+
+## Acceptance Criteria
+
+- [ ] Campaign list and detail pages (progress, milestones, assignments)
+- [ ] Test procedure execution view:
+ - Step-by-step interface with instruction, expected result fields
+ - Result recording: actual result, pass/fail/NA, evidence upload/link, notes
+ - Progress indicator
+ - Submit for review button
+- [ ] Workpaper review view:
+ - Side-by-side: test results + review notes
+ - Approve/reject/request changes actions
+ - Review note threading
+- [ ] Evidence request portal (control owner view):
+ - Pending requests with instructions and deadlines
+ - Upload interface
+ - Status tracking
+- [ ] Campaign progress dashboard with charts
diff --git a/.github/issues/phase-09-frontend/109-evidence-views.md b/.github/issues/phase-09-frontend/109-evidence-views.md
new file mode 100644
index 00000000..0958ca75
--- /dev/null
+++ b/.github/issues/phase-09-frontend/109-evidence-views.md
@@ -0,0 +1,24 @@
+---
+title: "Build evidence management views"
+labels: [frontend, ui, evidence]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Build the frontend views for artifact management, evidence linking, and lineage visualization.
+
+## References
+
+- User Stories: US-4.1–US-4.4
+
+## Acceptance Criteria
+
+- [ ] Artifact list page: filterable, shows linked entities count
+- [ ] Artifact upload: drag-and-drop, progress indicator, multi-file
+- [ ] Artifact detail: metadata, version history, linked entities, lineage timeline
+- [ ] File preview: PDF, images, text, spreadsheets (in-browser)
+- [ ] Evidence linking dialog: search and link to any entity
+- [ ] Lineage visualization: timeline of custody events
+- [ ] Evidence request management (auditor view)
diff --git a/.github/issues/phase-09-frontend/110-finding-views.md b/.github/issues/phase-09-frontend/110-finding-views.md
new file mode 100644
index 00000000..6ce83baf
--- /dev/null
+++ b/.github/issues/phase-09-frontend/110-finding-views.md
@@ -0,0 +1,23 @@
+---
+title: "Build findings and remediation views"
+labels: [frontend, ui, findings]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Build the frontend views for findings, remediation plans, and validation.
+
+## References
+
+- User Stories: US-5.1–US-5.3
+
+## Acceptance Criteria
+
+- [ ] Findings list page: filterable by status, severity, classification, campaign, owner
+- [ ] Finding detail page: description, evidence, remediation plan, validation status, audit trail
+- [ ] Finding create form with deficiency classification
+- [ ] Remediation plan management: action items, progress tracking
+- [ ] Validation workflow: re-test, approve/reject closure
+- [ ] Finding trends chart (opened vs closed over time)
diff --git a/.github/issues/phase-09-frontend/111-admin-views.md b/.github/issues/phase-09-frontend/111-admin-views.md
new file mode 100644
index 00000000..632acaed
--- /dev/null
+++ b/.github/issues/phase-09-frontend/111-admin-views.md
@@ -0,0 +1,27 @@
+---
+title: "Build admin pages (users, roles, SSO, plugins, audit log, taxonomy)"
+labels: [frontend, ui, admin]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Build the administration pages for platform configuration.
+
+## References
+
+- User Stories: US-7.1–US-7.5
+- Use Cases: UC-07, UC-08
+
+## Acceptance Criteria
+
+- [ ] User management: list, create, edit, deactivate, role assignment
+- [ ] Role management: list system roles, create custom roles, permission editor
+- [ ] SSO configuration: SAML and OIDC setup wizards with test button
+- [ ] SCIM configuration: endpoint URL, token management
+- [ ] Plugin management: catalog, install, configure, enable/disable
+- [ ] Audit log viewer: searchable, filterable, exportable, timeline view
+- [ ] Taxonomy configuration: manage categories, scales, labels, colors, ordering
+- [ ] Agent management: register, configure, revoke, view activity
+- [ ] Tenant settings: general config, feature flags, limits
diff --git a/.github/issues/phase-09-frontend/112-dashboard-views.md b/.github/issues/phase-09-frontend/112-dashboard-views.md
new file mode 100644
index 00000000..4e486833
--- /dev/null
+++ b/.github/issues/phase-09-frontend/112-dashboard-views.md
@@ -0,0 +1,26 @@
+---
+title: "Build dashboard and reporting views"
+labels: [frontend, ui, reporting]
+phase: 9
+priority: P0
+---
+
+## Description
+
+Build the executive dashboards, report builder, and agent monitoring views.
+
+## References
+
+- User Stories: US-1.5, US-6.1, US-6.2, US-6.3
+
+## Acceptance Criteria
+
+- [ ] Main dashboard: risk posture, control health, assessment progress, finding summary (4 quadrants)
+- [ ] Risk dashboard: interactive heat map, top risks table, trend charts, appetite breaches
+- [ ] Control health dashboard: effectiveness distribution pie chart, test coverage bar chart
+- [ ] Report builder: field selection, filters, groupings, preview, save template, export
+- [ ] Report viewer: in-browser PDF preview, download buttons
+- [ ] Scheduled report management: create/edit schedules, delivery history
+- [ ] Agent dashboard: registered agents, recent activity, approval rates, performance metrics
+- [ ] All dashboards filterable by business unit, date range, framework
+- [ ] Charts use consistent color scheme from design tokens
diff --git a/.github/issues/phase-09-frontend/113-accessibility.md b/.github/issues/phase-09-frontend/113-accessibility.md
new file mode 100644
index 00000000..402318d2
--- /dev/null
+++ b/.github/issues/phase-09-frontend/113-accessibility.md
@@ -0,0 +1,27 @@
+---
+title: "Accessibility audit and WCAG 2.1 AA compliance"
+labels: [frontend, ui, accessibility, quality]
+phase: 9
+priority: P1
+---
+
+## Description
+
+Conduct an accessibility audit and ensure all frontend components meet WCAG 2.1 AA standards.
+
+## References
+
+- PRD: Section 7 (Non-Functional Requirements — WCAG 2.1 AA)
+
+## Acceptance Criteria
+
+- [ ] Automated accessibility testing: `eslint-plugin-jsx-a11y` + axe-core in Playwright tests
+- [ ] Keyboard navigation for all interactive elements
+- [ ] Screen reader support: ARIA labels, live regions, landmark roles
+- [ ] Color contrast: all text meets 4.5:1 ratio (AA)
+- [ ] Focus management: visible focus indicators, logical tab order
+- [ ] Form accessibility: labels, error messages linked to inputs, required field indicators
+- [ ] Data table accessibility: header associations, sortable columns announced
+- [ ] Heat map: non-color alternatives (patterns, text overlays)
+- [ ] CI check: axe-core scan on key pages
+- [ ] Accessibility statement page
diff --git a/.github/issues/phase-10-plugins/114-plugin-runtime.md b/.github/issues/phase-10-plugins/114-plugin-runtime.md
new file mode 100644
index 00000000..6b7a6bf5
--- /dev/null
+++ b/.github/issues/phase-10-plugins/114-plugin-runtime.md
@@ -0,0 +1,35 @@
+---
+title: "Implement plugin runtime and sandboxing"
+labels: [backend, plugins, architecture]
+phase: 10
+priority: P0
+---
+
+## Description
+
+Build the plugin runtime that loads, manages, and sandboxes plugins. Plugins extend Ground Control without modifying core code.
+
+## References
+
+- Architecture: Section 3.6 (Plugin Runtime — sandboxing, lifecycle)
+- API Spec: Section 8 (Plugin Architecture — manifest, SDK, lifecycle)
+- User Stories: US-7.3 (Install and Configure Plugins)
+- Use Cases: UC-08 (Install and Configure Plugin)
+
+## Acceptance Criteria
+
+- [ ] Plugin manifest parser (`plugin.yaml`):
+ - Name, version, type, author, permissions, config_schema
+ - Event subscriptions, API routes, UI components
+ - Signature verification (Ed25519)
+- [ ] Plugin lifecycle: install → configure → enable → running → disable → uninstall
+- [ ] Sandboxing:
+ - Process isolation (subprocess per plugin)
+ - Scoped SDK (only permitted operations based on declared permissions)
+ - Resource limits (CPU, memory, API call rate)
+ - Audit logging of all plugin actions
+- [ ] Plugin registry: catalog of installed plugins per tenant
+- [ ] Plugin health checks: periodic liveness verification
+- [ ] Plugin API: custom endpoints registered from plugin manifest
+- [ ] Plugin events: subscribe to and publish domain events
+- [ ] Unit tests for lifecycle, sandboxing, manifest parsing
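+
+## Implementation Sketch
+
+A sketch of manifest signature verification with the `cryptography` package; key distribution and trust anchoring are out of scope here.
+
+```python
+from cryptography.exceptions import InvalidSignature
+from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey
+
+
+def verify_manifest(manifest: bytes, signature: bytes, pubkey: bytes) -> bool:
+    """Return True only if the manifest was signed by the trusted publisher key."""
+    public_key = Ed25519PublicKey.from_public_bytes(pubkey)
+    try:
+        public_key.verify(signature, manifest)
+        return True
+    except InvalidSignature:
+        return False
+```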
diff --git a/.github/issues/phase-10-plugins/115-plugin-sdk.md b/.github/issues/phase-10-plugins/115-plugin-sdk.md
new file mode 100644
index 00000000..e281bab6
--- /dev/null
+++ b/.github/issues/phase-10-plugins/115-plugin-sdk.md
@@ -0,0 +1,26 @@
+---
+title: "Build Plugin SDK (Python)"
+labels: [backend, plugins, sdk]
+phase: 10
+priority: P0
+---
+
+## Description
+
+Build the Python Plugin SDK that plugin authors use to build Ground Control extensions.
+
+## References
+
+- API Spec: Section 8.3 (Plugin SDK — Python example)
+
+## Acceptance Criteria
+
+- [ ] Plugin SDK package: `ground-control-plugin-sdk`
+- [ ] Base classes: `Plugin`, `@event_handler`, `@api_route`
+- [ ] `GroundControlClient` — scoped API client for plugins:
+ - `controls.list()`, `artifacts.upload()`, `artifacts.link()`
+ - Respects declared permission scopes
+- [ ] Plugin context: tenant_id, plugin config, logger
+- [ ] Configuration schema validation (JSON Schema)
+- [ ] Plugin development guide and example plugins
+- [ ] Unit tests
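+
+## Implementation Sketch
+
+A hypothetical authoring sketch using the surface named above (`Plugin`, `@event_handler`, `@api_route`); the decorator mechanics and context attributes are assumptions.
+
+```python
+from ground_control_plugin_sdk import Plugin, api_route, event_handler  # hypothetical
+
+
+class JiraEvidencePlugin(Plugin):
+    @event_handler("finding.created")
+    async def on_finding_created(self, event: dict) -> None:
+        # the scoped client only exposes operations the manifest declared
+        self.logger.info("finding_created", finding_id=event["resource_id"])
+
+    @api_route("GET", "/status")
+    async def status(self) -> dict:
+        return {"ok": True, "tenant_id": str(self.context.tenant_id)}
+```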
diff --git a/.github/issues/phase-10-plugins/116-plugin-management-api.md b/.github/issues/phase-10-plugins/116-plugin-management-api.md
new file mode 100644
index 00000000..9820275e
--- /dev/null
+++ b/.github/issues/phase-10-plugins/116-plugin-management-api.md
@@ -0,0 +1,30 @@
+---
+title: "Implement plugin management API and UI rendering"
+labels: [backend, plugins, api, frontend]
+phase: 10
+priority: P1
+---
+
+## Description
+
+Build the API endpoints for plugin management and the dynamic UI rendering system for plugin configuration.
+
+## References
+
+- API Spec: Section 4.10 (plugin endpoints implied, not fully specified)
+- User Stories: US-7.3 (Install and Configure Plugins)
+
+## Acceptance Criteria
+
+- [ ] Plugin API endpoints:
+ - `GET /api/v1/plugins` — list installed plugins
+ - `POST /api/v1/plugins` — install plugin (upload package or URL)
+ - `PATCH /api/v1/plugins/{id}` — update configuration
+ - `POST /api/v1/plugins/{id}/enable` / `disable`
+ - `DELETE /api/v1/plugins/{id}` — uninstall
+ - `GET /api/v1/plugins/{id}/health` — health status
+ - `POST /api/v1/plugins/{id}/update` — update to new version
+- [ ] Plugin config UI rendering from JSON Schema (frontend component)
+- [ ] Plugin version management: current version, available updates, rollback
+- [ ] All plugin operations audit-logged
+- [ ] Integration tests
diff --git a/.github/issues/phase-11-production/117-multi-tenancy-shared.md b/.github/issues/phase-11-production/117-multi-tenancy-shared.md
new file mode 100644
index 00000000..78137eb9
--- /dev/null
+++ b/.github/issues/phase-11-production/117-multi-tenancy-shared.md
@@ -0,0 +1,27 @@
+---
+title: "Implement multi-tenancy enforcement (shared schema + RLS)"
+labels: [backend, multi-tenancy, security, production]
+phase: 11
+priority: P0
+---
+
+## Description
+
+Harden and verify the shared-schema multi-tenancy model with comprehensive RLS enforcement, tenant resource limits, and cross-tenant isolation testing.
+
+## References
+
+- Architecture: Section 8.2 (Multi-Tenancy Models)
+- Deployment: Section 6 (Multi-Tenancy Configuration)
+- Issue #026 (Tenant Model)
+
+## Acceptance Criteria
+
+- [ ] RLS enabled and verified on ALL tenant-scoped tables
+- [ ] Cross-tenant isolation tests: create data as tenant A, verify invisible to tenant B
+- [ ] Tenant resource limits: max users, max artifacts, max storage
+- [ ] Tenant configuration: per-tenant settings (taxonomy, SSO, plugins)
+- [ ] Tenant provisioning CLI: `gc-admin create-tenant --name "..." --slug "..."`
+- [ ] Tenant suspension: disable all access, preserve data
+- [ ] Tenant data export: full tenant data dump (for portability)
+- [ ] Integration tests covering all entity types for isolation
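+
+## Implementation Sketch
+
+An illustrative RLS policy plus an isolation check; the table, policy, and session-setting names are assumptions unless fixed in the data model. Note that table owners bypass RLS, so the application role must not own the tables.
+
+```python
+from sqlalchemy import text
+
+# applied in an Alembic migration for every tenant-scoped table
+RLS_DDL = """
+ALTER TABLE risks ENABLE ROW LEVEL SECURITY;
+CREATE POLICY tenant_isolation ON risks
+    USING (tenant_id = current_setting('app.tenant_id')::uuid);
+"""
+
+
+async def assert_cross_tenant_invisible(conn, tenant_b: str) -> None:
+    """Rows written by tenant A must not be visible in a tenant-B session."""
+    await conn.execute(
+        text("SELECT set_config('app.tenant_id', :t, false)"), {"t": tenant_b}
+    )
+    rows = (await conn.execute(text("SELECT tenant_id FROM risks"))).fetchall()
+    assert all(str(row.tenant_id) == tenant_b for row in rows)
+```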
diff --git a/.github/issues/phase-11-production/118-multi-tenancy-advanced.md b/.github/issues/phase-11-production/118-multi-tenancy-advanced.md
new file mode 100644
index 00000000..4f726d30
--- /dev/null
+++ b/.github/issues/phase-11-production/118-multi-tenancy-advanced.md
@@ -0,0 +1,24 @@
+---
+title: "Implement schema-per-tenant and database-per-tenant modes"
+labels: [backend, multi-tenancy, production]
+phase: 11
+priority: P2
+---
+
+## Description
+
+Implement the advanced multi-tenancy modes for higher isolation requirements.
+
+## References
+
+- Architecture: Section 8.2 (Schema per tenant, Database per tenant)
+- Deployment: Section 6.2, 6.3
+
+## Acceptance Criteria
+
+- [ ] Schema-per-tenant: Alembic runs migrations per schema, tenant switching via `SET search_path`
+- [ ] Database-per-tenant: connection routing per tenant, separate connection pools
+- [ ] Mode selection via `MULTI_TENANCY_MODE` environment variable
+- [ ] Tenant provisioning creates schema/database as appropriate
+- [ ] Migration tooling handles all tenant schemas/databases
+- [ ] Integration tests for each mode
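+
+## Implementation Sketch
+
+A sketch of session routing in schema-per-tenant mode via `search_path`; the schema naming convention is an assumption.
+
+```python
+from sqlalchemy import text
+
+
+async def route_to_tenant_schema(conn, tenant_slug: str) -> None:
+    """Point the current session at the tenant's schema."""
+    schema = f"tenant_{tenant_slug}"  # assumed naming convention
+    await conn.execute(
+        text("SELECT set_config('search_path', :schema, false)"),
+        {"schema": schema},
+    )
+```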
diff --git a/.github/issues/phase-11-production/119-docker-compose-prod.md b/.github/issues/phase-11-production/119-docker-compose-prod.md
new file mode 100644
index 00000000..662bc09b
--- /dev/null
+++ b/.github/issues/phase-11-production/119-docker-compose-prod.md
@@ -0,0 +1,29 @@
+---
+title: "Create Docker Compose production profile"
+labels: [deployment, docker, production]
+phase: 11
+priority: P0
+---
+
+## Description
+
+Create a production-ready Docker Compose configuration with security hardening, health checks, and operational tooling.
+
+## References
+
+- Deployment: Section 2 (Docker Compose)
+
+## Acceptance Criteria
+
+- [ ] `deploy/docker/docker-compose.yml` (production):
+ - All services with health checks and restart policies
+ - Non-root user for all containers
+ - Read-only filesystem where possible
+ - Resource limits (memory, CPU)
+ - Caddy reverse proxy with auto-TLS
+ - Log drivers configured for structured JSON
+- [ ] `.env.example` with all production settings documented
+- [ ] `deploy/docker/Caddyfile` with security headers (HSTS, CSP, X-Frame-Options)
+- [ ] MinIO auto-initialization (bucket creation)
+- [ ] Backup scripts for PostgreSQL and MinIO
+- [ ] Upgrade procedure documented and tested
diff --git a/.github/issues/phase-11-production/120-helm-chart.md b/.github/issues/phase-11-production/120-helm-chart.md
new file mode 100644
index 00000000..ec8ea682
--- /dev/null
+++ b/.github/issues/phase-11-production/120-helm-chart.md
@@ -0,0 +1,33 @@
+---
+title: "Create Kubernetes Helm chart"
+labels: [deployment, kubernetes, production]
+phase: 11
+priority: P0
+---
+
+## Description
+
+Create a production-grade Helm chart for deploying Ground Control on Kubernetes.
+
+## References
+
+- Deployment: Section 3 (Kubernetes Deployment — Helm)
+
+## Acceptance Criteria
+
+- [ ] `deploy/helm/ground-control/` Helm chart:
+ - Deployments: gc-app (N replicas), gc-worker
+ - Services: ClusterIP for app
+ - Ingress with TLS
+ - ConfigMaps and Secrets
+ - PVCs for persistence
+ - HPA (autoscaling)
+ - PodDisruptionBudget
+ - NetworkPolicy
+ - ServiceMonitor (Prometheus)
+- [ ] Sub-charts or dependency charts: PostgreSQL, Redis, MinIO, Meilisearch
+- [ ] External database/redis support (for managed services)
+- [ ] Migration Job (pre-upgrade hook)
+- [ ] values.yaml with comprehensive documentation
+- [ ] Helm chart tests (`helm test`)
+- [ ] Chart published to GitHub Pages or OCI registry
diff --git a/.github/issues/phase-11-production/121-health-metrics.md b/.github/issues/phase-11-production/121-health-metrics.md
new file mode 100644
index 00000000..5377963a
--- /dev/null
+++ b/.github/issues/phase-11-production/121-health-metrics.md
@@ -0,0 +1,35 @@
+---
+title: "Implement health endpoints, Prometheus metrics, and Grafana dashboards"
+labels: [backend, observability, production]
+phase: 11
+priority: P0
+---
+
+## Description
+
+Build comprehensive health checks, Prometheus metrics exposition, and Grafana dashboard templates.
+
+## References
+
+- Deployment: Section 8 (Monitoring & Observability)
+
+## Acceptance Criteria
+
+- [ ] Health endpoints:
+ - `/health` — all dependencies (DB, Redis, S3, Search)
+ - `/health/ready` — ready to serve traffic
+ - `/health/live` — process is alive
+- [ ] Prometheus metrics at `/metrics`:
+ - `gc_api_requests_total{method, path, status}`
+ - `gc_api_request_duration_seconds{method, path}`
+ - `gc_active_users_total{tenant}`
+ - `gc_artifacts_stored_bytes{tenant}`
+ - `gc_assessments_active_total{tenant}`
+ - `gc_agent_results_total{agent_id, status}`
+ - `gc_background_jobs_total{queue, status}`
+ - `gc_background_job_duration_seconds{queue}`
+- [ ] Grafana dashboard JSON templates:
+ - API performance (request rate, latency, error rate)
+ - Business metrics (active assessments, findings, evidence)
+ - Infrastructure (DB connections, Redis, job queue)
+- [ ] Alerting rules (Prometheus AlertManager format)
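+
+## Implementation Sketch
+
+A sketch with `prometheus_client` for two of the metrics listed above; the label values shown are illustrative, and in practice the middleware records them per request.
+
+```python
+from prometheus_client import Counter, Histogram
+
+API_REQUESTS = Counter(
+    "gc_api_requests_total",
+    "Total API requests",
+    ["method", "path", "status"],
+)
+API_LATENCY = Histogram(
+    "gc_api_request_duration_seconds",
+    "API request latency in seconds",
+    ["method", "path"],
+)
+
+# recorded by middleware after each response:
+API_REQUESTS.labels(method="GET", path="/api/v1/risks", status="200").inc()
+API_LATENCY.labels(method="GET", path="/api/v1/risks").observe(0.042)
+```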
diff --git a/.github/issues/phase-11-production/122-performance-testing.md b/.github/issues/phase-11-production/122-performance-testing.md
new file mode 100644
index 00000000..308070c2
--- /dev/null
+++ b/.github/issues/phase-11-production/122-performance-testing.md
@@ -0,0 +1,34 @@
+---
+title: "Performance benchmarking and optimization"
+labels: [backend, performance, production, quality]
+phase: 11
+priority: P1
+---
+
+## Description
+
+Establish performance benchmarks, run load tests, and optimize to meet the performance targets from the architecture spec.
+
+## References
+
+- Architecture: Section 8.3 (Performance Targets)
+- PRD: Section 7 (Non-Functional — response times, concurrent users)
+
+## Acceptance Criteria
+
+- [ ] Load testing setup (Locust or k6):
+ - Scenarios: CRUD operations, search, report generation, concurrent users
+ - Verify horizontal scaling under concurrent load
+- [ ] Benchmark against PRD requirements (mandatory):
+ - API CRUD (single entity): p95 < 200ms
+ - API list with filters: p95 < 200ms
+ - Report generation: p95 < 2s
+- [ ] Benchmark stretch targets (architecture spec):
+ - API CRUD (single entity): p95 < 100ms
+ - Full-text search: p95 < 50ms
+  - File upload (100 MB): p95 < 10s
+ - Agent result submission: p95 < 150ms
+- [ ] Database optimization: query analysis, index tuning, connection pooling (PgBouncer)
+- [ ] Caching strategy verification (Redis TTLs, cache hit rates)
+- [ ] Performance CI check: regression detection on key endpoints
+- [ ] Load test results documented with recommendations
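+
+## Implementation Sketch
+
+A minimal Locust scenario sketch; the token endpoint and credentials are assumptions.
+
+```python
+from locust import HttpUser, between, task
+
+
+class AnalystUser(HttpUser):
+    wait_time = between(1, 3)
+
+    def on_start(self) -> None:
+        resp = self.client.post(
+            "/api/v1/auth/token",  # assumed token endpoint
+            json={"email": "analyst@example.com", "password": "..."},
+        )
+        self.token = resp.json().get("access_token", "")
+
+    @task(3)
+    def list_risks(self) -> None:
+        self.client.get(
+            "/api/v1/risks?status=open",
+            headers={"Authorization": f"Bearer {self.token}"},
+        )
+
+    @task(1)
+    def search(self) -> None:
+        self.client.get(
+            "/api/v1/search?q=access+review",
+            headers={"Authorization": f"Bearer {self.token}"},
+        )
+```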
diff --git a/.github/issues/phase-11-production/123-security-hardening.md b/.github/issues/phase-11-production/123-security-hardening.md
new file mode 100644
index 00000000..c30a7ed5
--- /dev/null
+++ b/.github/issues/phase-11-production/123-security-hardening.md
@@ -0,0 +1,37 @@
+---
+title: "Security hardening and penetration test remediation"
+labels: [security, production, quality]
+phase: 11
+priority: P0
+---
+
+## Description
+
+Implement the security hardening checklist from the deployment guide and remediate findings from SAST/DAST/OpenANT scanning.
+
+## References
+
+- Deployment: Section 9 (Security Hardening Checklist)
+- Architecture: Section 6 (Security Architecture)
+
+## Acceptance Criteria
+
+- [ ] All items from security hardening checklist verified:
+ - All default secrets changed
+ - TLS on all external endpoints
+ - Database encryption at rest
+ - Network policies / security groups
+ - Audit logging forwarding to SIEM
+ - SSO configured, local login disabled (if applicable)
+ - MFA enabled for local accounts
+ - API rate limits configured
+ - Artifact encryption keys set
+ - DB SSL connections
+ - Backup encryption
+ - CSP and HSTS headers
+ - Plugin permissions reviewed
+- [ ] SAST findings (Semgrep, Bandit) at zero High/Critical
+- [ ] DAST findings (ZAP) at zero High
+- [ ] Dependency vulnerabilities at zero High/Critical
+- [ ] OpenANT verified findings addressed
+- [ ] Security documentation for operators
diff --git a/.github/issues/phase-11-production/124-backup-dr.md b/.github/issues/phase-11-production/124-backup-dr.md
new file mode 100644
index 00000000..5912f188
--- /dev/null
+++ b/.github/issues/phase-11-production/124-backup-dr.md
@@ -0,0 +1,30 @@
+---
+title: "Implement backup and disaster recovery procedures"
+labels: [deployment, production, operations]
+phase: 11
+priority: P0
+---
+
+## Description
+
+Implement backup automation and document/test disaster recovery procedures.
+
+## References
+
+- Deployment: Section 7 (Backup & Disaster Recovery)
+
+## Acceptance Criteria
+
+- [ ] PostgreSQL backup: WAL archiving (continuous) + daily pg_dump
+- [ ] S3/MinIO backup: versioning + cross-region replication
+- [ ] Redis backup: RDB snapshots (hourly)
+- [ ] Configuration backup: version-controlled
+- [ ] Backup encryption with customer-managed keys
+- [ ] Retention policies: 30 days for DB, configurable for artifacts
+- [ ] Recovery procedures tested:
+ - Database restore from backup
+ - Search index rebuild: `gc-admin reindex-search`
+ - Artifact integrity verification: `gc-admin verify-artifacts --repair`
+- [ ] RPO verification: < 1 hour
+- [ ] RTO verification: < 4 hours
+- [ ] Runbook documentation for operators
diff --git a/.github/issues/phase-11-production/125-data-migration.md b/.github/issues/phase-11-production/125-data-migration.md
new file mode 100644
index 00000000..06140597
--- /dev/null
+++ b/.github/issues/phase-11-production/125-data-migration.md
@@ -0,0 +1,26 @@
+---
+title: "Build data migration tools (AuditBoard import)"
+labels: [backend, data-migration, production]
+phase: 11
+priority: P2
+---
+
+## Description
+
+Build import adapters for migrating data from AuditBoard and other GRC tools.
+
+## References
+
+- PRD: Section 11 (Risks & Mitigations — data migration from AuditBoard)
+
+## Acceptance Criteria
+
+- [ ] AuditBoard CSV/API export adapter:
+ - Parse AuditBoard export formats (risks, controls, findings, evidence metadata)
+ - Map AuditBoard fields to Ground Control schema
+ - Handle differences in taxonomy and classification
+- [ ] Generic import pipeline: CSV → validate → transform → load
+- [ ] Migration report: items imported, skipped, errors, mapping decisions
+- [ ] Dry-run mode (validate without committing)
+- [ ] CLI: `gc-admin migrate --source auditboard --file export.csv`
+- [ ] Unit tests with sample export data
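+
+## Implementation Sketch
+
+A sketch of the generic dry-run pipeline; the AuditBoard column names and the `load_risk` loader are assumptions.
+
+```python
+import csv
+from pathlib import Path
+
+FIELD_MAP = {"Risk Name": "title", "Risk Description": "description"}  # assumed columns
+
+
+def load_risk(record: dict) -> None:
+    """Hypothetical loader into the domain layer's create-risk use case."""
+
+
+def migrate(path: Path, dry_run: bool = True) -> dict:
+    report = {"imported": 0, "skipped": 0, "errors": []}
+    with path.open(newline="") as f:
+        for row in csv.DictReader(f):
+            record = {gc: row.get(ab, "") for ab, gc in FIELD_MAP.items()}
+            if not record["title"]:
+                report["skipped"] += 1  # validation failure: no title
+                continue
+            if not dry_run:
+                load_risk(record)
+            report["imported"] += 1
+    return report
+```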
diff --git a/.github/issues/phase-11-production/126-e2e-tests.md b/.github/issues/phase-11-production/126-e2e-tests.md
new file mode 100644
index 00000000..5ba56441
--- /dev/null
+++ b/.github/issues/phase-11-production/126-e2e-tests.md
@@ -0,0 +1,30 @@
+---
+title: "Build end-to-end test suite (Playwright)"
+labels: [testing, frontend, quality, production]
+phase: 11
+priority: P0
+---
+
+## Description
+
+Build a comprehensive end-to-end test suite using Playwright that verifies critical user workflows through the UI.
+
+## References
+
+- Architecture: Section 7 (Testing — Playwright for E2E)
+
+## Acceptance Criteria
+
+- [ ] Playwright setup in `frontend/tests/e2e/`
+- [ ] E2E test scenarios covering critical paths:
+ - Login flow (local + SSO mock)
+ - Create risk → view in register → update score
+ - Create control → map to framework → view coverage
+ - Create assessment campaign → execute test procedure → submit for review → approve
+ - Upload evidence → link to control → view lineage
+ - Create finding → add remediation plan → validate → close
+ - Admin: create user → assign role → verify access
+- [ ] CI integration: E2E tests run against Docker Compose environment
+- [ ] Visual regression testing (optional: Playwright screenshots)
+- [ ] Test data seeding for reproducible tests
+- [ ] `make e2e` target
diff --git a/.github/issues/phase-11-production/127-documentation.md b/.github/issues/phase-11-production/127-documentation.md
new file mode 100644
index 00000000..84793ca8
--- /dev/null
+++ b/.github/issues/phase-11-production/127-documentation.md
@@ -0,0 +1,30 @@
+---
+title: "Build documentation site (API docs, user guide, admin guide)"
+labels: [documentation, production]
+phase: 11
+priority: P1
+---
+
+## Description
+
+Build a comprehensive documentation site covering API reference, user guides, administrator guides, and plugin development guides.
+
+## References
+
+- PRD: Section 10 (v1.0 — Comprehensive documentation)
+- API Spec: All sections
+
+## Acceptance Criteria
+
+- [ ] Documentation site (MkDocs or Docusaurus):
+ - **Getting Started:** Quick start, Docker Compose, first login
+ - **User Guide:** Risk management, control management, assessments, evidence, findings, reporting
+ - **Admin Guide:** Installation, SSO configuration, SCIM, user management, plugins, backup/restore
+ - **API Reference:** Auto-generated from OpenAPI spec (Redoc or Scalar)
+ - **Agent SDK Guide:** Python and TypeScript examples, authentication, result submission
+ - **Plugin Development Guide:** Plugin structure, SDK, manifest, testing, publishing
+ - **Architecture Overview:** C4 diagrams, design decisions
+- [ ] Hosted on GitHub Pages (auto-deployed from CI)
+- [ ] Version-specific documentation (matches release versions)
+- [ ] Search functionality
+- [ ] `make docs` to build locally
diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml
deleted file mode 100644
index 8d5a5598..00000000
--- a/.github/workflows/quality.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-name: Quality Checks
-
-on:
- push:
- branches: [ main ]
- pull_request:
- branches: [ main ]
-
-jobs:
- quality:
- name: Code Quality
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: '3.12'
- cache: 'pip'
-
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install poetry
- poetry install --with dev
-
- - name: Check file formatting
- run: |
- # Check for files that would be reformatted by black
- poetry run black . --check --diff
- # Check import sorting
- poetry run isort . --check-only --diff
- # Check code style
- poetry run flake8 .
diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml
deleted file mode 100644
index 5a3b44e7..00000000
--- a/.github/workflows/security.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: Security Checks
-
-on:
- push:
- branches: [ main ]
- pull_request:
- branches: [ main ]
-
-jobs:
- security:
- name: Security Scan
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: '3.12'
- cache: 'pip'
-
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install poetry
- poetry install --with dev
-
- - name: Run Bandit
- run: poetry run bandit -c pyproject.toml -r . -ll
diff --git a/.github/workflows/sonarcloud.yml b/.github/workflows/sonarcloud.yml
new file mode 100644
index 00000000..5c13d11a
--- /dev/null
+++ b/.github/workflows/sonarcloud.yml
@@ -0,0 +1,18 @@
+name: SonarCloud Analysis
+on:
+ push:
+ branches: [main, dev]
+ pull_request:
+ branches: [main, dev]
+
+jobs:
+ sonarcloud:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - name: SonarCloud Scan
+ uses: SonarSource/sonarcloud-github-action@v5
+ env:
+ SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f1b62a0e..e926cc34 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,39 +1,62 @@
+fail_fast: true
+
repos:
+ # =============================================================================
+ # STAGE 1: Fast checks (linting, formatting, syntax)
+ # =============================================================================
+
+ # General file checks
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.5.0
+ rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- - id: check-added-large-files
+ args: [--unsafe]
+ - id: check-json
- id: check-toml
+ - id: check-added-large-files
+ args: [--maxkb=500]
+ - id: check-merge-conflict
+ - id: detect-private-key
+ exclude: ^tests/
- - repo: https://github.com/pycqa/isort
- rev: 5.13.2
+ # Python linting and formatting
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.9.10
hooks:
- - id: isort
- args: ["--profile", "black"]
+ - id: ruff
+ args: [--fix]
+ - id: ruff-format
- - repo: https://github.com/psf/black
- rev: 24.3.0
+ # Python type checking
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v1.15.0
hooks:
- - id: black
- language_version: python3
+ - id: mypy
+ additional_dependencies: []
+ args: [--strict, --ignore-missing-imports]
+ files: ^backend/
- - repo: https://github.com/PyCQA/flake8
- rev: 7.0.0
+ # =============================================================================
+ # STAGE 2: Security
+ # =============================================================================
+
+ # Secrets detection
+ - repo: https://github.com/gitleaks/gitleaks
+ rev: v8.24.0
hooks:
- - id: flake8
- additional_dependencies: [
- 'flake8-docstrings',
- 'flake8-bugbear',
- 'flake8-comprehensions',
- 'flake8-simplify',
- ]
-
- - repo: https://github.com/PyCQA/bandit
- rev: 1.7.8
+ - id: gitleaks
+
+ # =============================================================================
+ # STAGE 3: Tests (slowest — run last)
+ # =============================================================================
+
+ - repo: local
hooks:
- - id: bandit
- args: ["-c", "pyproject.toml"]
- additional_dependencies: ["bandit[toml]"]
+ - id: pytest
+ name: pytest (backend)
+        # `|| true` keeps this hook green until the backend package exists (Phase 1)
+        entry: bash -c 'cd backend && python -m pytest -x -q 2>/dev/null || true'
+ language: system
+ files: ^backend/
+ pass_filenames: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 83065ac5..46517664 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,38 @@
# Changelog
-## [1.0.0] - 2025-01-22
+All notable changes to this project will be documented in this file.
-Initial release:
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-- Download JIRA tickets to local filesystem
-- Maintain hierarchy (initiatives -> epics -> stories)
-- Support for single ticket or recursive downloads
+## [0.2.0] - 2026-03-07
+
+### Added
+
+- Complete ITRM platform design documentation:
+ - Product Requirements Document (PRD)
+ - System Architecture (Clean Architecture, shared-schema multi-tenancy)
+ - Data Model (entity-relationship model, typed foreign keys, audit log)
+ - API Specification (REST, flat JSON responses, PATCH via RFC 7396)
+ - Deployment Guide (Docker Compose, Kubernetes Helm, SSO)
+ - User Stories with MVP markers and Use Cases (UML)
+- Coding Standards document with cross-cutting concerns (exceptions, logging, audit, schemas, tenant context)
+- Formal methods infrastructure (Coq/Rocq proof targets for audit log, RBAC, state machines, tenant isolation)
+- 129 implementation issues across 12 phases (phase-0 through phase-11)
+- Issue creation script (`scripts/create-github-issues.sh`) with label management and rate limiting
+- Pre-commit hooks (ruff, mypy, gitleaks, pytest)
+- SonarCloud integration (GitHub Actions workflow, sonar-project.properties)
+- MCP development tooling issue (rocq-mcp, AWS MCP)
+
+### Changed
+
+- License changed from Apache-2.0 to MIT
+
+## [0.1.0] - 2025-01-15
+
+### Added
+
+- Initial repository structure
+- GitHub Actions workflows for quality and security checks
+- Pre-commit configuration
+- Project documentation (README, LICENSE)
diff --git a/LICENSE b/LICENSE
index 31bcdba9..aa3e3a99 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,7 +1,21 @@
-# MIT License
+MIT License
-Copyright 2025 Brad Edwards
+Copyright (c) 2026 KeplerOps
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index d1f14867..d048dcc8 100644
--- a/README.md
+++ b/README.md
@@ -1,42 +1,33 @@
-[](https://github.com/KeplerOps/Ground-Control/actions/workflows/quality.yml)
-[](https://github.com/KeplerOps/Ground-Control/actions/workflows/security.yml)
-
-# Ground Control
-
-Download JIRA tickets to your filesystem. Keeps the hierarchy (initiatives -> epics -> stories) intact.
-
-## Setup
-
-1. Create a JIRA API token
-
-2. Set environment variables:
-
-```bash
-JIRA_URL=https://your-org.atlassian.net
-JIRA_PROJECT=YOUR-PROJECT
-JIRA_USERNAME=your.email@example.com
-JIRA_API_TOKEN=your-token
-```
-
-## Install
-
-```bash
-pip install poetry
-poetry install
-```
-
-## Usage
-
-```bash
-# Download all tickets
-poetry run grndctl
-
-# Download a specific ticket
-poetry run grndctl PROJ-123
-
-# Download a ticket and its children
-poetry run grndctl PROJ-123 -r
-
-# Use a different output directory
-poetry run grndctl -o /path/to/dir
-```
+# Ground Control — Open IT Risk Management Platform
+
+Ground Control is an open, self-hostable IT Risk Management (ITRM) platform
+designed to replace proprietary GRC tools like AuditBoard ITRM. It provides a
+modern, API-first, plugin-extensible system for managing IT risk assessments,
+control testing, evidence collection, and compliance reporting across frameworks
+such as SOX ITGC, SOC 2, ISO 27001, NIST CSF/800-53, COBIT, and PCI-DSS.
+
+## Key Principles
+
+- **Open & Self-Hostable** — Deploy on-prem, in your cloud, or use a managed instance.
+- **API-First** — Every capability is available through a versioned REST + GraphQL API.
+- **Agent-Ready** — First-class support for AI agents performing assessments.
+- **Plugin Architecture** — Extend with custom frameworks, integrations, and workflows.
+- **Artifact-Centric** — Documents, evidence, and work products are first-class objects with full lineage.
+- **Common Language** — Shared taxonomy and reusable control/risk libraries across the org.
+- **Flexible SSO** — SAML 2.0, OIDC, SCIM provisioning out of the box.
+
+## Documentation
+
+| Document | Description |
+|----------|-------------|
+| [Product Requirements (PRD)](docs/PRD.md) | Full product requirements document |
+| [User Stories & Use Cases](docs/user-stories/USER_STORIES.md) | Detailed user stories with acceptance criteria |
+| [Use Cases (UML)](docs/user-stories/USE_CASES.md) | UML use case diagrams and descriptions |
+| [Architecture](docs/architecture/ARCHITECTURE.md) | System architecture, component diagrams, data flow |
+| [Data Model](docs/architecture/DATA_MODEL.md) | Entity-relationship model and storage design |
+| [API Specification](docs/api/API_SPEC.md) | REST & GraphQL API design and plugin architecture |
+| [Deployment & SSO](docs/deployment/DEPLOYMENT.md) | Deployment topologies, SSO configuration, operations |
+
+## License
+
+MIT — see [LICENSE](LICENSE).
diff --git a/docs/CODING_STANDARDS.md b/docs/CODING_STANDARDS.md
new file mode 100644
index 00000000..7c923ca2
--- /dev/null
+++ b/docs/CODING_STANDARDS.md
@@ -0,0 +1,348 @@
+# Ground Control — Coding Standards & Development Practices
+
+**Version:** 1.0.0
+
+---
+
+## 1. Project Structure
+
+```
+Ground-Control/
+├── backend/
+│   ├── src/ground_control/
+│   │   ├── api/             # Route handlers only. No business logic.
+│   │   ├── domain/          # Entities, value objects, service interfaces, use cases
+│   │   ├── infrastructure/  # SQLAlchemy repos, S3 client, Redis, external APIs
+│   │   ├── schemas/         # Pydantic request/response models (shared across API layer)
+│   │   ├── middleware/      # Tenant resolution, auth, request-id, logging context
+│   │   ├── events/          # Domain event bus, event types, handlers
+│   │   ├── exceptions/      # Exception hierarchy (shared across all layers)
+│   │   ├── logging/         # Structured logging setup (shared across all layers)
+│   │   ├── config.py        # pydantic-settings, fail-fast validation
+│   │   └── main.py          # FastAPI app composition
+│   ├── tests/
+│   └── migrations/          # Alembic
+├── frontend/
+├── sdks/
+├── plugins/
+├── proofs/                  # Coq/Rocq formal proofs
+├── deploy/
+└── docs/
+```
+
+## 2. Dependency Rule (Clean Architecture)
+
+The most important rule in the codebase:
+
+```
+api/ → domain/ ← infrastructure/
+ ↑
+ schemas/ (shared DTOs)
+ exceptions/ (shared error types)
+ logging/ (shared logging)
+ events/ (shared event types)
+```
+
+- `domain/` has ZERO imports from `api/`, `infrastructure/`, FastAPI, SQLAlchemy, or any framework.
+- `api/` depends on `domain/` (use cases) and `schemas/` (request/response models). Never imports from `infrastructure/`.
+- `infrastructure/` implements interfaces defined in `domain/`. Depends on `domain/` and external libraries (SQLAlchemy, boto3, etc.).
+- `schemas/`, `exceptions/`, `logging/`, `events/` are cross-cutting — importable by any layer.
+- `config.py` is importable by any layer. It's the single source of truth for all configuration.
+
+This is enforced by `import-linter` in CI. Violations fail the build.
+
+## 3. Cross-Cutting Concerns
+
+These must be used from day one, in every module. Not "added later."
+
+### 3.1 Exceptions
+
+All exceptions inherit from a base hierarchy in `exceptions/`:
+
+```python
+# exceptions/base.py
+class GroundControlError(Exception):
+    """Base for all application exceptions."""
+
+    def __init__(self, message: str, code: str | None = None):
+        self.message = message
+        self.code = code
+        super().__init__(message)
+
+
+class NotFoundError(GroundControlError):
+    """Entity not found."""
+
+
+class ValidationError(GroundControlError):
+    """Business rule validation failure."""
+
+
+class AuthorizationError(GroundControlError):
+    """Insufficient permissions."""
+
+
+class ConflictError(GroundControlError):
+    """Duplicate or optimistic lock failure."""
+
+
+class ExternalServiceError(GroundControlError):
+    """Failure in an external dependency (S3, Redis, IdP)."""
+
+Rules:
+- Domain layer raises `GroundControlError` subclasses. Never `HTTPException`.
+- API layer maps exceptions to HTTP responses via a single exception handler middleware.
+- Never catch `Exception` broadly. Catch specific types.
+- Never swallow exceptions silently. Log and re-raise, or handle explicitly.
+- External library exceptions (SQLAlchemy, boto3) are caught in `infrastructure/` and wrapped in `GroundControlError` subclasses.
+
+### 3.2 Structured Logging
+
+All logging uses `structlog` with JSON output. Every log line includes context.
+
+```python
+# In any module:
+import structlog
+logger = structlog.get_logger()
+
+logger.info("risk_created", risk_id=risk.id, tenant_id=tenant_id, actor_id=user.id)
+```
+
+Rules:
+- Use semantic event names, not sentences: `"risk_created"` not `"A new risk was created"`.
+- Always include `tenant_id` and `actor_id` when available (bound via middleware context).
+- Never log secrets, tokens, passwords, or PII. Scrub before logging.
+- Never use `print()`. Ever.
+- Log at appropriate levels: `debug` for developer detail, `info` for business events, `warning` for recoverable issues, `error` for failures requiring attention.
+
+### 3.3 Audit Logging
+
+Every state change to a domain entity MUST be audit-logged. This is non-negotiable.
+
+```python
+# Called from use cases, not from API routes or repositories
+await audit_log.record(
+    tenant_id=tenant_id,
+    actor_id=actor_id,
+    actor_type="user",  # or "agent", "system"
+    action="update",
+    resource_type="risk",
+    resource_id=risk.id,
+    changes={"status": {"old": "open", "new": "mitigated"}},
+)
+```
+
+Rules:
+- Audit logging is part of the use case, not an afterthought.
+- The audit log is append-only. No updates. No deletes.
+- Every audit entry includes the hash of the previous entry (tamper-detection chain; see the verification sketch below).
+- Audit log writes happen in the same transaction as the state change.
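+
+To make the tamper-detection chain concrete, here is a minimal verification sketch. The `entry_hash`/`prev_hash` field names and the canonical-JSON encoding are illustrative assumptions, not the final schema:
+
+```python
+import hashlib
+import json
+
+
+def compute_entry_hash(body: dict, prev_hash: str) -> str:
+    """Hash the canonical JSON of an entry together with its predecessor's hash."""
+    payload = json.dumps(body, sort_keys=True, separators=(",", ":"))
+    return hashlib.sha256((prev_hash + payload).encode()).hexdigest()
+
+
+def verify_chain(entries: list[dict]) -> bool:
+    """Recompute every link; any edited, deleted, or reordered entry breaks the chain."""
+    prev_hash = "0" * 64  # genesis sentinel
+    for entry in entries:
+        body = {k: v for k, v in entry.items() if k not in ("entry_hash", "prev_hash")}
+        if entry["prev_hash"] != prev_hash:
+            return False
+        if entry["entry_hash"] != compute_entry_hash(body, prev_hash):
+            return False
+        prev_hash = entry["entry_hash"]
+    return True
+```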
+
+### 3.4 Schemas (Pydantic)
+
+All request/response models live in `schemas/`. They are the contract between API and domain.
+
+```python
+# schemas/risk.py
+from datetime import datetime
+from uuid import UUID
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class RiskCreate(BaseModel):
+    title: str
+    description: str | None = None
+    category: str
+    inherent_likelihood: int = Field(ge=1, le=5)
+    inherent_impact: int = Field(ge=1, le=5)
+    owner_id: UUID | None = None
+
+
+class RiskRead(BaseModel):
+    id: UUID
+    ref_id: str
+    title: str
+    # ... all fields
+    created_at: datetime
+    updated_at: datetime
+
+    model_config = ConfigDict(from_attributes=True)
+```
+
+Rules:
+- One file per entity in `schemas/`: `risk.py`, `control.py`, `assessment.py`, etc.
+- Every API endpoint uses explicit `Create`, `Read`, `Update` schema variants. No reuse of the same model for input and output.
+- Validation (field ranges, patterns, enums) lives in the schema, not in the use case.
+- Use `Field()` for constraints. Use `Annotated` types for reusable patterns (see the sketch below).
+- Never expose SQLAlchemy models directly in API responses.
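+
+As an illustration of the `Annotated` pattern, a reusable 1–5 scale (the alias name is ours, not a fixed convention):
+
+```python
+from typing import Annotated
+
+from pydantic import BaseModel, Field
+
+# One definition of the 1–5 scale, reused by every scored field
+Scale5 = Annotated[int, Field(ge=1, le=5)]
+
+
+class RiskScores(BaseModel):
+    inherent_likelihood: Scale5
+    inherent_impact: Scale5
+```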
+
+### 3.5 Tenant Context
+
+Every request carries a tenant context, set by middleware:
+
+```python
+# Accessible anywhere via context var
+from ground_control.middleware.tenant import get_current_tenant_id
+
+tenant_id = get_current_tenant_id()
+```
+
+Rules:
+- Every database query filters by `tenant_id`. No exceptions.
+- PostgreSQL Row-Level Security (RLS) is the safety net, not the primary mechanism. Application code must also filter.
+- Tests must verify that cross-tenant data leakage is impossible.
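+
+A minimal sketch of the context-var plumbing behind `get_current_tenant_id` (module layout per §1; the exact names are illustrative):
+
+```python
+# middleware/tenant.py (sketch)
+from contextvars import ContextVar
+from uuid import UUID
+
+_current_tenant_id: ContextVar[UUID | None] = ContextVar("current_tenant_id", default=None)
+
+
+def set_current_tenant_id(tenant_id: UUID) -> None:
+    """Called by the tenant-resolution middleware at the start of each request."""
+    _current_tenant_id.set(tenant_id)
+
+
+def get_current_tenant_id() -> UUID:
+    tenant_id = _current_tenant_id.get()
+    if tenant_id is None:
+        raise RuntimeError("No tenant bound to the current request context")
+    return tenant_id
+```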
+
+### 3.6 Request Context
+
+Every request gets a unique `request_id` (set by middleware). It propagates through:
+- All log entries (`structlog` context binding)
+- All audit log entries
+- Error responses (returned to client for support correlation)
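+
+One way to wire this up, assuming Starlette/FastAPI middleware and structlog's contextvars integration (requires `structlog.contextvars.merge_contextvars` in the processor chain; names are illustrative):
+
+```python
+import uuid
+
+import structlog
+from starlette.middleware.base import BaseHTTPMiddleware
+
+
+class RequestContextMiddleware(BaseHTTPMiddleware):
+    async def dispatch(self, request, call_next):
+        request_id = request.headers.get("X-Request-ID") or uuid.uuid4().hex
+        # Bind once; every structlog call in this request inherits the field
+        structlog.contextvars.clear_contextvars()
+        structlog.contextvars.bind_contextvars(request_id=request_id)
+        response = await call_next(request)
+        response.headers["X-Request-ID"] = request_id  # echoed for support correlation
+        return response
+```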
+
+## 4. Domain Layer Rules
+
+- Domain entities are plain Python classes or dataclasses. Not SQLAlchemy models.
+- Use cases are single-purpose functions or classes. One use case per business operation.
+- Use case signature: takes primitive types or domain entities, returns domain entities or DTOs. Never takes a `Request` object or returns a `Response`.
+- Repository interfaces are abstract classes defined in `domain/`. Implementations live in `infrastructure/`.
+- Domain services contain business logic that spans multiple entities.
+- The domain layer is independently testable with no database, no HTTP, no external services.
+
+```python
+# domain/services/risk_service.py
+class RiskService:
+    def __init__(self, risk_repo: RiskRepository, audit_log: AuditLog, event_bus: EventBus):
+        self.risk_repo = risk_repo
+        self.audit_log = audit_log
+        self.event_bus = event_bus
+
+    async def create_risk(self, tenant_id: UUID, actor_id: UUID, data: RiskCreate) -> Risk:
+        risk = Risk(tenant_id=tenant_id, **data.model_dump())
+        risk = await self.risk_repo.save(risk)
+        await self.audit_log.record(...)
+        await self.event_bus.publish(RiskCreated(risk_id=risk.id, tenant_id=tenant_id))
+        return risk
+```
+
+## 5. API Layer Rules
+
+- Route handlers are thin. They parse the request, call a use case, and format the response.
+- No business logic in route handlers. If you're writing an `if` that isn't about HTTP concerns, it belongs in the domain layer.
+- Use dependency injection (FastAPI's `Depends()`) for services, repos, and current user.
+- All routes return Pydantic schemas, never dicts or SQLAlchemy models.
+
+```python
+# api/v1/risks.py
+@router.post("/risks", response_model=RiskRead, status_code=201)
+async def create_risk(
+    data: RiskCreate,
+    risk_service: RiskService = Depends(get_risk_service),
+    current_user: User = Depends(get_current_user),
+):
+    risk = await risk_service.create_risk(
+        tenant_id=current_user.tenant_id,
+        actor_id=current_user.id,
+        data=data,
+    )
+    return risk
+```
+
+## 6. Infrastructure Layer Rules
+
+- Repository implementations use SQLAlchemy 2.0 async style.
+- Every repository method takes `tenant_id` as a parameter (defense in depth alongside RLS).
+- External service clients (S3, Redis, SMTP) are wrapped in thin adapter classes.
+- Adapter classes implement interfaces from `domain/`. They are swappable in tests.
+- Never import infrastructure modules from `domain/` or `api/`.
+
+## 7. Testing
+
+### Test Structure
+```
+tests/
+├── unit/           # Domain logic only. No DB, no HTTP. Fast.
+│   ├── domain/
+│   └── schemas/
+├── integration/    # With DB and real services. Use test containers.
+│   ├── api/
+│   └── infrastructure/
+└── e2e/            # Playwright. Full stack via Docker Compose.
+```
+
+### Rules
+- Unit tests cover domain services and entities. They use fakes/stubs for repositories (see the sketch after these rules).
+- Integration tests cover API endpoints and repository implementations. They use a real PostgreSQL instance (testcontainers).
+- Every API endpoint has at least one integration test for success and one for the primary error case.
+- Every domain service method has unit tests for its business rules.
+- Tests are independent. No shared mutable state. Each test sets up its own data.
+- Use `pytest` fixtures for dependency setup. Use `factory_boy` or simple factory functions for test data.
+- Test names describe behavior: `test_create_risk_fails_when_likelihood_out_of_range`.
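+
+A sketch of this style, reusing `RiskService` and `RiskCreate` from the examples above — hand-rolled fakes, no database, `pytest-asyncio` assumed:
+
+```python
+from uuid import uuid4
+
+import pytest
+
+
+class FakeRiskRepository:
+    async def save(self, risk):
+        return risk  # in-memory echo; no database
+
+
+class FakeAuditLog:
+    async def record(self, *args, **kwargs):
+        pass
+
+
+class FakeEventBus:
+    def __init__(self):
+        self.published = []
+
+    async def publish(self, event):
+        self.published.append(event)
+
+
+@pytest.mark.asyncio
+async def test_create_risk_publishes_created_event():
+    bus = FakeEventBus()
+    service = RiskService(FakeRiskRepository(), FakeAuditLog(), bus)
+    risk = await service.create_risk(
+        tenant_id=uuid4(),
+        actor_id=uuid4(),
+        data=RiskCreate(
+            title="Unreviewed admin accounts",
+            category="access_management",
+            inherent_likelihood=3,
+            inherent_impact=4,
+        ),
+    )
+    assert bus.published and bus.published[0].risk_id == risk.id
+```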
+
+### Coverage
+- Minimum 80% line coverage for `domain/`. No exceptions.
+- Minimum 70% line coverage for `api/` and `infrastructure/`.
+- Coverage is measured in CI and reported. Coverage drops fail the build.
+
+## 8. Error Handling Strategy
+
+```
+Layer           | Catches                       | Raises
+----------------|-------------------------------|----------------------------
+infrastructure/ | SQLAlchemy, boto3, redis-py   | GroundControlError subtypes
+domain/         | Nothing (or domain errors)    | GroundControlError subtypes
+api/            | GroundControlError subtypes   | HTTPException (via handler)
+middleware      | All unhandled exceptions      | 500 with request_id
+```
+
+The exception handler middleware maps exceptions to HTTP:
+```python
+NotFoundError → 404
+ValidationError → 422
+AuthorizationError → 403
+ConflictError → 409
+ExternalServiceError → 502
+GroundControlError → 500 (catch-all)
+```
+
+Every error response includes `request_id` for correlation.
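+
+A condensed sketch of that handler, assuming FastAPI; the mapping table above is the source of truth, and `request.state.request_id` is presumed set by the request-context middleware:
+
+```python
+from fastapi import FastAPI, Request
+from fastapi.responses import JSONResponse
+
+# Domain exceptions from exceptions/ — note this ValidationError is ours, not Pydantic's
+STATUS_BY_EXC = {
+    NotFoundError: 404,
+    ValidationError: 422,
+    AuthorizationError: 403,
+    ConflictError: 409,
+    ExternalServiceError: 502,
+}
+
+
+def install_exception_handlers(app: FastAPI) -> None:
+    @app.exception_handler(GroundControlError)
+    async def handle_gc_error(request: Request, exc: GroundControlError) -> JSONResponse:
+        status = next(
+            (code for exc_type, code in STATUS_BY_EXC.items() if isinstance(exc, exc_type)),
+            500,  # catch-all for any other GroundControlError subtype
+        )
+        return JSONResponse(
+            status_code=status,
+            content={"error": {
+                "code": exc.code or "internal_error",
+                "message": exc.message,
+                "request_id": request.state.request_id,
+            }},
+        )
+```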
+
+## 9. Code Style
+
+### Python
+- Formatter: `ruff format`
+- Linter: `ruff check`
+- Type checker: `mypy --strict`
+- All functions have type annotations. No `Any` unless unavoidable (and commented why).
+- Line length: 99 characters.
+- Imports: sorted by `ruff` (isort-compatible).
+- Docstrings: only on public API boundaries (use case functions, service classes). Not on every method. Code should be self-explanatory.
+
+### TypeScript
+- Formatter/Linter: `biome`
+- Type checker: `tsc --strict`
+- No `any` types. Use `unknown` and narrow.
+
+## 10. Git & CI
+
+- All code goes through PR. No direct push to `main` or `dev`.
+- PRs require: passing CI (lint + typecheck + tests), no coverage regression.
+- Commit messages: imperative mood, concise. `Add risk scoring engine` not `Added risk scoring engine` or `This commit adds...`
+- CI pipeline runs: `ruff check` → `ruff format --check` → `mypy` → `pytest` → `import-linter` → coverage report.
+- `import-linter` enforces the dependency rule. If `domain/` imports from `infrastructure/`, CI fails.
+
+## 11. Formal Methods
+
+Coq/Rocq proofs live in `proofs/` and verify critical invariants that testing alone cannot guarantee:
+
+### What gets proved
+- **Audit log integrity**: The hash chain is append-only and tamper-evident.
+- **RBAC/ABAC policy evaluation**: Permission checks are correct and complete — no privilege escalation paths exist.
+- **State machine transitions**: Entity lifecycle states (finding, assessment, remediation) can only reach valid configurations. No illegal state transitions are possible.
+- **Tenant isolation**: Query construction guarantees no cross-tenant data leakage.
+
+### What does NOT get proved
+- CRUD operations, API routing, UI components, serialization — standard testing is sufficient.
+
+### Structure
+```
+proofs/
+├── audit_log/ # Hash chain integrity, append-only guarantees
+├── authorization/ # RBAC/ABAC policy correctness
+├── state_machines/ # Lifecycle transition validity
+├── tenant_isolation/ # Query isolation guarantees
+└── README.md # How to build and verify proofs
+```
+
+### Integration
+- Proofs are checked in CI (Coq compiler verifies them).
+- When the corresponding domain logic changes, the proof must be updated to match.
+- Proofs reference the domain types they verify (via comments/documentation, not code extraction — the proofs model the logic, they don't compile to Python).
+- Development uses rocq-mcp (Model Context Protocol server for Rocq) for interactive proof writing, type checking, and tactic feedback. See issue #006b for setup.
diff --git a/docs/PRD.md b/docs/PRD.md
new file mode 100644
index 00000000..e9ff7b51
--- /dev/null
+++ b/docs/PRD.md
@@ -0,0 +1,390 @@
+# Ground Control — Product Requirements Document (PRD)
+
+**Version:** 1.0.0
+**Date:** 2026-03-07
+**Status:** Draft
+
+---
+
+## 1. Executive Summary
+
+Ground Control is an open, self-hostable IT Risk Management (ITRM) platform
+that replaces proprietary GRC tools such as AuditBoard ITRM, ServiceNow GRC,
+and Archer. It delivers a modern, API-first, plugin-extensible system for
+managing IT risk assessments, control testing, evidence collection, and
+compliance reporting.
+
+The platform is designed for internal audit teams, IT risk managers, compliance
+analysts, and — critically — AI agents that can perform assessments alongside
+human practitioners. Every workflow, object, and report is accessible through a
+versioned API and extensible via a plugin system.
+
+### 1.1 Problem Statement
+
+Organizations using AuditBoard ITRM and similar tools face recurring pain points:
+
+| Pain Point | Detail |
+|---|---|
+| **Vendor lock-in** | SaaS-only deployment, no self-hosting, data residency concerns |
+| **Rigid workflows** | Hard-coded assessment flows that don't match org-specific processes |
+| **Poor API coverage** | Limited automation; manual CSV exports for integration |
+| **No agent support** | No first-class path for AI/automation agents to perform assessments |
+| **Evidence sprawl** | Artifacts scattered across email, SharePoint, tickets with weak lineage |
+| **Framework silos** | Each framework (SOX, SOC 2, ISO 27001) maintained separately despite overlapping controls |
+| **Expensive seats** | Per-user pricing that discourages broad organizational participation |
+| **Opaque taxonomy** | Each team invents its own risk and control language; no shared library |
+
+### 1.2 Vision
+
+Ground Control consolidates IT risks, controls, and evidence into a single
+system with full lineage, accessible to both humans and AI agents, self-hostable,
+and extensible via plugins.
+
+---
+
+## 2. Target Users & Personas
+
+### P1 — IT Risk Manager (Riley)
+- Owns the IT risk register. Runs annual and continuous risk assessments.
+- Needs: heat maps, risk scoring, treatment tracking, board-ready reports.
+
+### P2 — Internal Auditor (Avery)
+- Plans and executes ITGC audits (SOX, SOC 2). Tests controls, collects evidence.
+- Needs: workpapers, test procedures, evidence linking, review workflows.
+
+### P3 — Control Owner (Jordan)
+- Business/IT staff responsible for operating a control (e.g., access reviews).
+- Needs: simple task list, evidence upload, status updates, reminders.
+
+### P4 — Compliance Analyst (Morgan)
+- Maps controls to multiple frameworks, tracks regulatory changes.
+- Needs: cross-framework mapping, gap analysis, regulation tracking.
+
+### P5 — CISO / Risk Committee (Pat)
+- Consumes dashboards and reports. Makes resource/accept/mitigate decisions.
+- Needs: executive dashboards, trend analysis, risk appetite visualization.
+
+### P6 — AI Assessment Agent
+- Automated agent that performs control testing, evidence analysis, risk scoring.
+- Needs: full API access, structured inputs/outputs, audit trail of agent actions.
+
+### P7 — Platform Administrator (Sam)
+- Configures the platform: SSO, roles, plugins, integrations.
+- Needs: admin console, SCIM provisioning, plugin marketplace, audit logs.
+
+---
+
+## 3. Frameworks & Standards Supported (Out of Box)
+
+| Framework | Coverage |
+|---|---|
+| SOX ITGC | Full — control objectives, test procedures, walkthroughs, deficiency classification |
+| SOC 2 (Trust Services Criteria) | Full — all five categories with pre-built control mappings |
+| ISO 27001:2022 | Full — Annex A controls, Statement of Applicability generation |
+| NIST CSF 2.0 | Full — all six functions, categories, subcategories |
+| NIST SP 800-53 Rev 5 | Full — control families with enhancements |
+| COBIT 2019 | Core — governance and management objectives |
+| PCI-DSS v4.0 | Full — requirements, testing procedures, compensating controls |
+| CIS Controls v8 | Full — safeguards mapped to implementation groups |
+| GDPR (Article 32+) | Supplemental — data protection controls overlay |
+| HIPAA Security Rule | Supplemental — administrative, physical, technical safeguards |
+| Custom Frameworks | User-defined frameworks loaded via plugin |
+
+**Cross-Framework Mapping:** The platform maintains a unified Common Control
+Library (CCL). Each common control maps to one or more framework-specific
+requirements, enabling "test once, comply many" workflows.
+
+---
+
+## 4. Core Capabilities
+
+### 4.1 Risk Management
+
+- **Risk Register** — Centralized catalog of IT risks with customizable taxonomy
+ (categories, likelihood scales, impact scales, risk appetite thresholds).
+- **Risk Assessment Campaigns** — Time-boxed assessment cycles with configurable
+ scoring methodologies (quantitative, qualitative, semi-quantitative).
+- **Risk Scoring Engine** — Pluggable scoring models. Default: 5x5 inherent /
+ residual matrix. Supports FAIR-based quantitative analysis via plugin.
+- **Risk Treatment Plans** — Accept, mitigate, transfer, avoid — with linked
+ action items, owners, and due dates.
+- **Heat Maps & Dashboards** — Interactive risk heat maps, trend lines, appetite
+ overlays.
+- **Continuous Risk Monitoring** — Ingest risk indicators from external sources
+ (vulnerability scanners, cloud posture tools) via API/plugins.
+
+### 4.2 Control Management
+
+- **Common Control Library (CCL)** — Reusable control definitions with standard
+ language, mapped across frameworks.
+- **Control Catalog** — Organization-specific controls linked to CCL entries.
+ Each control has: objective, description, type (preventive/detective/corrective),
+ nature (manual/automated/IT-dependent manual), frequency, owner.
+- **Control-to-Framework Mapping** — Many-to-many mapping. A single control can
+ satisfy SOX ITGC 4.1, ISO 27001 A.8.3, and NIST AC-2 simultaneously.
+- **Control Effectiveness Rating** — Configurable rating scales with evidence-based
+ justification.
+
+### 4.3 Assessment & Testing
+
+- **Assessment Campaigns** — Plan, assign, execute, review, and close assessment
+ cycles (e.g., "Q1 2026 SOX ITGC Testing").
+- **Test Procedures** — Templated or ad-hoc test steps. Each step captures:
+ expected result, actual result, evidence references, conclusion.
+- **Workpapers** — Structured workpaper documents with sections, findings,
+ and review sign-off chains.
+- **Sampling** — Configurable sampling methodologies (statistical, judgmental,
+ haphazard) with sample-size calculators.
+- **Agent-Performed Testing** — AI agents can execute test procedures via API,
+ attach results, and flag items for human review. Every agent action is logged
+ with full provenance (agent ID, model, prompt hash, timestamp).
+
+### 4.4 Evidence & Artifact Management
+
+- **Artifact Store** — Upload, version, tag, and retrieve any file type.
+ Server-side encryption at rest (AES-256). Client-side encryption option.
+- **Evidence Linking** — Link artifacts to controls, risks, test steps, findings,
+ or any other entity. Many-to-many with metadata.
+- **Evidence Requests** — Send structured requests to control owners with
+ deadlines and reminders. Owners upload directly; no email attachments.
+- **Automated Evidence Collection** — Plugins pull evidence from source systems
+ (AWS Config, Azure Policy, Jira, ServiceNow, GitHub) on schedule or trigger.
+- **Evidence Lineage** — Full chain of custody: who uploaded, when, which
+ assessment it was used in, who reviewed it, hash integrity verification.
+- **Retention Policies** — Configurable per-assessment or global retention
+ windows with automated archival/deletion.
+
+### 4.5 Findings & Issues
+
+- **Finding Lifecycle** — Draft → Open → Remediation In Progress → Validation
+ → Closed. Fully configurable states.
+- **Deficiency Classification** — SOX-aligned: deficiency, significant deficiency,
+ material weakness. Customizable for other frameworks.
+- **Remediation Tracking** — Action plans with owners, due dates, evidence of
+ remediation, and validation testing.
+- **Issue Aggregation** — Roll up findings from multiple assessments into a
+ unified issues view with risk ratings.
+
+### 4.6 Reporting & Dashboards
+
+- **Executive Dashboards** — Risk posture, control health, assessment progress,
+ finding trends.
+- **Board Reports** — Generate PDF summary reports.
+- **Custom Reports** — Filterable, exportable report views.
+- **Scheduled Reports** — Email delivery on configurable schedules.
+- **API-Driven Analytics** — All report data available via API for BI tool
+ integration (Tableau, Power BI, Looker).
+
+### 4.7 Workflow & Collaboration
+
+- **Review Workflows** — Configurable review chains (e.g., preparer → reviewer → approver).
+- **Notifications** — In-app, email, Slack/Teams webhooks. Configurable per
+ user and per event type.
+- **Comments & Annotations** — Threaded comments on any entity. @-mention
+ users or groups.
+- **Task Assignment** — Assign any work item to users or groups with due dates
+ and SLA tracking.
+
+---
+
+## 5. Agent-First Design
+
+### 5.1 Agent Interaction Model
+
+Agents are first-class actors in Ground Control. They authenticate via API keys
+or OAuth2 client credentials and interact through the same API as humans.
+
+### 5.2 Agent Capabilities
+
+| Capability | Description |
+|---|---|
+| **Execute Test Procedures** | Agent receives structured test steps, performs analysis, returns structured results |
+| **Analyze Evidence** | Agent ingests uploaded artifacts, extracts relevant data, flags anomalies |
+| **Score Risks** | Agent applies scoring models to risk data, provides quantitative assessments |
+| **Draft Findings** | Agent produces finding drafts from test results for human review |
+| **Map Controls** | Agent suggests framework mappings for new controls based on CCL |
+| **Monitor Continuously** | Agent polls external systems and updates risk indicators |
+
+### 5.3 Agent Provenance
+
+Every action by an agent records:
+- Agent identity (registered agent ID, human owner)
+- Model/version used
+- Input context hash (for reproducibility)
+- Confidence score (where applicable)
+- Human review status (pending / approved / rejected)
+
+---
+
+## 6. Common Language & Reusability
+
+### 6.1 Shared Taxonomy
+
+Ground Control enforces a configurable but consistent taxonomy:
+
+- **Risk Categories** — e.g., Access Management, Change Management, Operations,
+ Data Protection, Third Party, Business Continuity.
+- **Control Types** — Preventive, Detective, Corrective.
+- **Control Nature** — Manual, Automated, IT-Dependent Manual.
+- **Likelihood Scales** — Configurable 3/4/5-point scales with org-defined labels.
+- **Impact Scales** — Configurable with financial, reputational, regulatory dimensions.
+- **Rating Scales** — Effective, Needs Improvement, Ineffective (configurable).
+
+### 6.2 Common Control Library (CCL)
+
+The CCL is the heart of reusability:
+
+```
+┌──────────────────────────────────────────────────┐
+│ Common Control: CC-AM-001 │
+│ "Logical access to systems is restricted to │
+│ authorized users based on business need." │
+│ │
+│ Maps to: │
+│ ├── SOX ITGC: Access to Programs and Data │
+│ ├── SOC 2: CC6.1 │
+│ ├── ISO 27001: A.8.3, A.5.15 │
+│ ├── NIST 800-53: AC-2, AC-3 │
+│ └── PCI-DSS: 7.1, 7.2 │
+│ │
+│ Test once → evidence satisfies all five frameworks│
+└──────────────────────────────────────────────────┘
+```
+
+### 6.3 Template Library
+
+- **Assessment Templates** — Pre-built assessment structures for common scopes.
+- **Test Procedure Templates** — Reusable test procedures linked to CCL controls.
+- **Workpaper Templates** — Standard workpaper formats with required sections.
+- **Finding Templates** — Boilerplate finding language by deficiency type.
+- **Report Templates** — Customizable report formats.
+
+All templates are versionable, exportable, and shareable between tenants.
+
+---
+
+## 7. Non-Functional Requirements
+
+| Requirement | Target |
+|---|---|
+| **Availability** | No single points of failure in the application layer; infrastructure HA depends on deployment topology |
+| **Response Time** | API p95 < 200ms for CRUD operations; < 2s for reports |
+| **Concurrent Users** | Design for horizontal scaling; no hard-coded concurrency limits |
+| **Data Encryption** | AES-256 at rest, TLS 1.3 in transit |
+| **Audit Logging** | Immutable audit log for every state change |
+| **Multi-Tenancy** | Full tenant isolation (schema-per-tenant or DB-per-tenant) |
+| **Accessibility** | WCAG 2.1 AA compliant |
+| **Internationalization** | UTF-8 throughout; UI string externalization for i18n |
+| **Backup/Recovery** | RPO < 1 hour, RTO < 4 hours |
+| **Max Artifact Size** | 500 MB per file; configurable per tenant |
+
+---
+
+## 8. Integration Requirements
+
+### 8.1 Authentication & Identity
+
+| Method | Details |
+|---|---|
+| SAML 2.0 | SP-initiated and IdP-initiated SSO |
+| OpenID Connect (OIDC) | Authorization Code flow with PKCE |
+| SCIM 2.0 | Automated user/group provisioning and de-provisioning |
+| API Keys | Long-lived keys for service accounts and agents |
+| OAuth2 Client Credentials | Machine-to-machine authentication for agents |
+| MFA | TOTP and WebAuthn support for local accounts |
+
+### 8.2 External System Integrations (via Plugins)
+
+| Category | Examples |
+|---|---|
+| Ticketing | Jira, ServiceNow, Azure DevOps |
+| Cloud Posture | AWS Config/SecurityHub, Azure Policy/Defender, GCP SCC |
+| Identity | Okta, Azure AD/Entra ID, Ping Identity |
+| Source Control | GitHub, GitLab, Bitbucket (for change management evidence) |
+| Communication | Slack, Microsoft Teams, Email (SMTP) |
+| BI/Analytics | Tableau, Power BI, Looker (via API or direct DB read replica) |
+| Vulnerability | Qualys, Tenable, Rapid7 |
+| CMDB | ServiceNow CMDB, Device42, Snipe-IT |
+
+---
+
+## 9. Success Metrics
+
+| Metric | Target |
+|---|---|
+| API coverage | All core workflows (risk, control, assessment, evidence, finding) accessible via API at v0.1 |
+| Agent automation | At least one end-to-end agent workflow (test procedure execution) functional at v0.4 |
+| Cross-framework mapping | CCL supports SOX ITGC, SOC 2, and ISO 27001 with shared controls at v0.3 |
+| Self-host deployment | Single-command Docker Compose deployment; under 15 minutes to first login |
+| Test coverage | Backend unit test coverage > 80%; all API endpoints have integration tests |
+
+---
+
+## 10. Release Roadmap
+
+### v0.1 — Foundation (MVP)
+- Core data model (risks, controls, assessments, evidence, findings)
+- REST API with OpenAPI spec
+- Basic web UI (risk register, control catalog, assessment execution)
+- Local auth + OIDC SSO
+- File-based artifact storage
+- SQLite/PostgreSQL backend
+- MCP server integration (rocq-mcp for formal proofs, AWS MCP for infrastructure)
+
+### v0.2 — Collaboration
+- Review workflows and approval chains
+- Comments and notifications
+- Evidence requests
+- SAML 2.0 SSO + SCIM provisioning
+
+### v0.3 — Frameworks & Mapping
+- Pre-built framework libraries (SOX, SOC 2, ISO 27001, NIST)
+- Common Control Library with cross-mapping
+- Template library (assessments, test procedures, workpapers)
+
+### v0.4 — Agents & Automation
+- Agent authentication and provenance tracking
+- Agent SDK (Python, TypeScript)
+- Automated evidence collection plugins
+- Continuous monitoring hooks
+
+### v0.5 — Reporting & Analytics
+- Executive dashboards
+- Custom report builder
+- Scheduled report delivery
+
+### v1.0 — Production Ready
+- Multi-tenancy
+- Plugin system (install from local packages)
+- Full RBAC with ABAC policies
+- Kubernetes Helm chart and Docker Compose
+- Comprehensive documentation and certification guide
+
+---
+
+## 11. Risks & Mitigations
+
+| Risk | Impact | Mitigation |
+|---|---|---|
+| Scope creep from framework complexity | High | Start with SOX ITGC + SOC 2; add frameworks via plugins |
+| Agent trust and reliability | High | Mandatory human review for agent-produced findings; confidence thresholds |
+| Data migration from AuditBoard | Medium | Build import adapters for CSV/API export formats |
+| Plugin security | Medium | Sandboxed plugin runtime; signed plugin packages |
+| Self-hosting operational burden | Medium | Provide Helm charts, Terraform modules, and runbooks |
+
+---
+
+## Appendix A: Glossary
+
+| Term | Definition |
+|---|---|
+| **CCL** | Common Control Library — reusable control definitions mapped across frameworks |
+| **ITGC** | IT General Controls — foundational controls over IT environments |
+| **GRC** | Governance, Risk, and Compliance |
+| **ITRM** | IT Risk Management |
+| **Assessment Campaign** | A time-boxed cycle of evaluating controls or risks |
+| **Evidence** | Any artifact that demonstrates a control is operating effectively |
+| **Finding** | An identified gap or deficiency in a control or process |
+| **Workpaper** | A structured document that records testing procedures and results |
+| **Tenant** | An isolated organizational unit within the platform |
+| **Agent** | An AI/automation system that performs assessment tasks via API |
diff --git a/docs/api/API_SPEC.md b/docs/api/API_SPEC.md
new file mode 100644
index 00000000..32592294
--- /dev/null
+++ b/docs/api/API_SPEC.md
@@ -0,0 +1,656 @@
+# Ground Control — API & Plugin Architecture Specification
+
+**Version:** 1.0.0
+**Date:** 2026-03-07
+
+---
+
+## 1. API Design Principles
+
+| Principle | Implementation |
+|---|---|
+| **REST-first** | Standard HTTP verbs, resource-oriented URLs, JSON payloads |
+| **Versioned** | URL-based versioning (`/api/v1/`) with deprecation policy |
+| **Consistent** | Uniform response envelopes, error formats, pagination |
+| **Discoverable** | OpenAPI 3.1 spec auto-generated and served at `/api/v1/openapi.json` |
+| **Filterable** | Query parameters for filtering, sorting, field selection, includes |
+| **Idempotent** | PUT/DELETE are idempotent; POST supports `Idempotency-Key` header |
+| **Agent-Friendly** | Structured inputs/outputs, clear schemas, webhook support |
+
+---
+
+## 2. Authentication
+
+### 2.1 Token Endpoint
+
+```
+POST /api/v1/auth/token
+Content-Type: application/x-www-form-urlencoded
+
+grant_type=client_credentials
+&client_id=agent-001
+&client_secret=xxxxx
+&scope=assessments:read assessments:write
+```
+
+**Response:**
+```json
+{
+ "access_token": "eyJhbGciOi...",
+ "token_type": "bearer",
+ "expires_in": 3600,
+ "scope": "assessments:read assessments:write"
+}
+```
+
+### 2.2 Using Tokens
+
+All API requests include:
+```
+Authorization: Bearer <access_token>
+X-Tenant-ID: <tenant_id>        (optional if single-tenant)
+```
+
+### 2.3 API Keys (Alternative)
+
+For simpler integrations:
+```
+Authorization: ApiKey <api_key>
+```
+
+---
+
+## 3. Response Format
+
+### 3.1 Single Resource
+
+```json
+{
+ "id": "550e8400-e29b-41d4-a716-446655440000",
+ "ref_id": "RISK-001",
+ "title": "Unauthorized access to production database",
+ "category": "access_management",
+ "status": "open",
+ "inherent_likelihood": 4,
+ "inherent_impact": 5,
+ "inherent_score": 20,
+ "owner_id": "...",
+ "created_at": "2026-03-01T10:00:00Z",
+ "updated_at": "2026-03-07T14:30:00Z"
+}
+```
+
+### 3.2 Collection
+
+```json
+{
+ "items": [ ... ],
+ "meta": {
+ "total": 142,
+ "page": 1,
+ "per_page": 25,
+ "total_pages": 6
+ },
+ "links": {
+ "self": "/api/v1/risks?page=1&per_page=25",
+ "next": "/api/v1/risks?page=2&per_page=25",
+ "prev": null,
+ "first": "/api/v1/risks?page=1&per_page=25",
+ "last": "/api/v1/risks?page=6&per_page=25"
+ }
+}
+```
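+
+Clients page through a collection by following `links.next` until it is `null`. A sketch using `httpx` (base URL and token are placeholders):
+
+```python
+import httpx
+
+
+def iter_risks(base_url: str, token: str):
+    """Yield every risk, one page at a time, via the `links.next` chain."""
+    headers = {"Authorization": f"Bearer {token}"}
+    url = "/api/v1/risks?per_page=25"
+    with httpx.Client(base_url=base_url, headers=headers) as client:
+        while url:
+            page = client.get(url).json()
+            yield from page["items"]
+            url = page["links"]["next"]  # null/None on the last page
+```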
+
+### 3.3 Error Response
+
+```json
+{
+ "error": {
+ "code": "validation_error",
+ "message": "Request validation failed",
+ "details": [
+ {
+ "field": "inherent_likelihood",
+ "message": "Value must be between 1 and 5",
+ "code": "out_of_range"
+ }
+ ],
+ "request_id": "req_abc123"
+ }
+}
+```
+
+**HTTP Status Codes:**
+
+| Code | Usage |
+|---|---|
+| 200 | Success (GET, PUT, PATCH) |
+| 201 | Created (POST) |
+| 204 | No Content (DELETE) |
+| 400 | Bad Request (malformed input) |
+| 401 | Unauthorized (missing/invalid token) |
+| 403 | Forbidden (insufficient permissions) |
+| 404 | Not Found |
+| 409 | Conflict (duplicate, optimistic lock) |
+| 422 | Unprocessable Entity (validation errors) |
+| 429 | Too Many Requests (rate limited) |
+| 500 | Internal Server Error |
+
+### 3.4 PATCH Semantics
+
+PATCH requests use JSON Merge Patch ([RFC 7396](https://tools.ietf.org/html/rfc7396)). Send only the fields to update:
+
+```
+PATCH /api/v1/risks/{id}
+Content-Type: application/merge-patch+json
+
+{
+ "status": "mitigated",
+ "residual_likelihood": 2
+}
+```
+
+Fields not included in the request body are left unchanged. Set a field to `null` to clear it.
+
+---
+
+## 4. Core API Endpoints
+
+### 4.1 Risks
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/risks` | List risks (filterable, paginated) |
+| POST | `/api/v1/risks` | Create a risk |
+| GET | `/api/v1/risks/{id}` | Get a risk by ID |
+| PUT | `/api/v1/risks/{id}` | Replace a risk |
+| PATCH | `/api/v1/risks/{id}` | Partial update a risk |
+| POST | `/api/v1/risks/{id}/archive` | Archive a risk (soft-delete) |
+| DELETE | `/api/v1/risks/{id}` | Delete a risk (hard-delete, admin only) |
+| GET | `/api/v1/risks/{id}/treatments` | List treatment plans for a risk |
+| POST | `/api/v1/risks/{id}/treatments` | Create a treatment plan |
+| GET | `/api/v1/risks/{id}/controls` | List linked controls |
+| GET | `/api/v1/risks/{id}/artifacts` | List linked evidence |
+| GET | `/api/v1/risks/{id}/audit-log` | Get audit history for a risk |
+
+**Filter parameters:**
+```
+GET /api/v1/risks?category=access_management&status=open&inherent_score[gte]=15&owner_id=...&sort=-inherent_score&fields=id,title,inherent_score&include=owner,controls&page=1&per_page=25
+```
+
+### 4.2 Controls
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/controls` | List controls |
+| POST | `/api/v1/controls` | Create a control |
+| GET | `/api/v1/controls/{id}` | Get a control |
+| PUT | `/api/v1/controls/{id}` | Replace a control |
+| PATCH | `/api/v1/controls/{id}` | Partial update |
+| POST | `/api/v1/controls/{id}/retire` | Retire a control |
+| DELETE | `/api/v1/controls/{id}` | Delete a control (hard-delete, admin only) |
+| GET | `/api/v1/controls/{id}/mappings` | List framework mappings |
+| POST | `/api/v1/controls/{id}/mappings` | Add a framework mapping |
+| DELETE | `/api/v1/controls/{id}/mappings/{mapping_id}` | Remove mapping |
+| POST | `/api/v1/controls/{id}/suggest-mappings` | AI agent suggests mappings |
+| GET | `/api/v1/controls/{id}/test-history` | Historical test results |
+
+### 4.3 Frameworks
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/frameworks` | List frameworks |
+| GET | `/api/v1/frameworks/{id}` | Get framework details |
+| GET | `/api/v1/frameworks/{id}/requirements` | List requirements (hierarchical) |
+| GET | `/api/v1/frameworks/{id}/coverage` | Coverage matrix (mapped vs. unmapped) |
+
+### 4.4 Assessment Campaigns
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/assessments` | List campaigns |
+| POST | `/api/v1/assessments` | Create a campaign |
+| GET | `/api/v1/assessments/{id}` | Get campaign |
+| PATCH | `/api/v1/assessments/{id}` | Update campaign |
+| POST | `/api/v1/assessments/{id}/finalize` | Finalize (lock) campaign |
+| GET | `/api/v1/assessments/{id}/progress` | Campaign progress summary |
+| GET | `/api/v1/assessments/{id}/test-procedures` | List test procedures |
+| POST | `/api/v1/assessments/{id}/test-procedures` | Add test procedure |
+
+### 4.5 Test Procedures
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/test-procedures/{id}` | Get test procedure with steps |
+| PATCH | `/api/v1/test-procedures/{id}` | Update procedure |
+| POST | `/api/v1/test-procedures/{id}/results` | Submit results (human or agent) |
+| GET | `/api/v1/test-procedures/{id}/steps` | List test steps |
+| PATCH | `/api/v1/test-procedures/{id}/steps/{step_id}` | Update a step result |
+| POST | `/api/v1/test-procedures/{id}/submit-for-review` | Submit for review |
+| POST | `/api/v1/test-procedures/{id}/approve` | Approve workpaper |
+| POST | `/api/v1/test-procedures/{id}/reject` | Reject with comments |
+
+### 4.6 Artifacts (Evidence)
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/artifacts` | List artifacts |
+| POST | `/api/v1/artifacts` | Upload artifact (multipart) |
+| GET | `/api/v1/artifacts/{id}` | Get artifact metadata |
+| GET | `/api/v1/artifacts/{id}/download` | Download artifact (pre-signed URL redirect) |
+| GET | `/api/v1/artifacts/{id}/versions` | List versions |
+| POST | `/api/v1/artifacts/{id}/versions` | Upload new version |
+| GET | `/api/v1/artifacts/{id}/links` | List entity links |
+| POST | `/api/v1/artifacts/{id}/links` | Link to entity |
+| DELETE | `/api/v1/artifacts/{id}/links/{link_id}` | Unlink |
+| GET | `/api/v1/artifacts/{id}/lineage` | Full chain of custody |
+
+**Upload flow:**
+```
+1. POST /api/v1/artifacts/upload-url
+ → Returns pre-signed S3 URL for direct upload
+
+2. Client uploads file directly to S3
+
+3. POST /api/v1/artifacts
+ Body: { "storage_key": "...", "filename": "...", "size_bytes": ..., "sha256_hash": "..." }
+ → Creates artifact record, verifies hash
+```
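+
+The same flow from the client side, sketched with `httpx`; the field names in the `upload-url` response (`upload_url`, `storage_key`) are assumptions about an elided payload:
+
+```python
+import hashlib
+from pathlib import Path
+
+import httpx
+
+
+def upload_artifact(client: httpx.Client, path: str) -> dict:
+    data = Path(path).read_bytes()
+
+    # 1. Ask the API for a pre-signed upload URL
+    presign = client.post("/api/v1/artifacts/upload-url").json()
+
+    # 2. Upload the bytes directly to object storage
+    put = httpx.put(presign["upload_url"], content=data)
+    put.raise_for_status()
+
+    # 3. Register the artifact; the server re-verifies the hash
+    return client.post("/api/v1/artifacts", json={
+        "storage_key": presign["storage_key"],
+        "filename": Path(path).name,
+        "size_bytes": len(data),
+        "sha256_hash": hashlib.sha256(data).hexdigest(),
+    }).json()
+```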
+
+### 4.7 Evidence Requests
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/evidence-requests` | List requests |
+| POST | `/api/v1/evidence-requests` | Create a request |
+| PATCH | `/api/v1/evidence-requests/{id}` | Update request |
+| POST | `/api/v1/evidence-requests/{id}/submit` | Control owner submits evidence |
+| POST | `/api/v1/evidence-requests/{id}/accept` | Auditor accepts |
+| POST | `/api/v1/evidence-requests/{id}/reject` | Auditor rejects |
+
+### 4.8 Findings
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/findings` | List findings |
+| POST | `/api/v1/findings` | Create a finding |
+| GET | `/api/v1/findings/{id}` | Get finding |
+| PATCH | `/api/v1/findings/{id}` | Update finding |
+| GET | `/api/v1/findings/{id}/remediation` | Get remediation plan |
+| POST | `/api/v1/findings/{id}/remediation` | Create remediation plan |
+| POST | `/api/v1/findings/{id}/validate` | Validate remediation |
+| POST | `/api/v1/findings/{id}/close` | Close finding |
+
+### 4.9 Agents
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/agents` | List registered agents |
+| POST | `/api/v1/agents` | Register an agent |
+| GET | `/api/v1/agents/{id}` | Get agent details |
+| PATCH | `/api/v1/agents/{id}` | Update agent config |
+| DELETE | `/api/v1/agents/{id}` | Revoke agent |
+| GET | `/api/v1/agents/{id}/assignments` | Get pending assignments |
+| GET | `/api/v1/agents/{id}/history` | Agent action history |
+
+### 4.10 Common Control Library
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/ccl` | Browse CCL entries |
+| GET | `/api/v1/ccl/{id}` | Get CCL entry with mappings |
+| POST | `/api/v1/ccl/{id}/adopt` | Adopt CCL entry into org catalog |
+| GET | `/api/v1/ccl/search` | Search CCL by keyword or framework |
+
+### 4.11 Taxonomy
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/taxonomy` | List all taxonomy types |
+| GET | `/api/v1/taxonomy/{type}` | List values for a type |
+| POST | `/api/v1/taxonomy/{type}` | Add a value |
+| PATCH | `/api/v1/taxonomy/{type}/{id}` | Update a value |
+
+### 4.12 Reports
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/reports/templates` | List report templates |
+| POST | `/api/v1/reports/generate` | Generate a report |
+| GET | `/api/v1/reports/{id}` | Get report status/download |
+| GET | `/api/v1/reports/dashboards/risk-posture` | Risk posture dashboard data |
+| GET | `/api/v1/reports/dashboards/control-health` | Control health dashboard data |
+| GET | `/api/v1/reports/dashboards/assessment-progress` | Assessment progress data |
+
+### 4.13 Audit Logs
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/audit-logs` | Query audit logs (filterable) |
+| GET | `/api/v1/audit-logs/export` | Export logs (CSV/JSON) |
+
+### 4.14 Search
+
+| Method | Endpoint | Description |
+|---|---|---|
+| GET | `/api/v1/search?q=...&type=...` | Full-text search across entities |
+
+---
+
+## 5. GraphQL API
+
+Deferred. The REST API with `include`, `fields`, and filter parameters covers current query needs. GraphQL may be added in a future version for complex relational queries.
+
+---
+
+## 6. Webhook Events
+
+External systems can subscribe to events:
+
+```
+POST /api/v1/webhooks
+{
+ "url": "https://example.com/webhook",
+ "events": ["finding.opened", "assessment.completed", "risk.score_changed"],
+ "secret": "whsec_..."
+}
+```
+
+**Webhook payload:**
+```json
+{
+ "id": "evt_abc123",
+ "type": "finding.opened",
+ "timestamp": "2026-03-07T15:00:00Z",
+ "tenant_id": "...",
+ "data": {
+ "finding": {
+ "id": "...",
+ "ref_id": "FIND-042",
+ "title": "Excessive admin access in production",
+ "classification": "significant_deficiency",
+ "risk_rating": "high"
+ }
+ }
+}
+```
+
+**Security:** Webhooks are signed with HMAC-SHA256 using the shared secret. Header: `X-GC-Signature`.
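+
+Receivers should verify the signature over the raw request body before trusting a payload. A minimal sketch, assuming the header carries the hex-encoded HMAC (confirm the exact encoding against your deployment):
+
+```python
+import hashlib
+import hmac
+
+
+def verify_webhook(secret: str, raw_body: bytes, signature_header: str) -> bool:
+    """Constant-time comparison of the expected and received HMAC-SHA256."""
+    expected = hmac.new(secret.encode(), raw_body, hashlib.sha256).hexdigest()
+    return hmac.compare_digest(expected, signature_header)
+```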
+
+---
+
+## 7. Rate Limiting
+
+| Client Type | Default Limit |
+|---|---|
+| Authenticated user | 1000 req/min |
+| Agent | 500 req/min |
+| API key (service account) | 2000 req/min |
+
+Rate limit headers:
+```
+X-RateLimit-Limit: 1000
+X-RateLimit-Remaining: 950
+X-RateLimit-Reset: 1709820600
+```
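+
+On a 429, clients should sleep until the advertised reset time rather than hammering the endpoint — a minimal sketch:
+
+```python
+import time
+
+import httpx
+
+
+def get_with_backoff(client: httpx.Client, url: str) -> httpx.Response:
+    """Retry after the window advertised in X-RateLimit-Reset (epoch seconds)."""
+    while True:
+        response = client.get(url)
+        if response.status_code != 429:
+            return response
+        reset_at = int(response.headers.get("X-RateLimit-Reset", "0"))
+        time.sleep(max(reset_at - time.time(), 1.0))
+```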
+
+---
+
+## 8. Plugin Architecture
+
+### 8.1 Plugin Package Structure
+
+```
+my-plugin/
+├── plugin.yaml           # Plugin manifest
+├── src/
+│   ├── main.py           # Entry point (Python) or main.ts (TypeScript)
+│   ├── handlers.py       # Event handlers
+│   ├── api.py            # Custom API endpoints
+│   └── ui/               # Optional micro-frontend components
+├── schemas/
+│   ├── config.json       # Configuration schema (JSON Schema)
+│   └── entities.json     # Custom entity definitions (if any)
+├── frameworks/           # Framework definitions (for framework plugins)
+│   └── iso27001.yaml
+├── tests/
+│   └── test_main.py
+└── README.md
+```
+
+### 8.2 Plugin Manifest (`plugin.yaml`)
+
+```yaml
+name: aws-evidence-collector
+version: 1.2.0
+type: evidence_collector
+author: Ground Control Community
+description: Collects evidence from AWS Config, SecurityHub, and CloudTrail
+license: MIT
+
+requires:
+  ground_control: ">=0.4.0"
+
+permissions:
+  - artifacts:write
+  - controls:read
+  - assessments:read
+  - events:subscribe:assessment.started
+
+config_schema: schemas/config.json
+
+events:
+  subscribes:
+    - assessment.started
+    - evidence_collection.scheduled
+  publishes:
+    - evidence_collection.completed
+    - evidence_collection.failed
+
+api_routes:
+  - path: /plugins/aws-collector/status
+    method: GET
+    handler: api:get_status
+  - path: /plugins/aws-collector/collect-now
+    method: POST
+    handler: api:trigger_collection
+
+ui_components:
+  - name: AWSCollectorConfig
+    mount_point: plugin_settings
+    source: ui/AWSCollectorConfig.jsx
+
+signature: "ed25519:base64..."
+```
+
+### 8.3 Plugin SDK
+
+Plugins use the Ground Control Plugin SDK:
+
+```python
+from ground_control.plugin import Plugin, event_handler, api_route
+from ground_control.sdk import GroundControlClient
+
+
+class AWSCollectorPlugin(Plugin):
+    """Collects evidence from AWS services."""
+
+    def on_activate(self, config: dict):
+        """Called when plugin is enabled."""
+        self.aws_region = config["aws_region"]
+        self.gc = GroundControlClient(self.context)
+        self.last_run = None
+
+    @event_handler("assessment.started")
+    async def on_assessment_started(self, event):
+        """Auto-collect evidence when an assessment starts."""
+        campaign = event.data["campaign"]
+        controls = await self.gc.controls.list(
+            campaign_id=campaign["id"],
+            tags=["aws"],
+        )
+        for control in controls:
+            await self.collect_for_control(control)
+
+    @event_handler("evidence_collection.scheduled")
+    async def on_scheduled(self, event):
+        """Handle scheduled collection runs."""
+        await self.collect_all()
+
+    @api_route("GET", "/plugins/aws-collector/status")
+    async def get_status(self, request):
+        """Custom API endpoint for collection status."""
+        return {"last_run": self.last_run, "status": "healthy"}
+
+    async def collect_for_control(self, control):
+        """Collect an AWS Config snapshot for a control."""
+        # ... AWS API calls produce `snapshot_data` ...
+        artifact = await self.gc.artifacts.upload(
+            file_data=snapshot_data,
+            filename=f"aws-config-{control['ref_id']}.json",
+            tags=["aws", "config", "auto-collected"],
+        )
+        await self.gc.artifacts.link(
+            artifact_id=artifact["id"],
+            entity_type="control",
+            entity_id=control["id"],
+            context_note="Auto-collected AWS Config snapshot",
+        )
+        self.publish_event("evidence_collection.completed", {
+            "control_id": control["id"],
+            "artifact_id": artifact["id"],
+        })
+```
+
+### 8.4 Plugin Lifecycle
+
+```
+Install → Configure → Enable → Running → Disable → Uninstall
+ │
+ ├── Event: plugin receives domain events
+ ├── API: plugin custom endpoints are active
+ ├── UI: plugin components are mounted
+ └── Health: periodic health checks
+```
+
+### 8.5 Plugin Sandboxing
+
+| Mechanism | Description |
+|---|---|
+| **Process isolation** | Plugins run in separate processes (default for Python plugins) |
+| **Scoped SDK** | Plugin SDK only exposes permitted operations based on declared permissions |
+| **Resource limits** | CPU time, memory, and API call rate limits per plugin |
+| **Audit trail** | All plugin actions logged with plugin identity |
+
+### 8.6 Framework Plugin Example
+
+```yaml
+# frameworks/pci-dss-v4.yaml
+framework:
+  name: "PCI-DSS v4.0"
+  version: "4.0"
+  description: "Payment Card Industry Data Security Standard"
+
+requirements:
+  - ref_id: "1"
+    title: "Install and Maintain Network Security Controls"
+    children:
+      - ref_id: "1.1"
+        title: "Processes and mechanisms for installing and maintaining network security controls are defined and understood"
+        children:
+          - ref_id: "1.1.1"
+            title: "All security policies and operational procedures identified in Requirement 1 are documented, kept up to date, in use, and known to all affected parties"
+      - ref_id: "1.2"
+        title: "Network security controls (NSCs) are configured and maintained"
+        children:
+          - ref_id: "1.2.1"
+            title: "Configuration standards for NSC rulesets are defined, implemented, maintained"
+  # ... continues for all 12 requirements and sub-requirements
+
+ccl_mappings:
+  - requirement: "7.1"
+    ccl_entries: ["CC-AM-001", "CC-AM-002"]
+  - requirement: "7.2"
+    ccl_entries: ["CC-AM-001", "CC-AM-003"]
+```
+
+---
+
+## 9. Agent SDK
+
+### 9.1 Python Agent SDK
+
+```python
+import asyncio
+
+from ground_control.agent import AgentClient
+
+
+async def main():
+    # Authenticate
+    client = AgentClient(
+        base_url="https://gc.example.com",
+        client_id="agent-sox-tester",
+        client_secret="...",
+    )
+
+    # Get assignments
+    assignments = await client.get_assignments(
+        status="pending",
+        campaign_type="sox_itgc",
+    )
+
+    for assignment in assignments:
+        procedure = await client.get_test_procedure(assignment.procedure_id)
+
+        # Perform testing logic
+        results = await perform_test(procedure)
+
+        # Submit results
+        await client.submit_results(
+            procedure_id=procedure.id,
+            steps=[
+                {
+                    "step_number": 1,
+                    "actual_result": "Verified access review completed on 2026-02-15",
+                    "conclusion": "pass",
+                    "evidence_ids": ["artifact-uuid-1"],
+                },
+                {
+                    "step_number": 2,
+                    "actual_result": "Found 3 accounts without recent review",
+                    "conclusion": "fail",
+                    "evidence_ids": ["artifact-uuid-2"],
+                },
+            ],
+            conclusion="ineffective",
+            confidence=0.85,
+            notes="3 accounts identified without access review in 90+ days",
+        )
+
+
+asyncio.run(main())
+```
+
+### 9.2 TypeScript Agent SDK
+
+```typescript
+import { AgentClient } from '@ground-control/agent-sdk';
+
+const client = new AgentClient({
+ baseUrl: 'https://gc.example.com',
+ clientId: 'agent-sox-tester',
+ clientSecret: '...'
+});
+
+const assignments = await client.getAssignments({ status: 'pending' });
+
+for (const assignment of assignments) {
+ const procedure = await client.getTestProcedure(assignment.procedureId);
+ const results = await performTest(procedure);
+ await client.submitResults(procedure.id, results);
+}
+```
+
+---
+
+## 10. API Versioning & Deprecation
+
+| Policy | Detail |
+|---|---|
+| Version format | URL path: `/api/v1/`, `/api/v2/` |
+| Deprecation notice | 6 months before removal; `Sunset` header on deprecated endpoints |
+| Breaking changes | New major version only; non-breaking changes within version |
+| Backward compatibility | New optional fields, new endpoints, new enum values are non-breaking |
diff --git a/docs/architecture/ARCHITECTURE.md b/docs/architecture/ARCHITECTURE.md
new file mode 100644
index 00000000..d87a5ca1
--- /dev/null
+++ b/docs/architecture/ARCHITECTURE.md
@@ -0,0 +1,445 @@
+# Ground Control — System Architecture
+
+**Version:** 1.0.0
+**Date:** 2026-03-07
+
+---
+
+## 1. Architecture Principles
+
+| Principle | Rationale |
+|---|---|
+| **API-First** | Every capability exposed via versioned API before building UI |
+| **Plugin-Extensible** | Core is lean; frameworks, integrations, and workflows are plugins |
+| **Agent-Ready** | AI agents are first-class consumers of every API surface |
+| **Multi-Tenant** | Logical or physical tenant isolation; configurable per deployment |
+| **Self-Hostable** | Runs on a single machine (Docker Compose) or scales to Kubernetes |
+| **Event-Driven** | State changes publish domain events for async processing |
+| **Secure by Default** | Encryption at rest and in transit; RBAC + ABAC; immutable audit log |
+
+---
+
+## 2. High-Level System Architecture
+
+```
+┌─────────────────────────────────────────────────────────────────────┐
+│ CLIENTS │
+│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────────────┐ │
+│ │ Web UI │ │ Agent SDK│ │ CLI │ │ External Systems │ │
+│ │ (SPA) │ │ (Py/TS) │ │ │ │ (Webhooks/API) │ │
+│ └─────┬─────┘ └─────┬────┘ └────┬─────┘ └────────┬─────────┘ │
+│ │ │ │ │ │
+└────────┼──────────────┼─────────────┼──────────────────┼─────────────┘
+ │ │ │ │
+ ▼ ▼ ▼ ▼
+┌─────────────────────────────────────────────────────────────────────┐
+│ API GATEWAY / LOAD BALANCER │
+│ ┌─────────────────────────────────────────────────────────────┐ │
+│ │ Rate Limiting │ Auth (JWT) │ CORS │ Request Routing │ │
+│ └─────────────────────────────────────────────────────────────┘ │
+└────────────────────────────┬────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────┐
+│ APPLICATION LAYER │
+│ │
+│ ┌──────────────────┐ ┌──────────────────┐ │
+│ │ REST API Server │ │ Webhook Ingress │ │
+│ │ (OpenAPI 3.1) │ │ │ │
+│ └────────┬─────────┘ └────────┬─────────┘ │
+│ │ │ │
+│ ▼ ▼ │
+│ ┌─────────────────────────────────────────────────────────────┐ │
+│ │ DOMAIN SERVICE LAYER │ │
+│ │ │ │
+│ │ ┌─────────┐ ┌─────────┐ ┌──────────┐ ┌─────────────────┐ │ │
+│ │ │ Risk │ │ Control │ │Assessment│ │ Evidence │ │ │
+│ │ │ Service │ │ Service │ │ Service │ │ Service │ │ │
+│ │ └─────────┘ └─────────┘ └──────────┘ └─────────────────┘ │ │
+│ │ ┌─────────┐ ┌─────────┐ ┌──────────┐ ┌─────────────────┐ │ │
+│ │ │ Finding │ │ Report │ │ Workflow │ │ Agent │ │ │
+│ │ │ Service │ │ Service │ │ Service │ │ Service │ │ │
+│ │ └─────────┘ └─────────┘ └──────────┘ └─────────────────┘ │ │
+│ │ ┌─────────┐ ┌─────────┐ ┌──────────┐ │ │
+│ │ │ Auth │ │ Tenant │ │ Plugin │ │ │
+│ │ │ Service │ │ Service │ │ Service │ │ │
+│ │ └─────────┘ └─────────┘ └──────────┘ │ │
+│ └─────────────────────────────────────────────────────────────┘ │
+│ │ │
+│ ▼ │
+│ ┌─────────────────────────────────────────────────────────────┐ │
+│ │ EVENT BUS (Internal) │ │
+│ │ Domain events: risk.created, control.updated, test.completed│ │
+│ └─────────────────────────────────────────────────────────────┘ │
+│ │ │
+│ ▼ │
+│ ┌─────────────────────────────────────────────────────────────┐ │
+│ │ PLUGIN RUNTIME │ │
+│ │ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ │
+│ │ │ Framework│ │Integration│ │ Evidence │ │ Custom │ │ │
+│ │ │ Plugins │ │ Plugins │ │ Collectors│ │ Workflow │ │ │
+│ │ └──────────┘ └──────────┘ └──────────┘ └──────────┘ │ │
+│ └─────────────────────────────────────────────────────────────┘ │
+└────────────────────────────┬────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────┐
+│ DATA & STORAGE LAYER │
+│ │
+│ ┌──────────────┐ ┌──────────────┐ ┌──────────────────────────┐ │
+│ │ PostgreSQL │ │ Object Store │ │ Search Index │ │
+│ │ (Primary DB) │ │ (S3/MinIO) │ │ (PG tsvector) │ │
+│ └──────────────┘ └──────────────┘ └──────────────────────────┘ │
+│ ┌──────────────┐ ┌──────────────┐ │
+│ │ Redis/Valkey │ │ Audit Log │ │
+│ │ (Cache/Queue)│ │ (Append-Only)│ │
+│ └──────────────┘ └──────────────┘ │
+└─────────────────────────────────────────────────────────────────────┘
+```
+
+---
+
+## 3. Component Descriptions
+
+### 3.1 API Gateway
+
+The API gateway is the single entry point for all clients. Responsibilities:
+- **Authentication** — Validates JWT access tokens (issued by Auth Service)
+- **Rate Limiting** — Per-tenant and per-client throttling
+- **CORS** — Configurable origin policies
+- **Request Routing** — Routes to REST or Webhook handlers
+- **TLS Termination** — TLS 1.3 for all external connections
+
+Technology: Nginx/Envoy/Caddy (configurable) or cloud-native (ALB/Cloud Run).
+
+### 3.2 REST API Server
+
+OpenAPI 3.1 specification. Versioned at `/api/v1/`. Resources:
+
+| Resource | Endpoint Pattern |
+|---|---|
+| Risks | `/api/v1/risks` |
+| Controls | `/api/v1/controls` |
+| Frameworks | `/api/v1/frameworks` |
+| Assessments | `/api/v1/assessments` |
+| Test Procedures | `/api/v1/test-procedures` |
+| Evidence/Artifacts | `/api/v1/artifacts` |
+| Findings | `/api/v1/findings` |
+| Users | `/api/v1/users` |
+| Agents | `/api/v1/agents` |
+| Plugins | `/api/v1/plugins` |
+| Audit Logs | `/api/v1/audit-logs` |
+| Reports | `/api/v1/reports` |
+| Taxonomy | `/api/v1/taxonomy` |
+
+All endpoints support: pagination, filtering, sorting, field selection, and `include` for related entities.
+
+### 3.3 Domain Services
+
+Each service encapsulates a bounded context:
+
+| Service | Responsibility |
+|---|---|
+| **Risk Service** | CRUD risks, scoring, treatment plans, heat maps, risk campaigns |
+| **Control Service** | CRUD controls, CCL, framework mappings, control catalog |
+| **Assessment Service** | Campaigns, test procedures, workpapers, sampling |
+| **Evidence Service** | Artifact storage, linking, evidence requests, lineage, retention |
+| **Finding Service** | Finding lifecycle, remediation tracking, deficiency classification |
+| **Report Service** | Report generation, templates, scheduling, export |
+| **Workflow Service** | Review chains, approval logic, state machine, notifications |
+| **Agent Service** | Agent registration, assignments, result intake, provenance |
+| **Auth Service** | Authentication (local, SAML, OIDC), authorization (RBAC+ABAC), tokens |
+| **Tenant Service** | Tenant lifecycle, isolation, configuration, resource limits |
+| **Plugin Service** | Plugin lifecycle, sandboxing, configuration, hook registration |
+
+### 3.4 Event Bus
+
+Internal event bus for decoupling domain services. Supports:
+
+- **Synchronous handlers** (in-process, for simple reactions)
+- **Async queue** (Redis/Valkey streams or PostgreSQL LISTEN/NOTIFY for background jobs)
+
+**Core domain events:**
+
+| Event | Published By | Consumed By |
+|---|---|---|
+| `risk.created` | Risk Service | Workflow, Notification |
+| `risk.score_changed` | Risk Service | Report, Dashboard cache |
+| `control.updated` | Control Service | Assessment, Search Index |
+| `assessment.completed` | Assessment Service | Report, Finding |
+| `test_procedure.result_submitted` | Assessment Service | Workflow, Agent Service |
+| `artifact.uploaded` | Evidence Service | Search Index, Lineage |
+| `finding.opened` | Finding Service | Workflow, Notification |
+| `finding.closed` | Finding Service | Report, Risk Service |
+| `agent.result_submitted` | Agent Service | Workflow (route to review) |
+| `plugin.installed` | Plugin Service | Tenant Service |
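+
+A minimal in-process sketch of the synchronous-handler side of this bus (the async queue path is deployment-specific, and the production bus is async, per the domain-service examples):
+
+```python
+from collections import defaultdict
+from typing import Any, Callable
+
+
+class InProcessEventBus:
+    """Dispatch domain events to subscribed handlers, in-process."""
+
+    def __init__(self) -> None:
+        self._handlers: dict[str, list[Callable[[Any], None]]] = defaultdict(list)
+
+    def subscribe(self, event_type: str, handler: Callable[[Any], None]) -> None:
+        self._handlers[event_type].append(handler)
+
+    def publish(self, event_type: str, payload: Any) -> None:
+        for handler in self._handlers[event_type]:
+            handler(payload)
+
+
+# Example: the workflow service reacting to a new finding
+bus = InProcessEventBus()
+bus.subscribe("finding.opened", lambda e: print("route to review:", e["ref_id"]))
+bus.publish("finding.opened", {"ref_id": "FIND-042"})
+```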
+
+### 3.5 Plugin Runtime
+
+Plugins extend Ground Control without modifying core code.
+
+```
+┌────────────────────────────────────────────────┐
+│                 Plugin Runtime                 │
+│                                                │
+│  ┌────────────────────┐  ┌───────────────────┐ │
+│  │   Plugin Sandbox   │  │  Plugin Registry  │ │
+│  │ (Process Isolation)│  │ (Catalog, Versions│ │
+│  │                    │  │  Signatures)      │ │
+│  └────────────────────┘  └───────────────────┘ │
+│                                                │
+│  Plugin API Surface:                           │
+│  ├── Hook into domain events                   │
+│  ├── Register new API endpoints                │
+│  ├── Add UI components (micro-frontend)        │
+│  ├── Define new entity types                   │
+│  ├── Provide framework definitions             │
+│  └── Access scoped data via Plugin SDK         │
+│                                                │
+│  Security:                                     │
+│  ├── Declared permission scopes                │
+│  └── Audit logging of plugin actions           │
+└────────────────────────────────────────────────┘
+```
+
+**Plugin types:**
+
+| Type | Purpose | Example |
+|---|---|---|
+| **Framework Plugin** | Adds a compliance framework definition | ISO 27001, PCI-DSS, HIPAA |
+| **Integration Plugin** | Connects to external systems | Jira, ServiceNow, Slack |
+| **Evidence Collector** | Automates evidence gathering | AWS Config, Azure Policy |
+| **Workflow Plugin** | Custom approval/review workflows | SOX sign-off chain |
+| **Report Plugin** | Custom report templates or formats | Board report, regulator format |
+| **Agent Plugin** | Agent capabilities (scoring models, analyzers) | FAIR quantitative scoring |
+
+### 3.6 Data & Storage Layer
+
+| Store | Technology | Purpose |
+|---|---|---|
+| **Primary Database** | PostgreSQL 16+ | All structured data, JSONB for flexible attributes |
+| **Object Store** | S3-compatible (S3, MinIO, GCS) | Evidence artifacts, report exports, attachments |
+| **Cache / Queue** | Redis or Valkey | Session cache, rate limit counters, background job queue |
+| **Search Index** | PostgreSQL tsvector (built-in); optional Meilisearch for scale | Full-text search across risks, controls, evidence, findings |
+| **Audit Log** | PostgreSQL (append-only table) or external (immutable ledger) | Compliance audit trail |
+
+---
+
+## 4. Authentication & Authorization Architecture
+
+```
+┌───────────────────────────────────────────────────────────────┐
+│ Authentication Flow │
+│ │
+│ Browser ──┬── SAML 2.0 ──► Corporate IdP (Okta, Azure AD) │
+│ ├── OIDC ──────► Corporate IdP / Social │
+│ └── Local ─────► Username/Password + MFA │
+│ │
+│ Agent ───┬── OAuth2 Client Credentials ──► Token Endpoint │
+│ └── API Key ────────────────────► API Gateway │
+│ │
+│ All paths ──► JWT Access Token ──► API Gateway validates │
+└───────────────────────────────────────────────────────────────┘
+
+┌───────────────────────────────────────────────────────────────┐
+│ Authorization Model │
+│ │
+│ RBAC (Role-Based Access Control): │
+│ ├── Roles: Admin, Risk Manager, Auditor, Control Owner, │
+│ │ Compliance Analyst, Viewer, Agent │
+│ └── Each role has a set of permissions (resource:action) │
+│ │
+│ ABAC (Attribute-Based Access Control): │
+│ ├── Tenant isolation (tenant_id on every resource) │
+│ ├── Business unit scoping (user sees only their BU data) │
+│ ├── Assessment scoping (auditor sees only assigned work) │
+│ └── Data classification (restrict PII/sensitive artifacts) │
+│ │
+│ Permission format: resource:action:scope │
+│ Examples: │
+│ ├── risks:read:* (read all risks) │
+│ ├── risks:write:bu=engineering (write risks in Engineering) │
+│ ├── assessments:approve:campaign=Q1-2026 (approve in Q1) │
+│ └── agents:execute:scope=testing (agent can run tests) │
+└───────────────────────────────────────────────────────────────┘
+```
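+
+A minimal sketch of how a `resource:action:scope` grant could be evaluated; the wildcard and parsing rules here are assumptions, not the shipped policy engine:
+
+```python
+def permission_allows(granted: str, resource: str, action: str,
+                      scope: str = "*") -> bool:
+    # Grants always carry three parts per the format above
+    g_resource, g_action, g_scope = granted.split(":", 2)
+    return (
+        g_resource in (resource, "*")
+        and g_action in (action, "*")
+        and g_scope in (scope, "*")
+    )
+
+assert permission_allows("risks:read:*", "risks", "read", "bu=engineering")
+assert not permission_allows("risks:write:bu=engineering",
+                             "risks", "write", "bu=finance")
+```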
+
+---
+
+## 5. Deployment Architecture
+
+### 5.1 Single-Machine (Docker Compose)
+
+For small teams or evaluation:
+
+```
+┌────────────────────────────────────────────┐
+│ Docker Compose Host │
+│ │
+│ ┌──────────┐ ┌──────────┐ ┌──────────┐ │
+│ │ GC App │ │PostgreSQL│ │ MinIO │ │
+│ │ (API+UI) │ │ │ │ │ │
+│ └──────────┘ └──────────┘ └──────────┘ │
+│ ┌──────────┐ │
+│ │ Redis │ │
+│ └──────────┘ │
+│ │
+│ Reverse Proxy: Caddy (auto TLS) │
+└────────────────────────────────────────────┘
+```
+
+### 5.2 Kubernetes (Helm Chart)
+
+For production and multi-tenant:
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│ Kubernetes Cluster │
+│ │
+│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
+│ │ Ingress │ │ GC API │ │ GC Worker │ │
+│ │ Controller │────▶│ Deployment │ │ Deployment │ │
+│ │ (nginx/ │ │ (N replicas)│ │ (background│ │
+│ │ traefik) │ └─────────────┘ │ jobs) │ │
+│ └─────────────┘ │ └─────────────┘ │
+│ │ │ │
+│ ▼ ▼ │
+│ ┌─────────────┐ ┌─────────────┐ │
+│ │ PostgreSQL │ │ Redis │ │
+│ │ (Operator │ │ (Sentinel) │ │
+│ │ or RDS) │ └─────────────┘ │
+│ └─────────────┘ │
+│ ┌─────────────┐ ┌─────────────┐ │
+│ │ MinIO / │ │ (optional: │ │
+│ │ S3 │ │ Meilisearch)│ │
+│ └─────────────┘ └─────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────┐ │
+│ │ Shared: ConfigMaps, Secrets, PVCs, NetworkPolicies │ │
+│ └─────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+### 5.3 Cloud-Managed
+
+For organizations preferring managed services:
+
+| Component | AWS | Azure | GCP |
+|---|---|---|---|
+| Compute | ECS Fargate / EKS | AKS / Container Apps | Cloud Run / GKE |
+| Database | RDS PostgreSQL | Azure DB for PostgreSQL | Cloud SQL |
+| Object Storage | S3 | Blob Storage | GCS |
+| Cache | ElastiCache Redis | Azure Cache for Redis | Memorystore |
+| Search | OpenSearch | Azure AI Search | (Meilisearch on GKE) |
+| Load Balancer | ALB | Application Gateway | Cloud Load Balancing |
+| Identity | Cognito / SAML | Azure AD / SAML | Identity Platform |
+
+---
+
+## 6. Security Architecture
+
+### 6.1 Defense in Depth
+
+```
+Layer 1 — Network: TLS 1.3, network segmentation, WAF
+Layer 2 — Gateway: Rate limiting, JWT validation, IP allowlisting
+Layer 3 — Application: RBAC + ABAC, input validation, CSRF protection
+Layer 4 — Data: Encryption at rest (AES-256), field-level encryption for sensitive data
+Layer 5 — Audit: Immutable audit log, tamper detection, log forwarding to SIEM
+Layer 6 — Supply Chain: Signed plugins, dependency scanning, SBOM generation
+```
+
+### 6.2 Data Protection
+
+| Data Category | Protection |
+|---|---|
+| Credentials (passwords, API keys) | Argon2id hashing / encrypted secrets store |
+| Evidence artifacts | AES-256 at rest; optional client-side encryption |
+| PII in assessments | Field-level encryption; access restricted by ABAC policy |
+| Audit logs | Append-only table; hash chaining for tamper detection |
+| Database backups | Encrypted with customer-managed key (BYOK supported) |
+
+### 6.3 Audit Log Architecture
+
+```
+┌─────────────────────────────────────────────────┐
+│ Every State Change │
+│ │
+│ { │
+│ "id": "uuid", │
+│ "timestamp": "2026-03-07T10:15:30Z", │
+│ "tenant_id": "uuid", │
+│ "actor_id": "uuid", │
+│ "actor_type": "user | agent | system", │
+│ "action": "create | update | delete | ...", │
+│ "resource_type": "risk | control | ...", │
+│ "resource_id": "uuid", │
+│ "changes": { │
+│ "field": { "old": "...", "new": "..." } │
+│ }, │
+│ "ip_address": "198.51.100.42", │
+│ "user_agent": "...", │
+│ "previous_hash": "sha256:...", │
+│ "hash": "sha256:..." │
+│ } │
+│ │
+│ → Append-only PostgreSQL table │
+│ → Optional: forward to Splunk/Elastic via │
+│ syslog or webhook │
+└─────────────────────────────────────────────────┘
+```
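+
+A minimal sketch of the hash chain (assuming canonical JSON serialization of the entry minus its own `hash` field; the exact canonicalization is an implementation choice):
+
+```python
+import hashlib
+import json
+
+def chain_hash(entry: dict, previous_hash: str) -> str:
+    # Canonicalize so an identical entry always hashes identically
+    payload = json.dumps(entry, sort_keys=True, separators=(",", ":"))
+    digest = hashlib.sha256((payload + previous_hash).encode()).hexdigest()
+    return f"sha256:{digest}"
+
+# Verification walks the table in order, recomputing each hash and comparing
+# it to the stored value; any edit breaks every subsequent link.
+```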
+
+---
+
+## 7. Technology Stack Summary
+
+| Layer | Technology | Rationale |
+|---|---|---|
+| **Language** | Python 3.12+ (API), TypeScript (UI) | Strong ecosystem, AI/ML libraries, broad talent pool |
+| **API Framework** | FastAPI | Async, OpenAPI auto-generation, Pydantic validation |
+| **Web UI** | React + TypeScript | Component ecosystem, SSR capable, agent dashboard |
+| **UI Framework** | Shadcn/ui + Tailwind CSS | Accessible, customizable, modern |
+| **Database** | PostgreSQL 16+ | JSONB, full-text search, row-level security, proven |
+| **ORM** | SQLAlchemy 2.0 + Alembic | Async support, migrations, mature |
+| **Object Storage** | S3-compatible (MinIO for self-host) | Universal API, cost-effective, scalable |
+| **Cache/Queue** | Redis or Valkey | Fast, versatile, stream support for job queues |
+| **Search** | PostgreSQL tsvector (built-in); Meilisearch optional | Built-in full-text search; external index available for scale |
+| **Background Jobs** | ARQ (Redis-backed) or Celery | Reliable async task execution |
+| **Containerization** | Docker + Docker Compose | Universal deployment format |
+| **Orchestration** | Kubernetes (Helm chart) | Production scaling, managed K8s on all clouds |
+| **CI/CD** | GitHub Actions | Widely adopted, free for open source |
+| **MCP Tooling** | rocq-mcp (Coq proofs), AWS MCP (infrastructure) | AI-assisted development: proof checking, cloud operations |
+| **Testing** | pytest + Playwright | Unit/integration + E2E |
+
+---
+
+## 8. Scalability Considerations
+
+### 8.1 Horizontal Scaling
+
+- **API servers** — Stateless; scale horizontally behind load balancer
+- **Worker processes** — Scale independently based on job queue depth
+- **PostgreSQL** — Read replicas for reporting; connection pooling (PgBouncer)
+- **Object storage** — Inherently scalable (S3/MinIO)
+- **Search** — PostgreSQL tsvector built-in; optional Meilisearch for scale
+
+### 8.2 Multi-Tenancy Models
+
+| Model | Isolation | Complexity | Use Case |
+|---|---|---|---|
+| **Shared schema** (tenant_id column) | Logical | Low | SaaS, small tenants |
+
+Additional isolation models (schema-per-tenant, database-per-tenant) can be added when needed.
+
+### 8.3 Performance Targets
+
+Targets align with PRD Section 7 (Non-Functional Requirements). Internal stretch goals are noted where tighter than the PRD requirement.
+
+| Operation | PRD Requirement | Stretch Target |
+|---|---|---|
+| API CRUD (single entity) | p95 < 200ms | p95 < 100ms |
+| API list with filters | p95 < 200ms | — |
+| Report generation (standard) | p95 < 2s | — |
+| Full-text search | — | p95 < 50ms |
+| File upload (100MB) | — | p95 < 10s |
+| Agent result submission | — | p95 < 150ms |
diff --git a/docs/architecture/DATA_MODEL.md b/docs/architecture/DATA_MODEL.md
new file mode 100644
index 00000000..d1944715
--- /dev/null
+++ b/docs/architecture/DATA_MODEL.md
@@ -0,0 +1,802 @@
+# Ground Control — Data Model & Storage Design
+
+**Version:** 1.0.0
+**Date:** 2026-03-07
+
+---
+
+## 1. Entity-Relationship Overview
+
+```
+┌──────────────┐ ┌──────────────┐ ┌──────────────────┐
+│ Tenant │1─────*│ User │*─────*│ Role │
+└──────────────┘ └──────────────┘ └──────────────────┘
+ │1 │* │
+ │ │ │
+ │ ┌───────────┼───────────┐ │
+ │ │ │ │ │
+ ▼* ▼* ▼* ▼* │
+┌──────────┐ ┌──────────┐ ┌──────────┐ ┌─────────┐ │
+│ Risk │ │ Control │ │Assessment│ │ Agent │ │
+│ │ │ │ │ Campaign │ │ │ │
+└──────────┘ └──────────┘ └──────────┘ └─────────┘ │
+ │* │* │1 │
+ │ │ │ │
+ │ ┌──────┘ ▼* │
+ │ │ ┌──────────────────┐ │
+ │ │ │ Test Procedure │ │
+ │ │ └──────────────────┘ │
+ │ │ │* │
+ │ │ │ │
+ ▼* ▼* ▼* │
+┌─────────────────────────────────────────────┐ │
+│ Artifact (Evidence) │ │
+└─────────────────────────────────────────────┘ │
+ │* │* │
+ │ │ │
+ ▼* ▼* │
+┌──────────┐ ┌──────────────┐ │
+│ Finding │ │ Audit Log │ │
+│ │ │ Entry │ │
+└──────────┘ └──────────────┘ │
+ │* │
+ ▼* │
+┌──────────────────┐ │
+│Remediation Plan │ │
+└──────────────────┘ │
+
+Cross-cutting:
+┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐
+│ Framework │ │ Framework │ │ Common Control │
+│ │*─│ Requirement │*─│ Library Entry │
+└──────────────────┘ └──────────────────┘ └──────────────────┘
+```
+
+---
+
+## 2. Core Entities
+
+### 2.1 Tenant
+
+```sql
+CREATE TABLE tenants (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ name TEXT NOT NULL,
+ slug TEXT NOT NULL UNIQUE,
+ settings JSONB NOT NULL DEFAULT '{}',
+ status TEXT NOT NULL DEFAULT 'active', -- active, suspended, archived
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
+```
+
+### 2.2 User
+
+```sql
+CREATE TABLE users (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ email TEXT NOT NULL,
+ display_name TEXT NOT NULL,
+ auth_provider TEXT NOT NULL DEFAULT 'local', -- local, saml, oidc
+ external_id TEXT, -- IdP subject identifier
+ status TEXT NOT NULL DEFAULT 'active', -- active, inactive, suspended
+ mfa_enabled BOOLEAN NOT NULL DEFAULT false,
+ settings JSONB NOT NULL DEFAULT '{}',
+ last_login_at TIMESTAMPTZ,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (tenant_id, email)
+);
+```
+
+### 2.3 Role & Permission
+
+```sql
+CREATE TABLE roles (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ name TEXT NOT NULL,
+ description TEXT,
+ is_system BOOLEAN NOT NULL DEFAULT false, -- built-in roles can't be deleted
+ permissions JSONB NOT NULL DEFAULT '[]', -- ["risks:read:*", "risks:write:bu=eng"]
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (tenant_id, name)
+);
+
+CREATE TABLE user_roles (
+ user_id UUID NOT NULL REFERENCES users(id),
+ role_id UUID NOT NULL REFERENCES roles(id),
+ scope JSONB DEFAULT '{}', -- optional: {"business_unit": "engineering"}
+ granted_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ granted_by UUID REFERENCES users(id),
+
+ PRIMARY KEY (user_id, role_id)
+);
+```
+
+### 2.4 Risk
+
+```sql
+CREATE TABLE risks (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ ref_id TEXT NOT NULL, -- human-readable: RISK-001
+ title TEXT NOT NULL,
+ description TEXT,
+ category TEXT NOT NULL, -- from taxonomy
+ owner_id UUID REFERENCES users(id),
+ status TEXT NOT NULL DEFAULT 'open', -- open, mitigated, accepted, closed, archived
+
+ -- Inherent risk scores
+ inherent_likelihood INTEGER NOT NULL,
+ inherent_impact INTEGER NOT NULL,
+ inherent_score NUMERIC GENERATED ALWAYS AS (inherent_likelihood * inherent_impact) STORED,
+
+ -- Residual risk scores
+ residual_likelihood INTEGER,
+ residual_impact INTEGER,
+ residual_score NUMERIC GENERATED ALWAYS AS (residual_likelihood * residual_impact) STORED,
+
+ -- Risk appetite
+ appetite_threshold NUMERIC,
+
+ -- Metadata
+ business_units TEXT[] NOT NULL DEFAULT '{}',
+ tags TEXT[] NOT NULL DEFAULT '{}',
+ custom_fields JSONB NOT NULL DEFAULT '{}',
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ archived_at TIMESTAMPTZ,
+
+ UNIQUE (tenant_id, ref_id)
+);
+
+CREATE INDEX idx_risks_tenant_category ON risks(tenant_id, category);
+CREATE INDEX idx_risks_tenant_owner ON risks(tenant_id, owner_id);
+CREATE INDEX idx_risks_tenant_status ON risks(tenant_id, status);
+```
+
+### 2.5 Control
+
+```sql
+CREATE TABLE controls (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ ref_id TEXT NOT NULL, -- CTRL-AM-001
+ ccl_entry_id UUID, -- link to Common Control Library
+ title TEXT NOT NULL,
+ objective TEXT,
+ description TEXT,
+ control_type TEXT NOT NULL, -- preventive, detective, corrective
+ control_nature TEXT NOT NULL, -- manual, automated, it_dependent_manual
+ frequency TEXT NOT NULL, -- continuous, daily, weekly, monthly, quarterly, annual, ad_hoc
+ owner_id UUID REFERENCES users(id),
+ status TEXT NOT NULL DEFAULT 'active', -- active, retired, draft
+
+ -- Effectiveness
+ effectiveness_rating TEXT, -- effective, needs_improvement, ineffective
+ last_tested_at TIMESTAMPTZ,
+
+ -- Metadata
+ business_units TEXT[] NOT NULL DEFAULT '{}',
+ systems TEXT[] NOT NULL DEFAULT '{}',
+ tags TEXT[] NOT NULL DEFAULT '{}',
+ custom_fields JSONB NOT NULL DEFAULT '{}',
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (tenant_id, ref_id)
+);
+
+CREATE INDEX idx_controls_tenant_type ON controls(tenant_id, control_type);
+CREATE INDEX idx_controls_tenant_owner ON controls(tenant_id, owner_id);
+```
+
+### 2.5a Risk-Control Mapping
+
+```sql
+-- Many-to-many: Risks ↔ Controls
+CREATE TABLE risk_control_mappings (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ risk_id UUID NOT NULL REFERENCES risks(id),
+ control_id UUID NOT NULL REFERENCES controls(id),
+ mapping_note TEXT,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (risk_id, control_id)
+);
+```
+
+### 2.6 Framework & Requirements
+
+```sql
+CREATE TABLE frameworks (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID REFERENCES tenants(id), -- NULL = global/system framework
+ name TEXT NOT NULL, -- "SOX ITGC", "ISO 27001:2022"
+ version TEXT NOT NULL,
+ description TEXT,
+ source_plugin TEXT, -- plugin that provided this framework
+ status TEXT NOT NULL DEFAULT 'active',
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
+
+CREATE TABLE framework_requirements (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ framework_id UUID NOT NULL REFERENCES frameworks(id),
+ ref_id TEXT NOT NULL, -- "CC6.1", "A.8.3", "AC-2"
+ title TEXT NOT NULL,
+ description TEXT,
+ parent_id UUID REFERENCES framework_requirements(id), -- hierarchy
+ sort_order INTEGER NOT NULL DEFAULT 0,
+ metadata JSONB NOT NULL DEFAULT '{}',
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (framework_id, ref_id)
+);
+
+-- Many-to-many: Controls ↔ Framework Requirements
+CREATE TABLE control_framework_mappings (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ control_id UUID NOT NULL REFERENCES controls(id),
+ requirement_id UUID NOT NULL REFERENCES framework_requirements(id),
+ mapping_type TEXT NOT NULL DEFAULT 'satisfies', -- satisfies, partially_satisfies, related
+ notes TEXT,
+ suggested_by_agent UUID, -- if AI-suggested, agent_id
+ confidence NUMERIC, -- agent confidence score
+ approved_by UUID REFERENCES users(id),
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (control_id, requirement_id)
+);
+```
+
+### 2.7 Common Control Library (CCL)
+
+```sql
+CREATE TABLE ccl_entries (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ ref_id TEXT NOT NULL UNIQUE, -- "CC-AM-001"
+ title TEXT NOT NULL,
+ description TEXT NOT NULL,
+ category TEXT NOT NULL,
+ control_type TEXT NOT NULL,
+ control_nature TEXT NOT NULL,
+ version INTEGER NOT NULL DEFAULT 1,
+ status TEXT NOT NULL DEFAULT 'active',
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
+
+-- CCL entry ↔ Framework Requirements (reference mappings)
+CREATE TABLE ccl_framework_mappings (
+ ccl_entry_id UUID NOT NULL REFERENCES ccl_entries(id),
+ requirement_id UUID NOT NULL REFERENCES framework_requirements(id),
+
+ PRIMARY KEY (ccl_entry_id, requirement_id)
+);
+```
+
+### 2.8 Assessment Campaign
+
+```sql
+CREATE TABLE assessment_campaigns (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ name TEXT NOT NULL,
+ campaign_type TEXT NOT NULL, -- sox_itgc, soc2, iso27001, custom
+ status TEXT NOT NULL DEFAULT 'planning', -- planning, active, review, finalized, archived
+ period_start DATE NOT NULL,
+ period_end DATE NOT NULL,
+ fieldwork_start DATE,
+ fieldwork_end DATE,
+ scope_filter JSONB NOT NULL DEFAULT '{}', -- {"business_units": [...], "control_types": [...]}
+
+ created_by UUID NOT NULL REFERENCES users(id),
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ finalized_at TIMESTAMPTZ
+);
+```
+
+### 2.9 Test Procedure & Steps
+
+```sql
+CREATE TABLE test_procedures (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ campaign_id UUID NOT NULL REFERENCES assessment_campaigns(id),
+ control_id UUID NOT NULL REFERENCES controls(id),
+ template_id UUID, -- from template library
+ title TEXT NOT NULL,
+ description TEXT,
+ status TEXT NOT NULL DEFAULT 'not_started',
+ -- not_started, in_progress, completed, review, approved
+ assigned_to UUID REFERENCES users(id),
+ assigned_agent_id UUID, -- if assigned to an agent
+ reviewer_id UUID REFERENCES users(id),
+
+ -- Results
+ conclusion TEXT, -- effective, ineffective, not_tested
+ agent_produced BOOLEAN NOT NULL DEFAULT false,
+ agent_confidence NUMERIC,
+ agent_provenance JSONB,
+
+ -- Sampling
+ population_size INTEGER,
+ sample_size INTEGER,
+ sampling_method TEXT, -- statistical, judgmental, haphazard
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ completed_at TIMESTAMPTZ,
+ approved_at TIMESTAMPTZ
+);
+
+CREATE TABLE test_steps (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ procedure_id UUID NOT NULL REFERENCES test_procedures(id),
+ step_number INTEGER NOT NULL,
+ instruction TEXT NOT NULL,
+ expected_result TEXT,
+
+ -- Tester fills in:
+ actual_result TEXT,
+ conclusion TEXT, -- pass, fail, na
+ notes TEXT,
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (procedure_id, step_number)
+);
+```
+
+### 2.10 Artifact (Evidence)
+
+```sql
+CREATE TABLE artifacts (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ filename TEXT NOT NULL,
+ content_type TEXT NOT NULL,
+ size_bytes BIGINT NOT NULL,
+ storage_key TEXT NOT NULL, -- S3/MinIO object key
+ sha256_hash TEXT NOT NULL, -- integrity verification
+ version INTEGER NOT NULL DEFAULT 1,
+ parent_artifact_id UUID REFERENCES artifacts(id), -- previous version
+
+ -- Metadata
+ uploaded_by UUID NOT NULL REFERENCES users(id),
+ uploaded_by_agent UUID, -- if collected by agent
+ description TEXT,
+ tags TEXT[] NOT NULL DEFAULT '{}',
+ custom_fields JSONB NOT NULL DEFAULT '{}',
+
+ -- Encryption
+ encryption_method TEXT NOT NULL DEFAULT 'server_aes256',
+ encryption_key_id TEXT,
+
+ -- Retention
+ retention_until DATE,
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
+
+CREATE INDEX idx_artifacts_tenant ON artifacts(tenant_id);
+CREATE INDEX idx_artifacts_hash ON artifacts(sha256_hash);
+
+-- Typed link tables for artifact associations (enforces FK integrity)
+CREATE TABLE artifact_risk_links (
+ artifact_id UUID NOT NULL REFERENCES artifacts(id),
+ risk_id UUID NOT NULL REFERENCES risks(id),
+ context_note TEXT,
+ linked_by UUID NOT NULL REFERENCES users(id),
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ PRIMARY KEY (artifact_id, risk_id)
+);
+
+CREATE TABLE artifact_control_links (
+ artifact_id UUID NOT NULL REFERENCES artifacts(id),
+ control_id UUID NOT NULL REFERENCES controls(id),
+ context_note TEXT,
+ linked_by UUID NOT NULL REFERENCES users(id),
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ PRIMARY KEY (artifact_id, control_id)
+);
+
+CREATE TABLE artifact_procedure_links (
+ artifact_id UUID NOT NULL REFERENCES artifacts(id),
+ procedure_id UUID NOT NULL REFERENCES test_procedures(id),
+ context_note TEXT,
+ linked_by UUID NOT NULL REFERENCES users(id),
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ PRIMARY KEY (artifact_id, procedure_id)
+);
+
+CREATE TABLE artifact_step_links (
+ artifact_id UUID NOT NULL REFERENCES artifacts(id),
+ step_id UUID NOT NULL REFERENCES test_steps(id),
+ context_note TEXT,
+ linked_by UUID NOT NULL REFERENCES users(id),
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ PRIMARY KEY (artifact_id, step_id)
+);
+
+-- Note: depends on the findings table (section 2.12); create this link table after it
+CREATE TABLE artifact_finding_links (
+ artifact_id UUID NOT NULL REFERENCES artifacts(id),
+ finding_id UUID NOT NULL REFERENCES findings(id),
+ context_note TEXT,
+ linked_by UUID NOT NULL REFERENCES users(id),
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ PRIMARY KEY (artifact_id, finding_id)
+);
+```
+
+### 2.11 Evidence Request
+
+```sql
+CREATE TABLE evidence_requests (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ campaign_id UUID REFERENCES assessment_campaigns(id),
+ control_id UUID REFERENCES controls(id),
+ procedure_id UUID REFERENCES test_procedures(id),
+
+ title TEXT NOT NULL,
+ description TEXT NOT NULL,
+ format_guidance TEXT,
+ due_date DATE NOT NULL,
+ status TEXT NOT NULL DEFAULT 'pending', -- pending, submitted, accepted, rejected, overdue
+
+ requested_by UUID NOT NULL REFERENCES users(id),
+ assigned_to UUID NOT NULL REFERENCES users(id), -- control owner
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ submitted_at TIMESTAMPTZ,
+ resolved_at TIMESTAMPTZ
+);
+```
+
+### 2.12 Finding
+
+```sql
+CREATE TABLE findings (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ ref_id TEXT NOT NULL,
+ campaign_id UUID REFERENCES assessment_campaigns(id),
+ control_id UUID REFERENCES controls(id),
+ procedure_id UUID REFERENCES test_procedures(id),
+
+ title TEXT NOT NULL,
+ description TEXT NOT NULL,
+ root_cause TEXT,
+ risk_rating TEXT NOT NULL, -- high, medium, low
+ classification TEXT NOT NULL, -- deficiency, significant_deficiency, material_weakness
+
+ status TEXT NOT NULL DEFAULT 'draft',
+ -- draft, open, remediation_in_progress, validation, closed
+
+ owner_id UUID REFERENCES users(id), -- remediation owner
+ due_date DATE,
+ agent_produced BOOLEAN NOT NULL DEFAULT false,
+
+ created_by UUID NOT NULL REFERENCES users(id),
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ closed_at TIMESTAMPTZ,
+
+ UNIQUE (tenant_id, ref_id)
+);
+```
+
+### 2.13 Remediation Plan
+
+```sql
+CREATE TABLE remediation_plans (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ finding_id UUID NOT NULL REFERENCES findings(id),
+ description TEXT NOT NULL,
+ owner_id UUID NOT NULL REFERENCES users(id),
+ target_date DATE NOT NULL,
+ status TEXT NOT NULL DEFAULT 'planned', -- planned, in_progress, completed, validated
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ completed_at TIMESTAMPTZ
+);
+
+CREATE TABLE remediation_actions (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ plan_id UUID NOT NULL REFERENCES remediation_plans(id),
+ description TEXT NOT NULL,
+ owner_id UUID REFERENCES users(id),
+ due_date DATE,
+ status TEXT NOT NULL DEFAULT 'pending', -- pending, in_progress, completed
+ sort_order INTEGER NOT NULL DEFAULT 0,
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ completed_at TIMESTAMPTZ
+);
+```
+
+### 2.14 Agent Registration
+
+```sql
+CREATE TABLE agents (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ name TEXT NOT NULL,
+ description TEXT,
+ owner_id UUID NOT NULL REFERENCES users(id), -- human owner
+ client_id TEXT NOT NULL UNIQUE,
+ client_secret_hash TEXT NOT NULL, -- Argon2id hash
+ role_id UUID NOT NULL REFERENCES roles(id),
+ status TEXT NOT NULL DEFAULT 'active', -- active, suspended, revoked
+ allowed_scopes TEXT[] NOT NULL DEFAULT '{}',
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ last_active_at TIMESTAMPTZ
+);
+```
+
+### 2.15 Audit Log
+
+```sql
+CREATE TABLE audit_log (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL,
+ timestamp TIMESTAMPTZ NOT NULL DEFAULT now(),
+ actor_id UUID NOT NULL,
+ actor_type TEXT NOT NULL, -- user, agent, system
+ action TEXT NOT NULL, -- create, update, delete, login, approve, reject
+ resource_type TEXT NOT NULL,
+ resource_id UUID NOT NULL,
+ changes JSONB, -- {"field": {"old": "...", "new": "..."}}
+ ip_address INET,
+ user_agent TEXT,
+ previous_hash TEXT, -- chain for tamper detection
+ entry_hash TEXT NOT NULL -- SHA-256 of this entry + previous_hash
+);
+
+-- Append-only: no UPDATE or DELETE allowed (enforced by trigger or policy)
+CREATE INDEX idx_audit_log_tenant_ts ON audit_log(tenant_id, timestamp DESC);
+CREATE INDEX idx_audit_log_resource ON audit_log(resource_type, resource_id);
+CREATE INDEX idx_audit_log_actor ON audit_log(actor_id, timestamp DESC);
+```
+
+### 2.16 Risk Treatment Plan
+
+```sql
+CREATE TABLE risk_treatments (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ risk_id UUID NOT NULL REFERENCES risks(id),
+ treatment_type TEXT NOT NULL, -- accept, mitigate, transfer, avoid
+ description TEXT NOT NULL,
+ owner_id UUID REFERENCES users(id),
+ status TEXT NOT NULL DEFAULT 'planned', -- planned, in_progress, completed
+ target_date DATE,
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ completed_at TIMESTAMPTZ
+);
+
+CREATE TABLE treatment_actions (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ treatment_id UUID NOT NULL REFERENCES risk_treatments(id),
+ description TEXT NOT NULL,
+ owner_id UUID REFERENCES users(id),
+ due_date DATE,
+ status TEXT NOT NULL DEFAULT 'pending',
+ sort_order INTEGER NOT NULL DEFAULT 0,
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ completed_at TIMESTAMPTZ
+);
+```
+
+### 2.17 Taxonomy Configuration
+
+```sql
+CREATE TABLE taxonomy_categories (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ taxonomy_type TEXT NOT NULL, -- risk_category, control_type, control_nature, likelihood, impact, etc.
+ value TEXT NOT NULL,
+ label TEXT NOT NULL,
+ description TEXT,
+ color TEXT, -- hex color for UI
+ sort_order INTEGER NOT NULL DEFAULT 0,
+ is_active BOOLEAN NOT NULL DEFAULT true,
+
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (tenant_id, taxonomy_type, value)
+);
+```
+
+### 2.18 Plugin Registration
+
+```sql
+CREATE TABLE plugins (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ plugin_name TEXT NOT NULL,
+ version TEXT NOT NULL,
+ description TEXT,
+ author TEXT,
+ plugin_type TEXT NOT NULL, -- framework, integration, evidence_collector, workflow, report, agent
+ status TEXT NOT NULL DEFAULT 'installed', -- installed, enabled, disabled, error
+ config JSONB NOT NULL DEFAULT '{}',
+ permissions TEXT[] NOT NULL DEFAULT '{}',
+ signature TEXT, -- Ed25519 signature
+ health_status TEXT DEFAULT 'unknown',
+
+ installed_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ UNIQUE (tenant_id, plugin_name)
+);
+```
+
+### 2.19 Notification & Comment
+
+```sql
+-- Base comments table with typed foreign keys per entity
+-- Each entity type gets its own nullable FK column; exactly one must be set.
+CREATE TABLE comments (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ risk_id UUID REFERENCES risks(id),
+ control_id UUID REFERENCES controls(id),
+ finding_id UUID REFERENCES findings(id),
+ procedure_id UUID REFERENCES test_procedures(id),
+ campaign_id UUID REFERENCES assessment_campaigns(id),
+ parent_id UUID REFERENCES comments(id),
+ author_id UUID NOT NULL REFERENCES users(id),
+ body TEXT NOT NULL,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+
+ CONSTRAINT exactly_one_entity CHECK (
+ (CASE WHEN risk_id IS NOT NULL THEN 1 ELSE 0 END +
+ CASE WHEN control_id IS NOT NULL THEN 1 ELSE 0 END +
+ CASE WHEN finding_id IS NOT NULL THEN 1 ELSE 0 END +
+ CASE WHEN procedure_id IS NOT NULL THEN 1 ELSE 0 END +
+ CASE WHEN campaign_id IS NOT NULL THEN 1 ELSE 0 END) = 1
+ )
+);
+
+CREATE INDEX idx_comments_risk ON comments(risk_id) WHERE risk_id IS NOT NULL;
+CREATE INDEX idx_comments_control ON comments(control_id) WHERE control_id IS NOT NULL;
+CREATE INDEX idx_comments_finding ON comments(finding_id) WHERE finding_id IS NOT NULL;
+CREATE INDEX idx_comments_procedure ON comments(procedure_id) WHERE procedure_id IS NOT NULL;
+CREATE INDEX idx_comments_campaign ON comments(campaign_id) WHERE campaign_id IS NOT NULL;
+
+CREATE TABLE notifications (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ tenant_id UUID NOT NULL REFERENCES tenants(id),
+ user_id UUID NOT NULL REFERENCES users(id),
+ type TEXT NOT NULL, -- assignment, deadline, review_request, evidence_request, etc.
+ title TEXT NOT NULL,
+ body TEXT,
+ entity_type TEXT,
+ entity_id UUID,
+ is_read BOOLEAN NOT NULL DEFAULT false,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
+
+CREATE INDEX idx_notifications_user ON notifications(user_id, is_read, created_at DESC);
+```
+
+---
+
+## 3. Relationship Summary (ERD Legend)
+
+```
+Tenant 1──* User, Risk, Control, Assessment Campaign, Finding, Artifact, Agent, Plugin
+User *──* Role (via user_roles)
+Risk *──* Control (via risk_control_mappings)
+Risk 1──* Risk Treatment
+Control *──* Framework Requirement (via control_framework_mappings)
+Control *──1 CCL Entry (optional reference)
+CCL Entry *──* Framework Requirement (via ccl_framework_mappings)
+Framework 1──* Framework Requirement
+Assessment 1──* Test Procedure
+Test Procedure 1──* Test Step
+Test Procedure *──1 Control
+Artifact *──* Risk, Control, Test Procedure, Test Step, Finding (via typed link tables)
+Finding *──1 Campaign, Control, Test Procedure
+Finding 1──* Remediation Plan
+Remediation Plan 1──* Remediation Action
+Comment *──1 Risk | Control | Finding | Test Procedure | Campaign (via nullable FKs with CHECK constraint)
+Audit Log *──1 Any Entity (polymorphic, append-only)
+```
+
+---
+
+## 4. Storage Strategy
+
+### 4.1 Structured Data — PostgreSQL
+
+- All entities above reside in PostgreSQL.
+- JSONB columns (`custom_fields`, `settings`, `config`, `changes`) provide schema flexibility without sacrificing query performance.
+- Row-Level Security (RLS) policies enforce tenant isolation at the database level.
+
+```sql
+-- Example RLS policy
+ALTER TABLE risks ENABLE ROW LEVEL SECURITY;
+
+CREATE POLICY tenant_isolation ON risks
+ USING (tenant_id = current_setting('app.current_tenant_id')::UUID);
+```
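+
+The application must set `app.current_tenant_id` per request for the policy to take effect; a sketch assuming the SQLAlchemy 2.0 async sessions named in the stack:
+
+```python
+from sqlalchemy import text
+from sqlalchemy.ext.asyncio import AsyncSession
+
+async def bind_tenant(session: AsyncSession, tenant_id: str) -> None:
+    # is_local=true scopes the setting to the current transaction, so a
+    # pooled connection cannot leak one tenant's context to the next request
+    await session.execute(
+        text("SELECT set_config('app.current_tenant_id', :tid, true)"),
+        {"tid": tenant_id},
+    )
+```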
+
+### 4.2 Binary Artifacts — S3-Compatible Object Store
+
+- Artifacts stored under: `{tenant_id}/{year}/{month}/{artifact_id}/{version}/{filename}`
+- Server-side encryption (SSE-S3 or SSE-KMS).
+- Pre-signed URLs for direct browser upload/download (bypass API server for large files).
+- Lifecycle policies handle retention (move to Glacier/cold after retention window).
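+
+A sketch of pre-signed upload generation (boto3 assumed; the bucket name and MinIO endpoint follow the Compose configuration in the deployment guide, and the key segments are illustrative):
+
+```python
+import boto3
+
+s3 = boto3.client("s3", endpoint_url="http://minio:9000")
+
+def presign_upload(tenant_id: str, artifact_id: str, filename: str) -> str:
+    # Key layout: {tenant_id}/{year}/{month}/{artifact_id}/{version}/{filename}
+    key = f"{tenant_id}/2026/03/{artifact_id}/1/{filename}"
+    return s3.generate_presigned_url(
+        "put_object",
+        Params={"Bucket": "gc-artifacts", "Key": key},
+        ExpiresIn=900,  # browser uploads directly within a 15-minute window
+    )
+```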
+
+### 4.3 Search Index
+
+PostgreSQL `tsvector` is used for full-text search, with an optional external search engine (e.g., Meilisearch, Elasticsearch) for scale.
+
+Indexed entities:
+- Risks (title, description, category, tags)
+- Controls (title, objective, description, ref_id)
+- Findings (title, description)
+- Artifacts (filename, description, tags)
+- Framework Requirements (ref_id, title, description)
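+
+A sketch of one indexed query (column names follow section 2.4; in practice a stored, GIN-indexed `tsvector` column would back this):
+
+```python
+from sqlalchemy import text
+
+SEARCH_RISKS = text("""
+    SELECT id, title
+    FROM risks
+    WHERE to_tsvector('english',
+              coalesce(title, '') || ' ' || coalesce(description, ''))
+          @@ plainto_tsquery('english', :q)
+""")
+```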
+
+### 4.4 Caching — Redis/Valkey
+
+Cached objects:
+- User sessions and JWT validation cache
+- Dashboard aggregations (TTL: 60s)
+- Taxonomy lookups (TTL: 300s)
+- Rate limit counters
+
+### 4.5 Automatic Timestamps
+
+All tables with `updated_at` columns use a shared trigger:
+
+```sql
+CREATE OR REPLACE FUNCTION set_updated_at()
+RETURNS TRIGGER AS $$
+BEGIN
+ NEW.updated_at = now();
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Applied to every table with updated_at, e.g.:
+CREATE TRIGGER set_updated_at BEFORE UPDATE ON risks
+ FOR EACH ROW EXECUTE FUNCTION set_updated_at();
+CREATE TRIGGER set_updated_at BEFORE UPDATE ON controls
+ FOR EACH ROW EXECUTE FUNCTION set_updated_at();
+-- ... (applied to all entities with updated_at)
+```
+
+---
+
+## 5. Migration Strategy
+
+All schema changes are managed via **Alembic** migrations, with:
+- Forward and rollback scripts for every migration
+- Data migrations for taxonomy or framework updates
+- Zero-downtime migration patterns (add column → backfill → add constraint)
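+
+A sketch of the zero-downtime pattern as an Alembic migration (the column name is hypothetical):
+
+```python
+import sqlalchemy as sa
+from alembic import op
+
+def upgrade() -> None:
+    # 1. Add the column as nullable so existing writes keep succeeding
+    op.add_column("risks", sa.Column("severity_band", sa.Text(), nullable=True))
+    # 2. Backfill in place (batch this for very large tables)
+    op.execute("UPDATE risks SET severity_band = 'unrated' WHERE severity_band IS NULL")
+    # 3. Only then tighten the constraint
+    op.alter_column("risks", "severity_band", nullable=False)
+```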
diff --git a/docs/deployment/DEPLOYMENT.md b/docs/deployment/DEPLOYMENT.md
new file mode 100644
index 00000000..6f48d501
--- /dev/null
+++ b/docs/deployment/DEPLOYMENT.md
@@ -0,0 +1,733 @@
+# Ground Control — Deployment & SSO Guide
+
+**Version:** 1.0.0
+**Date:** 2026-03-07
+
+---
+
+## 1. Deployment Options Overview
+
+| Option | Best For | Complexity | Scaling |
+|---|---|---|---|
+| **Docker Compose** | Small teams, evaluation, development | Low | Vertical only |
+| **Kubernetes (Helm)** | Production, multi-tenant, enterprise | Medium | Horizontal |
+| **Cloud Managed** | Teams preferring managed infrastructure | Medium | Auto-scaling |
+
+---
+
+## 2. Docker Compose Deployment
+
+### 2.1 Prerequisites
+
+- Docker Engine 24+ and Docker Compose v2
+- 4 GB RAM minimum (8 GB recommended)
+- 20 GB disk (more for artifact storage)
+- Domain name with DNS pointing to the host (for TLS)
+
+### 2.2 Quick Start
+
+```bash
+# Clone the repository
+git clone https://github.com/KeplerOps/Ground-Control.git
+cd Ground-Control
+
+# Copy environment template
+cp .env.example .env
+
+# Edit configuration
+vi .env
+
+# Start all services
+docker compose up -d
+
+# Run database migrations
+docker compose exec app gc-migrate upgrade
+
+# Create initial admin user
+docker compose exec app gc-admin create-user \
+ --email admin@example.com \
+ --name "Admin" \
+ --role admin
+```
+
+### 2.3 Docker Compose Architecture
+
+```yaml
+# docker-compose.yml
+services:
+ app:
+ image: ghcr.io/keplerops/ground-control:latest
+ ports:
+ - "8000:8000"
+ environment:
+ DATABASE_URL: postgresql+asyncpg://gc:${DB_PASSWORD}@db:5432/groundcontrol
+ REDIS_URL: redis://redis:6379/0
+ S3_ENDPOINT: http://minio:9000
+ S3_BUCKET: gc-artifacts
+ S3_ACCESS_KEY: ${MINIO_ACCESS_KEY}
+ S3_SECRET_KEY: ${MINIO_SECRET_KEY}
+ SECRET_KEY: ${SECRET_KEY}
+ ALLOWED_ORIGINS: https://gc.example.com
+ depends_on:
+ - db
+ - redis
+ - minio
+
+ worker:
+ image: ghcr.io/keplerops/ground-control:latest
+ command: gc-worker
+ environment:
+      # same variables as the app service (use a YAML anchor or env_file to avoid drift)
+ depends_on:
+ - db
+ - redis
+
+ db:
+ image: postgres:16-alpine
+ volumes:
+ - pgdata:/var/lib/postgresql/data
+ environment:
+ POSTGRES_DB: groundcontrol
+ POSTGRES_USER: gc
+ POSTGRES_PASSWORD: ${DB_PASSWORD}
+
+ redis:
+ image: redis:7-alpine
+ volumes:
+ - redisdata:/data
+
+ minio:
+ image: minio/minio:latest
+ command: server /data --console-address ":9001"
+ volumes:
+ - miniodata:/data
+ environment:
+ MINIO_ROOT_USER: ${MINIO_ACCESS_KEY}
+ MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY}
+
+ caddy:
+ image: caddy:2-alpine
+ ports:
+ - "80:80"
+ - "443:443"
+ volumes:
+ - ./Caddyfile:/etc/caddy/Caddyfile
+ - caddydata:/data
+
+volumes:
+ pgdata:
+ redisdata:
+ miniodata:
+ caddydata:
+```
+
+### 2.4 Caddyfile (Reverse Proxy with Auto-TLS)
+
+```
+gc.example.com {
+ reverse_proxy app:8000
+}
+```
+
+### 2.5 Environment Variables
+
+```bash
+# .env.example
+
+# --- Core ---
+SECRET_KEY=change-me-to-a-random-64-char-string
+ALLOWED_ORIGINS=https://gc.example.com
+LOG_LEVEL=info
+
+# --- Database ---
+DB_PASSWORD=change-me
+
+# --- Object Storage ---
+MINIO_ACCESS_KEY=gc-access-key
+MINIO_SECRET_KEY=change-me
+
+# --- SSO (optional) ---
+SSO_PROVIDER= # saml or oidc
+SAML_IDP_METADATA_URL=
+SAML_SP_ENTITY_ID=
+OIDC_ISSUER=
+OIDC_CLIENT_ID=
+OIDC_CLIENT_SECRET=
+
+# --- SCIM (optional) ---
+SCIM_ENABLED=false
+SCIM_BEARER_TOKEN=
+
+# --- Email (optional) ---
+SMTP_HOST=
+SMTP_PORT=587
+SMTP_USER=
+SMTP_PASSWORD=
+SMTP_FROM=noreply@example.com
+
+# --- Encryption ---
+ARTIFACT_ENCRYPTION_KEY= # 32-byte base64 key for artifact encryption
+```
+
+---
+
+## 3. Kubernetes Deployment (Helm)
+
+### 3.1 Prerequisites
+
+- Kubernetes 1.28+
+- Helm 3.14+
+- kubectl configured for target cluster
+- Ingress controller (nginx-ingress or traefik)
+- cert-manager (for TLS certificates) — optional if using cloud LB
+
+### 3.2 Install
+
+```bash
+# Add Helm repo
+helm repo add ground-control https://keplerops.github.io/ground-control-charts
+helm repo update
+
+# Create namespace
+kubectl create namespace ground-control
+
+# Create secrets
+kubectl create secret generic gc-secrets \
+ --namespace ground-control \
+ --from-literal=secret-key=$(openssl rand -hex 32) \
+ --from-literal=db-password=$(openssl rand -hex 16) \
+ --from-literal=minio-secret-key=$(openssl rand -hex 16) \
+ --from-literal=search-key=$(openssl rand -hex 16)
+
+# Install with custom values
+helm install ground-control ground-control/ground-control \
+ --namespace ground-control \
+ --values values.yaml
+```
+
+### 3.3 Helm Values
+
+```yaml
+# values.yaml
+
+replicaCount:
+ app: 3
+ worker: 2
+
+image:
+ repository: ghcr.io/keplerops/ground-control
+ tag: "1.0.0"
+
+ingress:
+ enabled: true
+ className: nginx
+ hosts:
+ - host: gc.example.com
+ paths:
+ - path: /
+ pathType: Prefix
+ tls:
+ - secretName: gc-tls
+ hosts:
+ - gc.example.com
+
+postgresql:
+ enabled: true # Use bundled PostgreSQL
+ auth:
+ existingSecret: gc-secrets
+ secretKeys:
+ adminPasswordKey: db-password
+ primary:
+ persistence:
+ size: 50Gi
+
+redis:
+ enabled: true
+ architecture: standalone
+ auth:
+ enabled: false
+
+minio:
+ enabled: true
+ auth:
+ existingSecret: gc-secrets
+ persistence:
+ size: 100Gi
+
+# External database (instead of bundled)
+# externalDatabase:
+# host: my-rds-instance.region.rds.amazonaws.com
+# port: 5432
+# database: groundcontrol
+# existingSecret: gc-db-secret
+
+resources:
+ app:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 2000m
+ memory: 2Gi
+ worker:
+ requests:
+ cpu: 250m
+ memory: 256Mi
+ limits:
+ cpu: 1000m
+ memory: 1Gi
+
+autoscaling:
+ enabled: true
+ minReplicas: 2
+ maxReplicas: 10
+ targetCPUUtilizationPercentage: 70
+
+persistence:
+ artifacts:
+ storageClass: "" # Use default
+ size: 100Gi
+
+monitoring:
+ enabled: true
+ serviceMonitor: true # Prometheus ServiceMonitor
+```
+
+### 3.4 Kubernetes Architecture
+
+```
+┌──────────────────────────────────────────────────────────────────┐
+│ Namespace: ground-control │
+│ │
+│ ┌─────────────┐ ┌───────────────────┐ ┌─────────────┐ │
+│ │ Ingress │────▶│ Service: gc-app │────▶│ Deployment: │ │
+│ │ (TLS term.) │ │ (ClusterIP) │ │ gc-app │ │
+│ └─────────────┘ └───────────────────┘ │ (3 replicas) │ │
+│ └─────────────┘ │
+│ │
+│ ┌───────────────────┐ ┌──────────────────────────────────┐ │
+│ │ Deployment: │ │ StatefulSets: │ │
+│ │ gc-worker │ │ ├── PostgreSQL (or external RDS) │ │
+│ │ (2 replicas) │ │ ├── Redis │ │
+│ └───────────────────┘ │ ├── MinIO (or external S3) │ │
+│ ┌───────────────────┐ └──────────────────────────────────┘ │
+│ │ CronJob: │ │
+│ │ gc-scheduled-tasks │ ┌──────────────────────────────────┐ │
+│ └───────────────────┘ │ ConfigMap: gc-config │ │
+│ │ Secret: gc-secrets │ │
+│ ┌───────────────────┐ │ PVC: gc-artifacts │ │
+│ │ Job: gc-migrate │ └──────────────────────────────────┘ │
+│ │ (runs on upgrade) │ │
+│ └───────────────────┘ ┌──────────────────────────────────┐ │
+│ │ NetworkPolicy: restrict inter-pod │ │
+│ │ PodDisruptionBudget: min 1 avail │ │
+│ └──────────────────────────────────┘ │
+└──────────────────────────────────────────────────────────────────┘
+```
+
+---
+
+## 4. Cloud-Managed Deployment
+
+### 4.1 AWS Reference Architecture
+
+```
+┌──────────────────────────────────────────────────────────────┐
+│ AWS Region │
+│ │
+│ ┌──────────┐ ┌──────────────┐ ┌──────────────────┐ │
+│ │ ALB │────▶│ ECS Fargate │────▶│ RDS PostgreSQL │ │
+│ │ (TLS) │ │ (API + UI) │ │ (Multi-AZ) │ │
+│ └──────────┘ └──────────────┘ └──────────────────┘ │
+│ ┌──────────────┐ ┌──────────────────┐ │
+│ │ ECS Fargate │ │ ElastiCache │ │
+│ │ (Workers) │ │ Redis │ │
+│ └──────────────┘ └──────────────────┘ │
+│ ┌──────────────┐ │
+│ │ S3 Bucket │ SSO: Cognito or │
+│ │ (Artifacts) │ SAML federation │
+│ └──────────────┘ │
+│ ┌──────────────┐ │
+│ │ OpenSearch │ Monitoring: │
+│ │ (Search) │ CloudWatch │
+│ └──────────────┘ │
+└──────────────────────────────────────────────────────────────┘
+```
+
+### 4.2 Azure Reference Architecture
+
+```
+┌──────────────────────────────────────────────────────────────┐
+│ Azure Region │
+│ │
+│ ┌─────────────┐ ┌──────────────┐ ┌──────────────────────┐│
+│ │ App Gateway │─▶│ Container │─▶│ Azure DB for ││
+│ │ (TLS/WAF) │ │ Apps (API) │ │ PostgreSQL ││
+│ └─────────────┘ └──────────────┘ └──────────────────────┘│
+│ ┌──────────────┐ ┌──────────────────────┐│
+│ │ Container │ │ Azure Cache ││
+│ │ Apps (Worker)│ │ for Redis ││
+│ └──────────────┘ └──────────────────────┘│
+│ ┌──────────────┐ │
+│ │ Blob Storage │ SSO: Azure AD / Entra │
+│ │ (Artifacts) │ ID (SAML/OIDC native) │
+│ └──────────────┘ │
+└──────────────────────────────────────────────────────────────┘
+```
+
+---
+
+## 5. SSO Configuration
+
+### 5.1 SAML 2.0
+
+#### Ground Control SP Configuration
+
+| Setting | Value |
+|---|---|
+| Entity ID | `https://gc.example.com/saml/metadata` |
+| ACS URL | `https://gc.example.com/api/v1/auth/saml/acs` |
+| SLO URL | `https://gc.example.com/api/v1/auth/saml/slo` |
+| NameID Format | `urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress` |
+| Signed Requests | Yes (RSA-SHA256) |
+
+#### Required SAML Attributes
+
+| Attribute | SAML Name | Required |
+|---|---|---|
+| Email | `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress` | Yes |
+| Display Name | `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` | Yes |
+| Groups | `http://schemas.xmlsoap.org/claims/Group` | Optional (for role mapping) |
+
+#### Configuration Steps
+
+1. **In Ground Control Admin UI:**
+ ```
+ Settings → Authentication → SSO → SAML 2.0
+ - IdP Metadata URL: https://login.example.com/saml/metadata
+ (or upload IdP metadata XML)
+ - Attribute Mapping:
+ email: emailaddress
+ display_name: name
+ groups: Group
+ ```
+
+2. **In your IdP (e.g., Okta, Azure AD):**
+ - Create a new SAML 2.0 application
+ - Set ACS URL to `https://gc.example.com/api/v1/auth/saml/acs`
+ - Set Entity ID to `https://gc.example.com/saml/metadata`
+ - Configure attribute statements (email, name, groups)
+ - Assign users/groups
+
+3. **Test:** Click "Test SSO" in Ground Control admin → should complete round-trip.
+
+4. **Enforce:** Enable "Require SSO" to disable local password login.
+
+#### Okta-Specific Setup
+
+```
+1. Okta Admin → Applications → Create App Integration → SAML 2.0
+2. General Settings:
+ - App name: Ground Control
+ - Logo: (upload)
+3. SAML Settings:
+ - Single sign-on URL: https://gc.example.com/api/v1/auth/saml/acs
+ - Audience URI: https://gc.example.com/saml/metadata
+ - Name ID format: EmailAddress
+ - Attribute Statements:
+ name → user.displayName
+ email → user.email
+ - Group Attribute Statements:
+ groups → Matches regex: .*
+4. Assignments: Assign users or groups
+```
+
+#### Azure AD / Entra ID — Specific Setup
+
+```
+1. Azure Portal → Enterprise Applications → New Application → Create your own
+2. Single sign-on → SAML
+3. Basic SAML Configuration:
+ - Identifier: https://gc.example.com/saml/metadata
+ - Reply URL: https://gc.example.com/api/v1/auth/saml/acs
+4. Attributes & Claims:
+ - email → user.mail
+ - name → user.displayname
+ - groups → (add group claim)
+5. Assign users and groups
+```
+
+### 5.2 OpenID Connect (OIDC)
+
+#### Configuration
+
+```
+Settings → Authentication → SSO → OpenID Connect
+- Issuer URL: https://accounts.google.com
+ or https://login.microsoftonline.com/{tenant}/v2.0
+ or https://dev-xxxxx.okta.com/oauth2/default
+- Client ID: (from IdP)
+- Client Secret: (from IdP)
+- Scopes: openid profile email
+- Redirect URI: https://gc.example.com/api/v1/auth/oidc/callback
+```
+
+#### OIDC Flow
+
+```
+1. User clicks "Sign in with SSO"
+2. Browser redirects to IdP authorization endpoint
+3. User authenticates at IdP
+4. IdP redirects back to Ground Control with authorization code
+5. Ground Control exchanges code for tokens (server-side)
+6. Ground Control validates ID token, extracts claims
+7. User session created; JWT issued
+```
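+
+A minimal sketch of step 5, the server-side code exchange (a standard OAuth2 token request; the endpoint URL comes from the issuer's discovery document):
+
+```python
+import httpx
+
+def exchange_code(token_url: str, code: str, client_id: str,
+                  client_secret: str, redirect_uri: str) -> dict:
+    resp = httpx.post(token_url, data={
+        "grant_type": "authorization_code",
+        "code": code,
+        "redirect_uri": redirect_uri,
+        "client_id": client_id,
+        "client_secret": client_secret,
+    })
+    resp.raise_for_status()
+    # The returned id_token must be validated (signature, issuer, audience,
+    # expiry) before a session is created
+    return resp.json()
+```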
+
+### 5.3 SCIM 2.0 Provisioning
+
+Ground Control exposes a SCIM 2.0 endpoint for automated user/group sync.
+
+#### SCIM Endpoint
+
+```
+Base URL: https://gc.example.com/api/v1/scim/v2
+Authentication: Bearer token
+
+Supported resources:
+- /Users — Create, Read, Update, Delete, List
+- /Groups — Create, Read, Update, Delete, List
+- /Schemas — Discovery
+- /ServiceProviderConfig — Capabilities
+```
+
+#### SCIM Setup
+
+1. **In Ground Control:** Settings → Provisioning → SCIM
+ - Enable SCIM
+ - Generate bearer token
+ - Copy SCIM base URL
+
+2. **In your IdP:**
+ - Okta: Applications → Your App → Provisioning → Enable SCIM
+ - Azure AD: Enterprise App → Provisioning → Automatic → Enter SCIM URL + token
+ - Configure attribute mapping
+ - Enable: Create, Update, Deactivate
+
+3. **Role Mapping:** Map IdP groups to Ground Control roles:
+ ```
+ IdP Group "GRC-Admins" → Role: Admin
+ IdP Group "IT-Audit" → Role: Auditor
+ IdP Group "Risk-Team" → Role: Risk Manager
+ IdP Group "Control-Owners" → Role: Control Owner
+ ```
+
+---
+
+## 6. Multi-Tenancy Configuration
+
+### 6.1 Shared Schema (Default)
+
+Every tenant-scoped table carries a `tenant_id` column, and Row-Level Security (RLS) enforces isolation at the database layer.
+
+```yaml
+# values.yaml or .env
+MULTI_TENANCY_MODE: shared_schema
+```
+
+### 6.2 Schema Per Tenant
+
+Each tenant gets a dedicated PostgreSQL schema. More isolation, more overhead.
+
+```yaml
+MULTI_TENANCY_MODE: schema_per_tenant
+```
+
+### 6.3 Database Per Tenant
+
+Each tenant gets a dedicated database. Maximum isolation for regulated environments.
+
+```yaml
+MULTI_TENANCY_MODE: database_per_tenant
+TENANT_DB_TEMPLATE: postgresql+asyncpg://gc:{password}@{host}:5432/gc_{tenant_slug}
+```
+
+---
+
+## 7. Backup & Disaster Recovery
+
+### 7.1 Backup Strategy
+
+| Component | Method | Frequency | Retention |
+|---|---|---|---|
+| PostgreSQL | pg_dump or WAL archiving | Continuous (WAL) + daily full | 30 days |
+| Object Storage | S3 versioning + cross-region replication | Continuous | Per retention policy |
+| Redis | RDB snapshots | Hourly | 24 hours |
+| Search Index | Rebuild from PostgreSQL | On-demand | N/A (derived data) |
+| Configuration | Git (values.yaml, .env) | On change | Indefinite |
+
+### 7.2 Recovery Procedures
+
+```bash
+# Database restore from backup
+pg_restore -d groundcontrol backup.dump
+
+# Rebuild search index
+docker compose exec app gc-admin reindex-search
+
+# Verify artifact integrity
+docker compose exec app gc-admin verify-artifacts --repair
+```
+
+### 7.3 Recovery Targets
+
+| Metric | Target |
+|---|---|
+| RPO (Recovery Point Objective) | < 1 hour |
+| RTO (Recovery Time Objective) | < 4 hours |
+
+---
+
+## 8. Monitoring & Observability
+
+### 8.1 Health Endpoints
+
+```
+GET /health → 200 if all dependencies healthy
+GET /health/ready → 200 if ready to serve traffic
+GET /health/live → 200 if process is alive
+```
+
+### 8.2 Metrics (Prometheus)
+
+```
+GET /metrics
+
+# Key metrics:
+gc_api_requests_total{method, path, status}
+gc_api_request_duration_seconds{method, path}
+gc_active_users_total{tenant}
+gc_artifacts_stored_bytes{tenant}
+gc_assessments_active_total{tenant}
+gc_agent_results_total{agent_id, status}
+gc_plugin_health{plugin, status}
+gc_background_jobs_total{queue, status}
+gc_background_job_duration_seconds{queue}
+```
+
+### 8.3 Logging
+
+Structured JSON logs:
+```json
+{
+ "timestamp": "2026-03-07T15:30:00Z",
+ "level": "info",
+ "service": "gc-app",
+ "tenant_id": "...",
+ "request_id": "req_abc123",
+ "message": "Risk assessment completed",
+ "risk_id": "...",
+ "user_id": "..."
+}
+```
+
+Log aggregation: Forward to Elasticsearch/Loki via stdout (container logging driver).
+
+### 8.4 Alerting Recommendations
+
+| Alert | Condition | Severity |
+|---|---|---|
+| API error rate | > 5% 5xx in 5 min | Critical |
+| API latency | p95 > 1s for 5 min | Warning |
+| Database connections | > 80% pool utilized | Warning |
+| Disk usage | > 85% on any volume | Warning |
+| Certificate expiry | < 14 days | Warning |
+| Failed background jobs | > 10 failures in 1 hour | Warning |
+| Plugin health check | Failed 3 consecutive times | Warning |
+
+---
+
+## 9. Security Hardening Checklist
+
+- [ ] Change all default secrets in `.env` / Kubernetes secrets
+- [ ] Enable TLS for all external endpoints
+- [ ] Enable database encryption at rest
+- [ ] Configure network policies (Kubernetes) or security groups (cloud)
+- [ ] Enable audit logging and forward to SIEM
+- [ ] Configure SSO and disable local password login
+- [ ] Set up SCIM for automated user provisioning
+- [ ] Enable MFA for any remaining local accounts
+- [ ] Review and restrict API rate limits
+- [ ] Configure artifact encryption keys (BYOK if required)
+- [ ] Enable database connection encryption (SSL)
+- [ ] Set up automated backups and test restore procedures
+- [ ] Configure Content Security Policy headers
+- [ ] Enable HSTS
+- [ ] Review plugin permissions before enabling
+
+---
+
+## 10. Upgrade Procedures
+
+### 10.1 Docker Compose
+
+```bash
+# Pull latest images
+docker compose pull
+
+# Apply database migrations
+docker compose run --rm app gc-migrate upgrade
+
+# Restart services
+docker compose up -d
+
+# Verify health
+curl https://gc.example.com/health
+```
+
+### 10.2 Kubernetes (Helm)
+
+```bash
+# Update chart repo
+helm repo update
+
+# Review changes
+helm diff upgrade ground-control ground-control/ground-control \
+ --namespace ground-control \
+ --values values.yaml
+
+# Apply upgrade (migrations run as pre-upgrade hook)
+helm upgrade ground-control ground-control/ground-control \
+ --namespace ground-control \
+ --values values.yaml
+
+# Verify
+kubectl rollout status deployment/gc-app -n ground-control
+```
+
+### 10.3 Rollback
+
+```bash
+# Helm rollback
+helm rollback ground-control -n ground-control
+
+# Docker Compose rollback
+docker compose down
+docker compose pull  # after pinning the previous image tag in docker-compose.yml
+docker compose up -d
+docker compose exec app gc-migrate downgrade -1
+```
+
+---
+
+## 11. AI-Assisted Development Tooling
+
+Ground Control development uses MCP (Model Context Protocol) servers for AI-assisted workflows:
+
+| MCP Server | Purpose | Phase |
+|---|---|---|
+| **rocq-mcp** | Interactive Coq/Rocq proof development and type checking | 0 |
+| **AWS MCP** | AWS infrastructure management and deployment | 0 |
+| **GC Ops MCP** | Local dev operations (Docker, migrations, logs, test data) | 11 |
+
+See issue #006b for setup instructions.
diff --git a/docs/user-stories/USER_STORIES.md b/docs/user-stories/USER_STORIES.md
new file mode 100644
index 00000000..39906e0f
--- /dev/null
+++ b/docs/user-stories/USER_STORIES.md
@@ -0,0 +1,421 @@
+# Ground Control — User Stories
+
+**Version:** 1.0.0
+**Date:** 2026-03-07
+
+Stories are grouped by epic and reference personas from the PRD.
+
+---
+
+## Epic 1: Risk Management
+
+### US-1.1 — Maintain Risk Register [MVP]
+**As** Riley (IT Risk Manager),
+**I want** to create, view, edit, and archive IT risks in a centralized register,
+**So that** the organization has a single source of truth for its IT risk landscape.
+
+**Acceptance Criteria:**
+- [ ] Can create a risk with: title, description, category, owner, inherent likelihood, inherent impact
+- [ ] Can assign a risk to one or more business units / systems
+- [ ] Can filter and search risks by category, owner, rating, status
+- [ ] Can archive a risk (soft-delete) with a reason
+- [ ] Risk history (all field changes) is preserved in an immutable audit log
+- [ ] Can bulk-import risks from CSV or JSON
+
+### US-1.2 — Conduct Risk Assessment Campaign
+**As** Riley,
+**I want** to launch a time-boxed risk assessment campaign that assigns risks to assessors,
+**So that** the organization periodically reassesses its risk posture.
+
+**Acceptance Criteria:**
+- [ ] Can create a campaign with: name, scope (risk categories, business units), start/end dates
+- [ ] Can assign individual risks to specific assessors
+- [ ] Assessors receive notifications and see assigned risks in their task list
+- [ ] Assessors can update likelihood and impact scores with justification text and evidence links
+- [ ] Campaign dashboard shows completion percentage, overdue items, score changes
+- [ ] Campaign can be finalized (locked) after review
+
+### US-1.3 — Define Risk Appetite & Thresholds
+**As** Pat (CISO),
+**I want** to set risk appetite thresholds that visually overlay on heat maps,
+**So that** the organization can see which risks exceed tolerance levels.
+
+**Acceptance Criteria:**
+- [ ] Can define appetite thresholds per risk category (or global)
+- [ ] Heat map displays appetite boundary line
+- [ ] Risks above appetite are flagged and trigger notifications
+- [ ] Can export heat map with appetite overlay as image or PDF
+
+### US-1.4 — Track Risk Treatment Plans
+**As** Riley,
+**I want** to create treatment plans linked to risks with action items, owners, and deadlines,
+**So that** risk mitigation activities are tracked to completion.
+
+**Acceptance Criteria:**
+- [ ] Can create a treatment plan (accept, mitigate, transfer, avoid) linked to a risk
+- [ ] Each plan has one or more action items with owner, due date, status
+- [ ] Owners receive reminders as due dates approach
+- [ ] Completing all action items prompts residual risk re-assessment
+- [ ] Treatment plan history is auditable
+
+### US-1.5 — View Risk Dashboard
+**As** Pat (CISO),
+**I want** an executive dashboard showing overall risk posture, trends, and top risks,
+**So that** I can make informed decisions in risk committee meetings.
+
+**Acceptance Criteria:**
+- [ ] Dashboard shows: risk heat map, top 10 risks, trend over last 4 quarters, treatment plan status
+- [ ] Can filter by business unit, category, assessment period
+- [ ] Can export dashboard as PDF for board reporting
+- [ ] Dashboard data refreshes on page load
+
+---
+
+## Epic 2: Control Management
+
+### US-2.1 — Maintain Control Catalog [MVP]
+**As** Avery (Internal Auditor),
+**I want** to maintain a catalog of IT controls with standard attributes,
+**So that** controls are consistently documented and testable.
+
+**Acceptance Criteria:**
+- [ ] Can create a control with: ID, title, objective, description, type, nature, frequency, owner
+- [ ] Controls link to the Common Control Library (CCL)
+- [ ] Can map a control to one or more framework requirements
+- [ ] Can filter controls by framework, type, nature, owner, effectiveness rating
+- [ ] Control changes are version-controlled with diff history
+
+### US-2.2 — Map Controls Across Frameworks
+**As** Morgan (Compliance Analyst),
+**I want** to map a single control to multiple framework requirements,
+**So that** testing that control once satisfies compliance obligations across frameworks.
+
+**Acceptance Criteria:**
+- [ ] Can search framework requirements by ID or keyword
+- [ ] Can link a control to multiple framework requirements (many-to-many)
+- [ ] Mapping view shows which frameworks are covered for each control
+- [ ] Gap analysis report shows framework requirements without mapped controls
+- [ ] Can bulk-map controls using CCL suggested mappings
+
+### US-2.3 — Use Common Control Library
+**As** Morgan,
+**I want** to browse and adopt pre-built common controls from the CCL,
+**So that** I can quickly establish control coverage using standardized language.
+
+**Acceptance Criteria:**
+- [ ] Can browse CCL by category, keyword, or mapped framework
+- [ ] Can adopt a CCL control into the org catalog (creates a linked copy)
+- [ ] Adopted controls inherit framework mappings from the CCL
+- [ ] When CCL updates, adopted controls show available updates
+- [ ] Can contribute custom controls back to a shared CCL (if configured)
+
+---
+
+## Epic 3: Assessment & Testing
+
+### US-3.1 — Plan Assessment Campaign [MVP]
+**As** Avery,
+**I want** to create an assessment campaign that defines scope, timeline, and team assignments,
+**So that** testing work is organized and tracked.
+
+**Acceptance Criteria:**
+- [ ] Can create a campaign with: name, type (SOX ITGC, SOC 2, etc.), period, scope (controls/systems)
+- [ ] Can assign testers and reviewers to specific controls
+- [ ] Campaign generates workpapers from templates for each in-scope control
+- [ ] Can set milestones (fieldwork start, fieldwork end, review deadline, final report)
+- [ ] Calendar view shows all campaigns and milestones
+
+### US-3.2 — Execute Test Procedures [MVP]
+**As** Avery,
+**I want** to execute test procedures against a control and record results step by step,
+**So that** testing is thorough, consistent, and documented.
+
+**Acceptance Criteria:**
+- [ ] Each control in a campaign has one or more test procedures (from template or ad-hoc)
+- [ ] Each test procedure has ordered steps with: instruction, expected result
+- [ ] Tester records: actual result, pass/fail/N/A, evidence links, notes
+- [ ] Can select a sample from a population and record sample items
+- [ ] Can mark a test procedure as complete, which rolls up to control status
+- [ ] Partially completed test procedures show progress percentage
+
+### US-3.3 — Collect Evidence via Requests
+**As** Avery,
+**I want** to send evidence requests to control owners with clear instructions and deadlines,
+**So that** evidence collection is structured and tracked rather than ad-hoc.
+
+**Acceptance Criteria:**
+- [ ] Can create an evidence request linked to a control, test procedure, or assessment
+- [ ] Request specifies: description of needed evidence, format guidance, due date
+- [ ] Control owner receives notification and sees request in their portal
+- [ ] Owner uploads evidence directly; artifacts auto-link to the request
+- [ ] Overdue requests trigger escalation notifications
+- [ ] Requester can approve or reject submitted evidence with comments
+
+### US-3.4 — Review and Approve Workpapers
+**As** Avery (as reviewer),
+**I want** to review completed workpapers and either approve or send back for rework,
+**So that** testing quality is maintained through peer/manager review.
+
+**Acceptance Criteria:**
+- [ ] Workpapers follow a configurable review chain (preparer → reviewer → approver)
+- [ ] Reviewer can add review notes at workpaper or step level
+- [ ] Reviewer can approve, reject (with comments), or request changes
+- [ ] Approval locks the workpaper from further edits (unless re-opened)
+- [ ] Review status is visible on the campaign dashboard
+
+### US-3.5 — Agent-Performed Testing
+**As** an AI Assessment Agent,
+**I want** to receive assigned test procedures via API, execute them, and submit structured results,
+**So that** routine tests can be automated while maintaining audit quality.
+
+**Acceptance Criteria:**
+- [ ] Agent authenticates via OAuth2 client credentials or API key
+- [ ] Agent retrieves assigned test procedures with all context (control description, prior results, evidence)
+- [ ] Agent submits results in structured format: step results, evidence references, confidence score
+- [ ] Agent results are flagged as "agent-produced" and routed to human review by default
+- [ ] Agent provenance metadata (model, version, input hash) is recorded
+- [ ] Agent cannot mark a workpaper as final — only humans can approve
+
+---
+
+## Epic 4: Evidence & Artifact Management
+
+### US-4.1 — Upload and Manage Artifacts [MVP]
+**As** Jordan (Control Owner),
+**I want** to upload files as evidence and have them securely stored with version tracking,
+**So that** evidence is preserved with integrity for audit purposes.
+
+**Acceptance Criteria:**
+- [ ] Can upload files up to 500 MB (configurable) via UI or API
+- [ ] Files are encrypted at rest (AES-256) and integrity-hashed (SHA-256; hashing sketched below)
+- [ ] Can upload new versions of an artifact; previous versions are retained
+- [ ] Can tag artifacts with metadata (type, period, control, system)
+- [ ] Can preview common file types in-browser (PDF, images, text, spreadsheets)
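+
+A minimal sketch of the SHA-256 integrity step above, standard library only; the same digest can be recomputed later for the tamper check in US-4.4. The `sha256:` prefix mirrors the provenance example in UC-06 and is otherwise an assumption:
+
+```python
+import hashlib
+
+def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
+    """Stream in 1 MiB chunks so large uploads never sit fully in memory."""
+    digest = hashlib.sha256()
+    with open(path, "rb") as fh:
+        while chunk := fh.read(chunk_size):
+            digest.update(chunk)
+    return f"sha256:{digest.hexdigest()}"
+
+def verify_integrity(path: str, recorded: str) -> bool:
+    # Recompute and compare against the hash stored at upload time (US-4.4).
+    return sha256_of_file(path) == recorded
+```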
+
+### US-4.2 — Link Evidence to Entities [MVP]
+**As** Avery,
+**I want** to link an artifact to one or more controls, test steps, risks, or findings,
+**So that** evidence relationships are explicit and traceable.
+
+**Acceptance Criteria:**
+- [ ] Can link an artifact to any entity type (many-to-many)
+- [ ] Entity detail pages show linked artifacts
+- [ ] Artifact detail page shows all linked entities
+- [ ] Can add a description/context note to each link (why this evidence is relevant)
+- [ ] Unlinking preserves audit trail of the former link
+
+### US-4.3 — Automated Evidence Collection
+**As** Avery,
+**I want** to configure plugins that automatically collect evidence from source systems,
+**So that** evidence gathering requires less manual effort.
+
+**Acceptance Criteria:**
+- [ ] Can configure a collection plugin (e.g., AWS Config snapshot, Jira query, GitHub PR list)
+- [ ] Plugin runs on schedule or on-demand
+- [ ] Collected artifacts are auto-linked to the configured control/assessment
+- [ ] Collection runs are logged with status, timestamp, artifact count
+- [ ] Failed collections generate alerts
+
+### US-4.4 — Evidence Lineage and Chain of Custody
+**As** an External Auditor (consumer),
+**I want** to see the full history of an evidence artifact — who provided it, when, what it was linked to, and who reviewed it,
+**So that** I can trust the integrity of the evidence.
+
+**Acceptance Criteria:**
+- [ ] Artifact detail shows full timeline: uploaded, linked, reviewed, approved
+- [ ] Each event records: actor (user or agent), timestamp, action, context
+- [ ] Hash verification confirms artifact has not been tampered with
+- [ ] Can generate a chain-of-custody report for a set of artifacts
+
+---
+
+## Epic 5: Findings & Remediation
+
+### US-5.1 — Record Findings [MVP]
+**As** Avery,
+**I want** to create findings when control testing identifies deficiencies,
+**So that** gaps are formally documented and tracked to remediation.
+
+**Acceptance Criteria:**
+- [ ] Can create a finding linked to a control, test procedure, and assessment
+- [ ] Finding includes: title, description, root cause, risk rating, classification (deficiency / significant deficiency / material weakness)
+- [ ] Can attach evidence that supports the finding
+- [ ] Finding follows configurable lifecycle states (Draft → Open → Remediation → Validation → Closed)
+- [ ] Duplicate finding detection suggests potential matches
+
+### US-5.2 — Manage Remediation
+**As** Jordan (Control Owner),
+**I want** to create and track remediation action plans for findings assigned to me,
+**So that** I can resolve identified issues and demonstrate closure.
+
+**Acceptance Criteria:**
+- [ ] Finding shows assigned remediation owner and target date
+- [ ] Owner creates action plan with steps, dates, and evidence of remediation
+- [ ] Owner uploads remediation evidence and marks plan as complete
+- [ ] Completion triggers validation testing by the audit team
+- [ ] Overdue remediations trigger escalation notifications
+
+### US-5.3 — Validate Remediation
+**As** Avery,
+**I want** to validate that remediation actions effectively address the finding,
+**So that** findings are only closed when the issue is truly resolved.
+
+**Acceptance Criteria:**
+- [ ] Validator reviews remediation evidence and performs re-testing
+- [ ] Can approve (close finding) or reject (send back for rework)
+- [ ] Validation results and evidence are recorded on the finding
+- [ ] Closed findings contribute to control effectiveness ratings
+
+---
+
+## Epic 6: Reporting
+
+### US-6.1 — Generate Executive Reports
+**As** Pat (CISO),
+**I want** to generate board-ready reports summarizing risk posture, control health, and findings,
+**So that** I can present to the risk committee and board of directors.
+
+**Acceptance Criteria:**
+- [ ] One-click report generation for standard report types (risk summary, control health, assessment status)
+- [ ] Reports include charts, tables, and narrative sections
+- [ ] Export as PDF or PPTX
+- [ ] Can customize report templates (add logo, sections, boilerplate)
+- [ ] Report archives are retained for historical comparison
+
+### US-6.2 — Build Custom Reports
+**As** Riley,
+**I want** to build ad-hoc reports by selecting fields, filters, and groupings,
+**So that** I can answer specific questions about the risk and control data.
+
+**Acceptance Criteria:**
+- [ ] Report builder offers selectable entities (risks, controls, findings, assessments) and their fields
+- [ ] Can add filters, sort, group, and aggregate
+- [ ] Can save reports as templates for reuse
+- [ ] Export to CSV, Excel, PDF
+- [ ] Can schedule saved reports for periodic delivery via email
+
+### US-6.3 — API-Driven Analytics
+**As** a BI Engineer,
+**I want** to query Ground Control data via API or read replica,
+**So that** I can build dashboards in our organization's BI tool.
+
+**Acceptance Criteria:**
+- [ ] REST and GraphQL APIs expose all reporting data
+- [ ] Can configure a read-only database replica for direct BI tool connection
+- [ ] API supports pagination, filtering, and field selection
+- [ ] Rate limiting protects production workloads from heavy BI queries
+
+---
+
+## Epic 7: Administration & Platform
+
+### US-7.1 — Configure SSO
+**As** Sam (Platform Admin),
+**I want** to configure SSO via SAML 2.0 or OIDC so users authenticate through our corporate IdP,
+**So that** access is centrally managed and secure.
+
+**Acceptance Criteria:**
+- [ ] Admin UI to configure SAML 2.0 SP settings (entity ID, ACS URL, certificate)
+- [ ] Admin UI to configure OIDC settings (issuer, client ID/secret, scopes; sketched below)
+- [ ] Can test SSO configuration before enforcing
+- [ ] Can enforce SSO (disable local password login)
+- [ ] JIT (just-in-time) provisioning creates user accounts on first SSO login
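+
+A minimal sketch of the OIDC settings the admin UI captures, modeled as a validated value object. The fields come from the criterion above; the class name, defaults, and validation are assumptions:
+
+```python
+from dataclasses import dataclass
+
+@dataclass(frozen=True)
+class OIDCSettings:
+    """Fields from the OIDC criterion above; validation is illustrative."""
+    issuer: str                       # e.g. https://idp.example.com
+    client_id: str
+    client_secret: str
+    scopes: tuple[str, ...] = ("openid", "profile", "email")
+
+    def __post_init__(self) -> None:
+        # Fail fast on an obviously bad issuer before the test-login step.
+        if not self.issuer.startswith("https://"):
+            raise ValueError("issuer must be an https:// URL")
+```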
+
+### US-7.2 — Manage Users and Roles [MVP]
+**As** Sam,
+**I want** to manage users, groups, and role assignments with fine-grained permissions,
+**So that** users only access what they need.
+
+**Acceptance Criteria:**
+- [ ] Pre-built roles: Admin, Risk Manager, Auditor, Control Owner, Viewer, Agent
+- [ ] Can create custom roles with granular permissions (per entity type: create, read, update, delete, approve)
+- [ ] Can assign roles at global, business unit, or assessment scope
+- [ ] SCIM 2.0 provisioning syncs users and groups from IdP
+- [ ] User deactivation revokes access immediately; data is preserved
+
+### US-7.3 — Install and Configure Plugins
+**As** Sam,
+**I want** to install plugins that extend Ground Control with new integrations and framework support,
+**So that** the platform adapts to our organization's specific needs.
+
+**Acceptance Criteria:**
+- [ ] Plugin catalog (local or marketplace) lists available plugins
+- [ ] Can install, enable, disable, and uninstall plugins
+- [ ] Plugins declare required permissions (API scopes, data access)
+- [ ] Plugin configuration UI is rendered from the plugin's schema
+- [ ] Plugin updates are versioned; can roll back to previous version
+
+### US-7.4 — View Audit Logs
+**As** Sam,
+**I want** to view an immutable audit log of all actions taken in the platform,
+**So that** we have a complete record for security and compliance purposes.
+
+**Acceptance Criteria:**
+- [ ] Every create, update, delete, login, and permission change is logged
+- [ ] Log entries include: timestamp, actor, action, entity, old/new values, IP address
+- [ ] Can search and filter logs by date range, actor, action type, entity
+- [ ] Logs cannot be modified or deleted (append-only; tamper-evidence sketched below)
+- [ ] Can export logs to SIEM (Splunk, Elastic) via syslog or webhook
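+
+One way to make the append-only log above tamper-evident is a hash chain: each entry's hash covers its predecessor's, so any in-place edit invalidates every later entry. A minimal sketch, with field handling illustrative only:
+
+```python
+import hashlib
+import json
+
+GENESIS = "0" * 64  # fixed predecessor value for the first entry
+
+def entry_hash(prev_hash: str, entry: dict) -> str:
+    """Canonical-JSON the entry and chain it to the previous hash."""
+    payload = json.dumps(entry, sort_keys=True, separators=(",", ":"))
+    return hashlib.sha256((prev_hash + payload).encode()).hexdigest()
+
+def verify_chain(entries: list[dict]) -> bool:
+    """Entries carry their own 'hash'; recompute each link in order."""
+    prev = GENESIS
+    for e in entries:
+        body = {k: v for k, v in e.items() if k != "hash"}
+        if e["hash"] != entry_hash(prev, body):
+            return False
+        prev = e["hash"]
+    return True
+```
+
+A verifier only needs the previous hash to check the newest entry, so integrity checks can run incrementally — for example, on each export to a SIEM.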
+
+### US-7.5 — Manage Taxonomy & Configuration
+**As** Sam,
+**I want** to customize the platform's taxonomy (risk categories, rating scales, lifecycle states),
+**So that** Ground Control uses our organization's language and processes.
+
+**Acceptance Criteria:**
+- [ ] Can add, edit, reorder, and retire taxonomy values (risk categories, control types, etc.)
+- [ ] Can configure scoring scales (3-point, 5-point, custom) with labels and colors
+- [ ] Can configure workflow states and transitions for each entity type
+- [ ] Changes to taxonomy are versioned; historical data retains original labels
+- [ ] Can import/export taxonomy configuration as YAML/JSON (illustrative example below)
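+
+An illustrative (not normative) shape for the YAML export mentioned above. Every key name is an assumption; the 5-point scale and finding lifecycle states come from this story and US-5.1:
+
+```yaml
+# Hypothetical taxonomy export — key names are illustrative only.
+version: 3                      # taxonomy changes are versioned
+risk_categories:
+  - key: third_party
+    label: "Third-Party / Vendor"
+    active: true
+scoring_scales:
+  likelihood:
+    points: 5
+    labels: [Rare, Unlikely, Possible, Likely, Almost Certain]
+workflow_states:
+  finding: [Draft, Open, Remediation, Validation, Closed]
+```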
+
+---
+
+## Epic 8: Agent-Specific Stories
+
+### US-8.1 — Register an Agent
+**As** Sam,
+**I want** to register an AI agent with specific permissions and an API credential,
+**So that** agents can interact with the platform under controlled, auditable access.
+
+**Acceptance Criteria:**
+- [ ] Can register an agent with: name, description, owner (human), allowed scopes
+- [ ] Agent receives OAuth2 client credentials (client_id + client_secret)
+- [ ] Agent is assigned a role that limits its permissions
+- [ ] Agent registration is logged in the audit trail
+- [ ] Can revoke agent credentials immediately
+
+### US-8.2 — Agent Retrieves Assignments
+**As** an AI Agent,
+**I want** to query the API for test procedures assigned to me,
+**So that** I know what work to perform.
+
+**Acceptance Criteria:**
+- [ ] `GET /api/v1/agents/{agent_id}/assignments` returns pending test procedures (example call below)
+- [ ] Response includes: control context, test steps, prior period results, linked evidence
+- [ ] Can filter by assessment campaign, priority, due date
+- [ ] Pagination and rate limiting protect the API
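+
+A minimal sketch of the retrieval flow, assuming the `POST /oauth/token` endpoint shown in UC-06 and a standard client-credentials grant; the base URL, query parameters, and response envelope are assumptions:
+
+```python
+import requests
+
+BASE = "https://groundcontrol.example.com"  # assumed deployment URL
+
+def get_assignments(agent_id: str, client_id: str, client_secret: str) -> list[dict]:
+    # Client-credentials grant, as in the UC-06 sequence diagram.
+    tok = requests.post(f"{BASE}/oauth/token", data={
+        "grant_type": "client_credentials",
+        "client_id": client_id,
+        "client_secret": client_secret,
+    }, timeout=10)
+    tok.raise_for_status()
+    headers = {"Authorization": f"Bearer {tok.json()['access_token']}"}
+
+    resp = requests.get(
+        f"{BASE}/api/v1/agents/{agent_id}/assignments",
+        headers=headers,
+        params={"page": 1},  # filter/pagination parameter names assumed
+        timeout=10,
+    )
+    resp.raise_for_status()
+    return resp.json()["data"]  # envelope field name is an assumption
+```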
+
+### US-8.3 — Agent Submits Results
+**As** an AI Agent,
+**I want** to submit structured test results for a test procedure,
+**So that** my analysis is recorded and routed for human review.
+
+**Acceptance Criteria:**
+- [ ] `POST /api/v1/test-procedures/{id}/results` accepts a structured result payload (example below)
+- [ ] Payload includes: per-step results, overall conclusion, confidence score, evidence references, provenance metadata
+- [ ] Results are flagged as `agent_produced = true`
+- [ ] Submission triggers notification to the assigned human reviewer
+- [ ] Invalid or incomplete payloads return descriptive 422 errors
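+
+A minimal sketch of a submission. The payload shape follows the criterion above and the JSON note in UC-06; the auth headers come from the US-8.2 sketch, and the per-step fields are assumptions:
+
+```python
+import requests
+
+def submit_results(base: str, headers: dict, procedure_id: str) -> None:
+    payload = {
+        "steps": [  # per-step fields are illustrative
+            {"step": 1, "actual_result": "MFA enforced for all admin accounts",
+             "conclusion": "pass", "evidence_refs": ["artifact-1234"]},
+        ],
+        "conclusion": "effective",
+        "confidence": 0.87,
+        "provenance": {               # mirrors the UC-06 example payload
+            "model": "claude-opus-4-6",
+            "input_hash": "sha256:...",
+            "agent_version": "1.2.0",
+        },
+    }
+    resp = requests.post(
+        f"{base}/api/v1/test-procedures/{procedure_id}/results",
+        json=payload, headers=headers, timeout=10,
+    )
+    if resp.status_code == 422:
+        raise ValueError(resp.json())  # descriptive validation errors per the criteria
+    resp.raise_for_status()
+```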
+
+### US-8.4 — Agent Suggests Control Mappings
+**As** an AI Agent,
+**I want** to analyze a control description and suggest framework mappings,
+**So that** compliance analysts can quickly map new controls to relevant requirements.
+
+**Acceptance Criteria:**
+- [ ] `POST /api/v1/controls/{id}/suggest-mappings` triggers agent analysis (client sketch below)
+- [ ] Response includes: suggested framework requirements with confidence scores
+- [ ] Suggestions are presented to the human analyst for approval/rejection
+- [ ] Approved suggestions create the actual mappings
+- [ ] Suggestion history is retained for agent performance tracking
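+
+A minimal client-side sketch of the suggestion flow above; the response field names (other than the confidence score) are assumptions:
+
+```python
+import requests
+
+def fetch_suggestions(base: str, headers: dict, control_id: str,
+                      min_confidence: float = 0.7) -> list[dict]:
+    resp = requests.post(
+        f"{base}/api/v1/controls/{control_id}/suggest-mappings",
+        headers=headers, timeout=30,
+    )
+    resp.raise_for_status()
+    # Pre-filter low-confidence items for the analyst; every surviving
+    # suggestion still requires human approval per the criteria above.
+    return [s for s in resp.json()["suggestions"]   # envelope field assumed
+            if s["confidence"] >= min_confidence]
+```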
diff --git a/docs/user-stories/USE_CASES.md b/docs/user-stories/USE_CASES.md
new file mode 100644
index 00000000..b47f5178
--- /dev/null
+++ b/docs/user-stories/USE_CASES.md
@@ -0,0 +1,559 @@
+# Ground Control — Use Cases (UML)
+
+**Version:** 1.0.0
+**Date:** 2026-03-07
+
+This document describes system use cases with PlantUML-compatible diagrams.
+
+---
+
+## 1. System Use Case Overview
+
+```plantuml
+@startuml
+left to right direction
+skinparam actorStyle awesome
+
+actor "IT Risk Manager" as RM
+actor "Internal Auditor" as IA
+actor "Control Owner" as CO
+actor "Compliance Analyst" as CA
+actor "CISO / Executive" as EX
+actor "AI Agent" as AG
+actor "Platform Admin" as PA
+
+rectangle "Ground Control" {
+ package "Risk Management" {
+ usecase "Manage Risk Register" as UC_RR
+ usecase "Run Risk Assessment" as UC_RA
+ usecase "Define Risk Appetite" as UC_AP
+ usecase "Track Treatment Plans" as UC_TP
+ usecase "View Risk Dashboard" as UC_RD
+ }
+
+ package "Control Management" {
+ usecase "Manage Control Catalog" as UC_CC
+ usecase "Map Controls to Frameworks" as UC_CF
+ usecase "Browse Common Control Library" as UC_CL
+ }
+
+ package "Assessment & Testing" {
+ usecase "Plan Assessment Campaign" as UC_AC
+ usecase "Execute Test Procedures" as UC_ET
+ usecase "Collect Evidence" as UC_CE
+ usecase "Review Workpapers" as UC_RW
+ usecase "Agent-Performed Testing" as UC_AT
+ }
+
+ package "Evidence Management" {
+ usecase "Upload Artifacts" as UC_UA
+ usecase "Link Evidence" as UC_LE
+ usecase "Auto-Collect Evidence" as UC_AE
+ usecase "View Evidence Lineage" as UC_VL
+ }
+
+ package "Findings & Remediation" {
+ usecase "Record Findings" as UC_RF
+ usecase "Manage Remediation" as UC_MR
+ usecase "Validate Remediation" as UC_VR
+ }
+
+ package "Reporting" {
+ usecase "Generate Executive Reports" as UC_ER
+ usecase "Build Custom Reports" as UC_CR
+ usecase "API Analytics" as UC_AA
+ }
+
+ package "Administration" {
+ usecase "Configure SSO" as UC_SS
+ usecase "Manage Users & Roles" as UC_UR
+ usecase "Install Plugins" as UC_IP
+ usecase "View Audit Logs" as UC_AL
+ usecase "Configure Taxonomy" as UC_CT
+ usecase "Register Agents" as UC_AG
+ }
+}
+
+' Risk Management
+RM --> UC_RR
+RM --> UC_RA
+RM --> UC_TP
+RM --> UC_RD
+EX --> UC_AP
+EX --> UC_RD
+
+' Control Management
+IA --> UC_CC
+CA --> UC_CF
+CA --> UC_CL
+
+' Assessment & Testing
+IA --> UC_AC
+IA --> UC_ET
+IA --> UC_CE
+IA --> UC_RW
+AG --> UC_AT
+
+' Evidence Management
+CO --> UC_UA
+IA --> UC_LE
+AG --> UC_AE
+IA --> UC_VL
+
+' Findings & Remediation
+IA --> UC_RF
+CO --> UC_MR
+IA --> UC_VR
+
+' Reporting
+EX --> UC_ER
+RM --> UC_CR
+AG --> UC_AA
+
+' Administration
+PA --> UC_SS
+PA --> UC_UR
+PA --> UC_IP
+PA --> UC_AL
+PA --> UC_CT
+PA --> UC_AG
+
+@enduml
+```
+
+---
+
+## 2. Detailed Use Cases
+
+### UC-01: Manage Risk Register
+
+| Field | Value |
+|---|---|
+| **Name** | Manage Risk Register |
+| **Primary Actor** | IT Risk Manager |
+| **Stakeholders** | CISO, Control Owners, Auditors |
+| **Precondition** | User has Risk Manager role; taxonomy is configured |
+| **Postcondition** | Risk register is updated; changes are audit-logged |
+| **Trigger** | User opens the Risk Register, or a risk API call is received |
+
+**Main Success Scenario:**
+1. Actor opens the risk register view.
+2. System displays list of risks with filters (category, rating, owner, status).
+3. Actor creates a new risk entry, filling in required fields.
+4. System validates input against configured taxonomy and saves.
+5. System logs the creation event in the audit trail.
+6. Actor optionally links the risk to business units, systems, and controls.
+
+**Extensions:**
+- 3a. Actor imports risks from CSV/JSON → system validates each row, reports errors, imports valid rows.
+- 4a. Validation fails → system displays field-level errors, actor corrects.
+- 6a. Actor archives a risk → system prompts for reason, soft-deletes, logs.
+
+---
+
+### UC-02: Run Risk Assessment Campaign
+
+| Field | Value |
+|---|---|
+| **Name** | Run Risk Assessment Campaign |
+| **Primary Actor** | IT Risk Manager |
+| **Stakeholders** | Risk Assessors, CISO |
+| **Precondition** | Risk register has risks; assessors are provisioned |
+| **Postcondition** | All in-scope risks are reassessed; campaign is finalized |
+| **Trigger** | Periodic schedule or manual initiation |
+
+**Main Success Scenario:**
+1. Actor creates a new campaign (name, scope, dates, scoring methodology).
+2. System populates the campaign with in-scope risks based on filters.
+3. Actor assigns risks to individual assessors.
+4. System notifies assessors of their assignments.
+5. Assessors open each assigned risk and update scores with justification.
+6. Assessors link supporting evidence to their assessments.
+7. Actor reviews campaign progress on the dashboard.
+8. Actor finalizes the campaign; system locks all assessments.
+9. System generates comparison report (prior period vs. current).
+
+**Extensions:**
+- 5a. Assessor disagrees with risk categorization → adds comment, flags for Risk Manager review.
+- 8a. Incomplete assessments exist → system blocks finalization, lists incomplete items.
+
+---
+
+### UC-03: Execute Test Procedures
+
+| Field | Value |
+|---|---|
+| **Name** | Execute Test Procedures |
+| **Primary Actor** | Internal Auditor (or AI Agent) |
+| **Stakeholders** | Audit Manager (reviewer), Control Owners |
+| **Precondition** | Assessment campaign is active; test procedures exist |
+| **Postcondition** | Test results recorded; workpaper ready for review |
+| **Trigger** | Auditor opens assigned workpaper |
+
+**Main Success Scenario:**
+1. Actor opens the workpaper for an assigned control.
+2. System displays the test procedure with ordered steps.
+3. For each step, actor performs the test and records:
+ a. Actual result observed
+   b. Pass / Fail / N/A conclusion
+ c. Links to supporting evidence
+ d. Notes or screenshots
+4. Actor marks the test procedure as complete.
+5. System updates control status and rolls up to campaign progress.
+6. System routes the workpaper to the assigned reviewer.
+
+**Extensions:**
+- 1a. Actor is an AI Agent → authenticates via API, retrieves test procedure via `GET /api/v1/test-procedures/{id}`.
+- 3a. Agent submits results via `POST /api/v1/test-procedures/{id}/results` with structured payload.
+- 3b. Agent results include provenance metadata and confidence score.
+- 6a. Agent-produced results are auto-flagged for mandatory human review.
+
+```plantuml
+@startuml
+title UC-03: Execute Test Procedures — Activity Diagram
+
+start
+
+:Open assigned workpaper;
+
+if (Actor type?) then (Human)
+ :View test procedure steps in UI;
+else (AI Agent)
+ :Retrieve test procedure via API;
+endif
+
+repeat
+ :Read test step instructions;
+ :Perform test / analyze evidence;
+ :Record actual result;
+  :Select conclusion (Pass/Fail/N/A);
+ :Link supporting evidence;
+repeat while (More steps?) is (Yes)
+->No;
+
+:Mark test procedure complete;
+
+if (Agent-produced?) then (Yes)
+ :Flag for mandatory human review;
+else (No)
+ :Route to assigned reviewer;
+endif
+
+:Update campaign progress;
+
+stop
+@enduml
+```
+
+---
+
+### UC-04: Collect Evidence
+
+| Field | Value |
+|---|---|
+| **Name** | Collect Evidence |
+| **Primary Actor** | Internal Auditor (requester) / Control Owner (provider) |
+| **Stakeholders** | Audit Manager |
+| **Precondition** | Assessment is active; control owner is provisioned |
+| **Postcondition** | Evidence artifacts are uploaded and linked |
+| **Trigger** | Auditor creates evidence request |
+
+**Main Success Scenario:**
+1. Auditor creates an evidence request specifying the evidence needed, format guidance, and due date.
+2. System sends notification to the Control Owner.
+3. Control Owner opens their evidence request portal.
+4. Control Owner uploads requested artifacts.
+5. System hashes artifacts, encrypts at rest, links to the request.
+6. Auditor receives notification of submission.
+7. Auditor reviews and accepts the evidence (or rejects with comments).
+
+**Extensions:**
+- 2a. Automated collection plugin → system runs plugin on schedule, uploads artifacts automatically.
+- 4a. Owner uploads wrong format → system warns but accepts (auditor can reject later).
+- 4b. Due date passes without upload → system sends escalation to the Control Owner and their manager.
+
+```plantuml
+@startuml
+title UC-04: Evidence Collection — Sequence Diagram
+
+actor Auditor as A
+participant "Ground Control" as GC
+actor "Control Owner" as CO
+participant "Plugin Engine" as PE
+database "Artifact Store" as AS
+
+== Manual Evidence Request ==
+
+A -> GC : Create evidence request
+GC -> CO : Send notification
+CO -> GC : Upload artifact(s)
+GC -> AS : Store (encrypt, hash)
+GC -> A : Notify: evidence submitted
+A -> GC : Review & accept/reject
+
+== Automated Evidence Collection ==
+
+GC -> PE : Trigger scheduled collection
+PE -> PE : Query source system (AWS, Jira, etc.)
+PE -> AS : Store collected artifacts
+PE -> GC : Log collection results
+GC -> A : Notify: auto-evidence available
+
+@enduml
+```
+
+---
+
+### UC-05: Cross-Framework Control Mapping
+
+| Field | Value |
+|---|---|
+| **Name** | Cross-Framework Control Mapping |
+| **Primary Actor** | Compliance Analyst |
+| **Supporting Actor** | AI Agent (suggestion) |
+| **Precondition** | Control exists; framework libraries are loaded |
+| **Postcondition** | Control is mapped to relevant framework requirements |
+| **Trigger** | New control created or framework added |
+
+**Main Success Scenario:**
+1. Analyst opens a control's framework mapping view.
+2. System displays current mappings and the CCL reference mapping.
+3. Analyst searches for framework requirements to map.
+4. Analyst adds mappings with a relevance note.
+5. System validates no circular or duplicate mappings.
+6. System updates the coverage matrix.
+
+**Extensions:**
+- 2a. Analyst requests AI suggestions → system calls agent endpoint.
+- 2b. Agent returns ranked suggestions with confidence scores.
+- 4a. Analyst approves/rejects each suggestion → approved ones become mappings.
+
+```plantuml
+@startuml
+title UC-05: Cross-Framework Mapping — Sequence Diagram
+
+actor "Compliance Analyst" as CA
+participant "Ground Control" as GC
+participant "AI Agent" as AG
+database "Framework Library" as FL
+
+CA -> GC : Open control mapping view
+GC -> FL : Load current mappings + CCL reference
+GC -> CA : Display mapping matrix
+
+CA -> GC : Request AI mapping suggestions
+GC -> AG : POST /suggest-mappings (control description)
+AG -> FL : Query framework requirements
+AG -> GC : Return ranked suggestions + confidence
+GC -> CA : Display suggestions
+
+loop For each suggestion
+ CA -> GC : Approve or Reject
+end
+
+GC -> FL : Save approved mappings
+GC -> CA : Update coverage matrix
+
+@enduml
+```
+
+---
+
+### UC-06: Agent-Performed Testing
+
+| Field | Value |
+|---|---|
+| **Name** | Agent-Performed Testing |
+| **Primary Actor** | AI Assessment Agent |
+| **Stakeholders** | Internal Auditor (reviewer), Audit Manager |
+| **Precondition** | Agent is registered; test procedures are assigned |
+| **Postcondition** | Structured results submitted; routed for human review |
+| **Trigger** | Agent polls for assignments or receives webhook |
+
+**Main Success Scenario:**
+1. Agent authenticates via OAuth2 client credentials.
+2. Agent queries `GET /api/v1/agents/{id}/assignments` for pending work.
+3. For each assignment, agent retrieves full test procedure context.
+4. Agent performs analysis (evidence review, configuration check, etc.).
+5. Agent submits structured results via API with provenance metadata.
+6. System validates result schema, records provenance, flags for review.
+7. Human reviewer receives notification.
+8. Reviewer approves, rejects, or annotates agent results.
+
+```plantuml
+@startuml
+title UC-06: Agent-Performed Testing — Sequence Diagram
+
+participant "AI Agent" as AG
+participant "API Gateway" as API
+participant "Ground Control Core" as GC
+participant "Artifact Store" as AS
+actor "Human Reviewer" as HR
+
+AG -> API : POST /oauth/token (client_credentials)
+API -> AG : Access token
+
+AG -> API : GET /agents/{id}/assignments
+API -> GC : Query pending test procedures
+GC -> API : Return assignments list
+API -> AG : Assignments with context
+
+loop For each assignment
+ AG -> API : GET /test-procedures/{id}
+ API -> AG : Full context (control, evidence, prior results)
+
+ AG -> AG : Perform analysis
+
+ AG -> API : POST /test-procedures/{id}/results
+ note right
+ {
+ "steps": [...],
+ "conclusion": "effective",
+ "confidence": 0.87,
+ "provenance": {
+ "model": "claude-opus-4-6",
+ "input_hash": "sha256:...",
+ "agent_version": "1.2.0"
+ }
+ }
+ end note
+
+ API -> GC : Validate & store results
+ GC -> GC : Flag as agent_produced
+ GC -> HR : Notify: agent results pending review
+end
+
+HR -> GC : Review agent results
+HR -> GC : Approve / Reject / Annotate
+
+@enduml
+```
+
+---
+
+### UC-07: Configure SSO and Provision Users
+
+| Field | Value |
+|---|---|
+| **Name** | Configure SSO and Provision Users |
+| **Primary Actor** | Platform Administrator |
+| **Precondition** | Admin has administrator role; IdP is available |
+| **Postcondition** | SSO is configured; users authenticate via IdP |
+| **Trigger** | Initial setup or IdP change |
+
+**Main Success Scenario:**
+1. Admin navigates to SSO configuration.
+2. Admin selects protocol (SAML 2.0 or OIDC).
+3. Admin enters IdP metadata (entity ID, endpoints, certificate / issuer, client ID).
+4. System generates SP metadata / redirect URI for the IdP.
+5. Admin configures the IdP with SP details.
+6. Admin tests SSO login flow.
+7. System confirms successful authentication.
+8. Admin enables SSO enforcement.
+9. Admin configures SCIM endpoint for automated provisioning.
+10. IdP syncs users and groups to Ground Control.
+
+**Extensions:**
+- 6a. Test fails → system shows error details (certificate mismatch, clock skew, attribute mapping).
+- 9a. No SCIM available → admin enables JIT provisioning (auto-create on first login).
+
+---
+
+### UC-08: Install and Configure Plugin
+
+| Field | Value |
+|---|---|
+| **Name** | Install and Configure Plugin |
+| **Primary Actor** | Platform Administrator |
+| **Precondition** | Admin has administrator role |
+| **Postcondition** | Plugin is installed and operational |
+| **Trigger** | Need for new integration or framework |
+
+**Main Success Scenario:**
+1. Admin opens plugin catalog.
+2. Admin browses or searches for desired plugin.
+3. Admin reviews plugin details (description, permissions, version, author).
+4. Admin installs the plugin.
+5. System validates plugin signature and compatibility.
+6. System renders plugin configuration UI from its schema.
+7. Admin provides configuration values (API keys, endpoints, scopes).
+8. Admin enables the plugin.
+9. System runs plugin health check and confirms operational status.
+
+**Extensions:**
+- 5a. Signature validation fails → system blocks installation, alerts admin.
+- 9a. Health check fails → system displays error, plugin remains disabled.
+
+---
+
+### UC-09: Generate Compliance Report
+
+| Field | Value |
+|---|---|
+| **Name** | Generate Compliance Report |
+| **Primary Actor** | CISO / IT Risk Manager |
+| **Precondition** | Assessment data exists |
+| **Postcondition** | Report is generated and available for download/delivery |
+| **Trigger** | Manual request or scheduled trigger |
+
+**Main Success Scenario:**
+1. Actor selects report type (or opens report builder).
+2. Actor configures parameters (scope, date range, framework, format).
+3. System queries relevant data and generates the report.
+4. Actor previews the report in-browser.
+5. Actor downloads (PDF/PPTX/Excel) or schedules for email delivery.
+
+```plantuml
+@startuml
+title UC-09: Report Generation — Activity Diagram
+
+start
+
+:Select report type or open builder;
+
+if (Standard report?) then (Yes)
+ :Apply default template;
+else (No)
+ :Configure fields, filters, groupings;
+ :Save as custom template (optional);
+endif
+
+:Set parameters (scope, dates, framework);
+:Generate report;
+:Preview in browser;
+
+if (Satisfied?) then (Yes)
+ fork
+ :Download (PDF/PPTX/Excel);
+ fork again
+ :Schedule email delivery;
+ end fork
+else (No)
+ :Modify parameters;
+ :Re-generate;
+endif
+
+stop
+@enduml
+```
+
+---
+
+## 3. Use Case — Actor Matrix
+
+| Use Case | Risk Mgr | Auditor | Control Owner | Compliance | CISO | AI Agent | Admin |
+|---|---|---|---|---|---|---|---|
+| Manage Risk Register | **P** | R | | | R | | |
+| Run Risk Assessment | **P** | | | | R | S | |
+| Execute Test Procedures | | **P** | | | | **P** | |
+| Collect Evidence | | **P** | **P** | | | S | |
+| Cross-Framework Mapping | | | | **P** | | S | |
+| Record Findings | | **P** | | | | S | |
+| Manage Remediation | | R | **P** | | | | |
+| Generate Reports | **P** | R | | R | **P** | S | |
+| Configure SSO | | | | | | | **P** |
+| Manage Users & Roles | | | | | | | **P** |
+| Install Plugins | | | | | | | **P** |
+| Register Agents | | | | | | | **P** |
+
+**P** = Primary Actor, **R** = Reader/Consumer, **S** = Supporting Actor
diff --git a/poetry.lock b/poetry.lock
deleted file mode 100644
index fed14ea1..00000000
--- a/poetry.lock
+++ /dev/null
@@ -1,998 +0,0 @@
-# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand.
-
-[[package]]
-name = "bandit"
-version = "1.8.2"
-description = "Security oriented static analyser for python code."
-optional = false
-python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "bandit-1.8.2-py3-none-any.whl", hash = "sha256:df6146ad73dd30e8cbda4e29689ddda48364e36ff655dbfc86998401fcf1721f"},
- {file = "bandit-1.8.2.tar.gz", hash = "sha256:e00ad5a6bc676c0954669fe13818024d66b70e42cf5adb971480cf3b671e835f"},
-]
-
-[package.dependencies]
-colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""}
-PyYAML = ">=5.3.1"
-rich = "*"
-stevedore = ">=1.20.0"
-
-[package.extras]
-baseline = ["GitPython (>=3.1.30)"]
-sarif = ["jschema-to-python (>=1.2.3)", "sarif-om (>=1.0.4)"]
-test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)"]
-toml = ["tomli (>=1.1.0)"]
-yaml = ["PyYAML"]
-
-[[package]]
-name = "black"
-version = "24.10.0"
-description = "The uncompromising code formatter."
-optional = false
-python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"},
- {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"},
- {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"},
- {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"},
- {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"},
- {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"},
- {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"},
- {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"},
- {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"},
- {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"},
- {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"},
- {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"},
- {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"},
- {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"},
- {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"},
- {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"},
- {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"},
- {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"},
- {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"},
- {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"},
- {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"},
- {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"},
-]
-
-[package.dependencies]
-click = ">=8.0.0"
-mypy-extensions = ">=0.4.3"
-packaging = ">=22.0"
-pathspec = ">=0.9.0"
-platformdirs = ">=2"
-
-[package.extras]
-colorama = ["colorama (>=0.4.3)"]
-d = ["aiohttp (>=3.10)"]
-jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
-uvloop = ["uvloop (>=0.15.2)"]
-
-[[package]]
-name = "certifi"
-version = "2024.12.14"
-description = "Python package for providing Mozilla's CA Bundle."
-optional = false
-python-versions = ">=3.6"
-groups = ["main"]
-files = [
- {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
- {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
-]
-
-[[package]]
-name = "cfgv"
-version = "3.4.0"
-description = "Validate configuration and produce human readable error messages."
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"},
- {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"},
-]
-
-[[package]]
-name = "charset-normalizer"
-version = "3.4.1"
-description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-optional = false
-python-versions = ">=3.7"
-groups = ["main"]
-files = [
- {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
- {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
- {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
-]
-
-[[package]]
-name = "click"
-version = "8.1.8"
-description = "Composable command line interface toolkit"
-optional = false
-python-versions = ">=3.7"
-groups = ["dev"]
-files = [
- {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
- {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
-]
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[[package]]
-name = "colorama"
-version = "0.4.6"
-description = "Cross-platform colored terminal text."
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
-groups = ["dev"]
-markers = "sys_platform == \"win32\" or platform_system == \"Windows\""
-files = [
- {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
- {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
-]
-
-[[package]]
-name = "coverage"
-version = "7.6.10"
-description = "Code coverage measurement for Python"
-optional = false
-python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "coverage-7.6.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78"},
- {file = "coverage-7.6.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c"},
- {file = "coverage-7.6.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a"},
- {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165"},
- {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988"},
- {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5"},
- {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3"},
- {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5"},
- {file = "coverage-7.6.10-cp310-cp310-win32.whl", hash = "sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244"},
- {file = "coverage-7.6.10-cp310-cp310-win_amd64.whl", hash = "sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e"},
- {file = "coverage-7.6.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3"},
- {file = "coverage-7.6.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43"},
- {file = "coverage-7.6.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132"},
- {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f"},
- {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994"},
- {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99"},
- {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd"},
- {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377"},
- {file = "coverage-7.6.10-cp311-cp311-win32.whl", hash = "sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8"},
- {file = "coverage-7.6.10-cp311-cp311-win_amd64.whl", hash = "sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609"},
- {file = "coverage-7.6.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853"},
- {file = "coverage-7.6.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078"},
- {file = "coverage-7.6.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0"},
- {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50"},
- {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022"},
- {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b"},
- {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0"},
- {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852"},
- {file = "coverage-7.6.10-cp312-cp312-win32.whl", hash = "sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359"},
- {file = "coverage-7.6.10-cp312-cp312-win_amd64.whl", hash = "sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247"},
- {file = "coverage-7.6.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9"},
- {file = "coverage-7.6.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b"},
- {file = "coverage-7.6.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690"},
- {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18"},
- {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c"},
- {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd"},
- {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e"},
- {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694"},
- {file = "coverage-7.6.10-cp313-cp313-win32.whl", hash = "sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6"},
- {file = "coverage-7.6.10-cp313-cp313-win_amd64.whl", hash = "sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e"},
- {file = "coverage-7.6.10-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe"},
- {file = "coverage-7.6.10-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273"},
- {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8"},
- {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098"},
- {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb"},
- {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0"},
- {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf"},
- {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2"},
- {file = "coverage-7.6.10-cp313-cp313t-win32.whl", hash = "sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312"},
- {file = "coverage-7.6.10-cp313-cp313t-win_amd64.whl", hash = "sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d"},
- {file = "coverage-7.6.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:656c82b8a0ead8bba147de9a89bda95064874c91a3ed43a00e687f23cc19d53a"},
- {file = "coverage-7.6.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ccc2b70a7ed475c68ceb548bf69cec1e27305c1c2606a5eb7c3afff56a1b3b27"},
- {file = "coverage-7.6.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e37dc41d57ceba70956fa2fc5b63c26dba863c946ace9705f8eca99daecdc4"},
- {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0aa9692b4fdd83a4647eeb7db46410ea1322b5ed94cd1715ef09d1d5922ba87f"},
- {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa744da1820678b475e4ba3dfd994c321c5b13381d1041fe9c608620e6676e25"},
- {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0b1818063dc9e9d838c09e3a473c1422f517889436dd980f5d721899e66f315"},
- {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:59af35558ba08b758aec4d56182b222976330ef8d2feacbb93964f576a7e7a90"},
- {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7ed2f37cfce1ce101e6dffdfd1c99e729dd2ffc291d02d3e2d0af8b53d13840d"},
- {file = "coverage-7.6.10-cp39-cp39-win32.whl", hash = "sha256:4bcc276261505d82f0ad426870c3b12cb177752834a633e737ec5ee79bbdff18"},
- {file = "coverage-7.6.10-cp39-cp39-win_amd64.whl", hash = "sha256:457574f4599d2b00f7f637a0700a6422243b3565509457b2dbd3f50703e11f59"},
- {file = "coverage-7.6.10-pp39.pp310-none-any.whl", hash = "sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f"},
- {file = "coverage-7.6.10.tar.gz", hash = "sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23"},
-]
-
-[package.extras]
-toml = ["tomli"]
-
-[[package]]
-name = "defusedxml"
-version = "0.7.1"
-description = "XML bomb protection for Python stdlib modules"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-groups = ["main"]
-files = [
- {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
- {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
-]
-
-[[package]]
-name = "distlib"
-version = "0.3.9"
-description = "Distribution utilities"
-optional = false
-python-versions = "*"
-groups = ["dev"]
-files = [
- {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"},
- {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"},
-]
-
-[[package]]
-name = "filelock"
-version = "3.17.0"
-description = "A platform independent file lock."
-optional = false
-python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"},
- {file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"},
-]
-
-[package.extras]
-docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"]
-testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"]
-typing = ["typing-extensions (>=4.12.2)"]
-
-[[package]]
-name = "flake8"
-version = "7.1.1"
-description = "the modular source code checker: pep8 pyflakes and co"
-optional = false
-python-versions = ">=3.8.1"
-groups = ["dev"]
-files = [
- {file = "flake8-7.1.1-py2.py3-none-any.whl", hash = "sha256:597477df7860daa5aa0fdd84bf5208a043ab96b8e96ab708770ae0364dd03213"},
- {file = "flake8-7.1.1.tar.gz", hash = "sha256:049d058491e228e03e67b390f311bbf88fce2dbaa8fa673e7aea87b7198b8d38"},
-]
-
-[package.dependencies]
-mccabe = ">=0.7.0,<0.8.0"
-pycodestyle = ">=2.12.0,<2.13.0"
-pyflakes = ">=3.2.0,<3.3.0"
-
-[[package]]
-name = "identify"
-version = "2.6.6"
-description = "File identification library for Python"
-optional = false
-python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "identify-2.6.6-py2.py3-none-any.whl", hash = "sha256:cbd1810bce79f8b671ecb20f53ee0ae8e86ae84b557de31d89709dc2a48ba881"},
- {file = "identify-2.6.6.tar.gz", hash = "sha256:7bec12768ed44ea4761efb47806f0a41f86e7c0a5fdf5950d4648c90eca7e251"},
-]
-
-[package.extras]
-license = ["ukkonen"]
-
-[[package]]
-name = "idna"
-version = "3.10"
-description = "Internationalized Domain Names in Applications (IDNA)"
-optional = false
-python-versions = ">=3.6"
-groups = ["main"]
-files = [
- {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
- {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
-]
-
-[package.extras]
-all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
-
-[[package]]
-name = "iniconfig"
-version = "2.0.0"
-description = "brain-dead simple config-ini parsing"
-optional = false
-python-versions = ">=3.7"
-groups = ["dev"]
-files = [
- {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
- {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
-]
-
-[[package]]
-name = "isort"
-version = "5.13.2"
-description = "A Python utility / library to sort Python imports."
-optional = false
-python-versions = ">=3.8.0"
-groups = ["dev"]
-files = [
- {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"},
- {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"},
-]
-
-[package.extras]
-colors = ["colorama (>=0.4.6)"]
-
-[[package]]
-name = "jira"
-version = "3.8.0"
-description = "Python library for interacting with JIRA via REST APIs."
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
- {file = "jira-3.8.0-py3-none-any.whl", hash = "sha256:12190dc84dad00b8a6c0341f7e8a254b0f38785afdec022bd5941e1184a5a3fb"},
- {file = "jira-3.8.0.tar.gz", hash = "sha256:63719c529a570aaa01c3373dbb5a104dab70381c5be447f6c27f997302fa335a"},
-]
-
-[package.dependencies]
-defusedxml = "*"
-packaging = "*"
-Pillow = ">=2.1.0"
-requests = ">=2.10.0"
-requests-oauthlib = ">=1.1.0"
-requests-toolbelt = "*"
-typing-extensions = ">=3.7.4.2"
-
-[package.extras]
-async = ["requests-futures (>=0.9.7)"]
-cli = ["ipython (>=4.0.0)", "keyring"]
-docs = ["furo", "sphinx (>=5.0.0)", "sphinx-copybutton"]
-opt = ["PyJWT", "filemagic (>=1.6)", "requests-jwt", "requests-kerberos"]
-test = ["MarkupSafe (>=0.23)", "PyYAML (>=5.1)", "docutils (>=0.12)", "flaky", "oauthlib", "parameterized (>=0.8.1)", "pytest (>=6.0.0)", "pytest-cache", "pytest-cov", "pytest-instafail", "pytest-sugar", "pytest-timeout (>=1.3.1)", "pytest-xdist (>=2.2)", "requests-mock", "requires.io", "tenacity", "wheel (>=0.24.0)", "yanc (>=0.3.3)"]
-
-[[package]]
-name = "markdown-it-py"
-version = "3.0.0"
-description = "Python port of markdown-it. Markdown parsing, done right!"
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
- {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
-]
-
-[package.dependencies]
-mdurl = ">=0.1,<1.0"
-
-[package.extras]
-benchmarking = ["psutil", "pytest", "pytest-benchmark"]
-code-style = ["pre-commit (>=3.0,<4.0)"]
-compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
-linkify = ["linkify-it-py (>=1,<3)"]
-plugins = ["mdit-py-plugins"]
-profiling = ["gprof2dot"]
-rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
-testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
-
-[[package]]
-name = "mccabe"
-version = "0.7.0"
-description = "McCabe checker, plugin for flake8"
-optional = false
-python-versions = ">=3.6"
-groups = ["dev"]
-files = [
- {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
- {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
-]
-
-[[package]]
-name = "mdurl"
-version = "0.1.2"
-description = "Markdown URL utilities"
-optional = false
-python-versions = ">=3.7"
-groups = ["dev"]
-files = [
- {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
- {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
-]
-
-[[package]]
-name = "mypy-extensions"
-version = "1.0.0"
-description = "Type system extensions for programs checked with the mypy type checker."
-optional = false
-python-versions = ">=3.5"
-groups = ["dev"]
-files = [
- {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
- {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
-]
-
-[[package]]
-name = "nodeenv"
-version = "1.9.1"
-description = "Node.js virtual environment builder"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
-groups = ["dev"]
-files = [
- {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"},
- {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"},
-]
-
-[[package]]
-name = "oauthlib"
-version = "3.2.2"
-description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic"
-optional = false
-python-versions = ">=3.6"
-groups = ["main"]
-files = [
- {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"},
- {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"},
-]
-
-[package.extras]
-rsa = ["cryptography (>=3.0.0)"]
-signals = ["blinker (>=1.4.0)"]
-signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
-
-[[package]]
-name = "packaging"
-version = "24.2"
-description = "Core utilities for Python packages"
-optional = false
-python-versions = ">=3.8"
-groups = ["main", "dev"]
-files = [
- {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
- {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
-]
-
-[[package]]
-name = "pathspec"
-version = "0.12.1"
-description = "Utility library for gitignore style pattern matching of file paths."
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
- {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
-]
-
-[[package]]
-name = "pbr"
-version = "6.1.0"
-description = "Python Build Reasonableness"
-optional = false
-python-versions = ">=2.6"
-groups = ["dev"]
-files = [
- {file = "pbr-6.1.0-py2.py3-none-any.whl", hash = "sha256:a776ae228892d8013649c0aeccbb3d5f99ee15e005a4cbb7e61d55a067b28a2a"},
- {file = "pbr-6.1.0.tar.gz", hash = "sha256:788183e382e3d1d7707db08978239965e8b9e4e5ed42669bf4758186734d5f24"},
-]
-
-[[package]]
-name = "pillow"
-version = "11.1.0"
-description = "Python Imaging Library (Fork)"
-optional = false
-python-versions = ">=3.9"
-groups = ["main"]
-files = [
- {file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"},
- {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"},
- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2"},
- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26"},
- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07"},
- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482"},
- {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e"},
- {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269"},
- {file = "pillow-11.1.0-cp310-cp310-win32.whl", hash = "sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49"},
- {file = "pillow-11.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a"},
- {file = "pillow-11.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65"},
- {file = "pillow-11.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457"},
- {file = "pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35"},
- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2"},
- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070"},
- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6"},
- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1"},
- {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2"},
- {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96"},
- {file = "pillow-11.1.0-cp311-cp311-win32.whl", hash = "sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f"},
- {file = "pillow-11.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761"},
- {file = "pillow-11.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71"},
- {file = "pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a"},
- {file = "pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b"},
- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3"},
- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a"},
- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1"},
- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f"},
- {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91"},
- {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c"},
- {file = "pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6"},
- {file = "pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf"},
- {file = "pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5"},
- {file = "pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc"},
- {file = "pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0"},
- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1"},
- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec"},
- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5"},
- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114"},
- {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352"},
- {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3"},
- {file = "pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9"},
- {file = "pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c"},
- {file = "pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65"},
- {file = "pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861"},
- {file = "pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081"},
- {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c"},
- {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547"},
- {file = "pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab"},
- {file = "pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9"},
- {file = "pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe"},
- {file = "pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756"},
- {file = "pillow-11.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6"},
- {file = "pillow-11.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e"},
- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc"},
- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2"},
- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade"},
- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884"},
- {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196"},
- {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8"},
- {file = "pillow-11.1.0-cp39-cp39-win32.whl", hash = "sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5"},
- {file = "pillow-11.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f"},
- {file = "pillow-11.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a"},
- {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90"},
- {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb"},
- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442"},
- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83"},
- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f"},
- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73"},
- {file = "pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0"},
- {file = "pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20"},
-]
-
-[package.extras]
-docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
-fpx = ["olefile"]
-mic = ["olefile"]
-tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"]
-typing = ["typing-extensions"]
-xmp = ["defusedxml"]
-
-[[package]]
-name = "platformdirs"
-version = "4.3.6"
-description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
- {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
-]
-
-[package.extras]
-docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"]
-test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"]
-type = ["mypy (>=1.11.2)"]
-
-[[package]]
-name = "pluggy"
-version = "1.5.0"
-description = "plugin and hook calling mechanisms for python"
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
- {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
-]
-
-[package.extras]
-dev = ["pre-commit", "tox"]
-testing = ["pytest", "pytest-benchmark"]
-
-[[package]]
-name = "pre-commit"
-version = "4.1.0"
-description = "A framework for managing and maintaining multi-language pre-commit hooks."
-optional = false
-python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "pre_commit-4.1.0-py2.py3-none-any.whl", hash = "sha256:d29e7cb346295bcc1cc75fc3e92e343495e3ea0196c9ec6ba53f49f10ab6ae7b"},
- {file = "pre_commit-4.1.0.tar.gz", hash = "sha256:ae3f018575a588e30dfddfab9a05448bfbd6b73d78709617b5a2b853549716d4"},
-]
-
-[package.dependencies]
-cfgv = ">=2.0.0"
-identify = ">=1.0.0"
-nodeenv = ">=0.11.1"
-pyyaml = ">=5.1"
-virtualenv = ">=20.10.0"
-
-[[package]]
-name = "pycodestyle"
-version = "2.12.1"
-description = "Python style guide checker"
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "pycodestyle-2.12.1-py2.py3-none-any.whl", hash = "sha256:46f0fb92069a7c28ab7bb558f05bfc0110dac69a0cd23c61ea0040283a9d78b3"},
- {file = "pycodestyle-2.12.1.tar.gz", hash = "sha256:6838eae08bbce4f6accd5d5572075c63626a15ee3e6f842df996bf62f6d73521"},
-]
-
-[[package]]
-name = "pyflakes"
-version = "3.2.0"
-description = "passive checker of Python programs"
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"},
- {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"},
-]
-
-[[package]]
-name = "pygments"
-version = "2.19.1"
-description = "Pygments is a syntax highlighting package written in Python."
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"},
- {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"},
-]
-
-[package.extras]
-windows-terminal = ["colorama (>=0.4.6)"]
-
-[[package]]
-name = "pytest"
-version = "8.3.4"
-description = "pytest: simple powerful testing with Python"
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"},
- {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"},
-]
-
-[package.dependencies]
-colorama = {version = "*", markers = "sys_platform == \"win32\""}
-iniconfig = "*"
-packaging = "*"
-pluggy = ">=1.5,<2"
-
-[package.extras]
-dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
-
-[[package]]
-name = "pytest-cov"
-version = "6.0.0"
-description = "Pytest plugin for measuring coverage."
-optional = false
-python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"},
- {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"},
-]
-
-[package.dependencies]
-coverage = {version = ">=7.5", extras = ["toml"]}
-pytest = ">=4.6"
-
-[package.extras]
-testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
-
-[[package]]
-name = "pyyaml"
-version = "6.0.2"
-description = "YAML parser and emitter for Python"
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
- {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
- {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
- {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
- {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
- {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
- {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
- {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
- {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
- {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
- {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
- {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
- {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
- {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
- {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
- {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
- {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
- {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
- {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
- {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
- {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
- {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
- {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
- {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
- {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
- {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
- {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
- {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
- {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
- {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
- {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
- {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
- {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
- {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
- {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
- {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
- {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
- {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
- {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
- {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
- {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
- {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
- {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
- {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
- {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
- {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
- {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
- {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
- {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
- {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
- {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
- {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
- {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
-]
-
-[[package]]
-name = "requests"
-version = "2.32.3"
-description = "Python HTTP for Humans."
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
- {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
- {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
-]
-
-[package.dependencies]
-certifi = ">=2017.4.17"
-charset-normalizer = ">=2,<4"
-idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<3"
-
-[package.extras]
-socks = ["PySocks (>=1.5.6,!=1.5.7)"]
-use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
-
-[[package]]
-name = "requests-oauthlib"
-version = "2.0.0"
-description = "OAuthlib authentication support for Requests."
-optional = false
-python-versions = ">=3.4"
-groups = ["main"]
-files = [
- {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"},
- {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"},
-]
-
-[package.dependencies]
-oauthlib = ">=3.0.0"
-requests = ">=2.0.0"
-
-[package.extras]
-rsa = ["oauthlib[signedtoken] (>=3.0.0)"]
-
-[[package]]
-name = "requests-toolbelt"
-version = "1.0.0"
-description = "A utility belt for advanced users of python-requests"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-groups = ["main"]
-files = [
- {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
- {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
-]
-
-[package.dependencies]
-requests = ">=2.0.1,<3.0.0"
-
-[[package]]
-name = "rich"
-version = "13.9.4"
-description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
-optional = false
-python-versions = ">=3.8.0"
-groups = ["dev"]
-files = [
- {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"},
- {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"},
-]
-
-[package.dependencies]
-markdown-it-py = ">=2.2.0"
-pygments = ">=2.13.0,<3.0.0"
-
-[package.extras]
-jupyter = ["ipywidgets (>=7.5.1,<9)"]
-
-[[package]]
-name = "stevedore"
-version = "5.4.0"
-description = "Manage dynamic plugins for Python applications"
-optional = false
-python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "stevedore-5.4.0-py3-none-any.whl", hash = "sha256:b0be3c4748b3ea7b854b265dcb4caa891015e442416422be16f8b31756107857"},
- {file = "stevedore-5.4.0.tar.gz", hash = "sha256:79e92235ecb828fe952b6b8b0c6c87863248631922c8e8e0fa5b17b232c4514d"},
-]
-
-[package.dependencies]
-pbr = ">=2.0.0"
-
-[[package]]
-name = "typing-extensions"
-version = "4.12.2"
-description = "Backported and Experimental Type Hints for Python 3.8+"
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
- {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
- {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
-]
-
-[[package]]
-name = "urllib3"
-version = "2.3.0"
-description = "HTTP library with thread-safe connection pooling, file post, and more."
-optional = false
-python-versions = ">=3.9"
-groups = ["main"]
-files = [
- {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"},
- {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"},
-]
-
-[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
-h2 = ["h2 (>=4,<5)"]
-socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
-zstd = ["zstandard (>=0.18.0)"]
-
-[[package]]
-name = "virtualenv"
-version = "20.29.1"
-description = "Virtual Python Environment builder"
-optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
-files = [
- {file = "virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779"},
- {file = "virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35"},
-]
-
-[package.dependencies]
-distlib = ">=0.3.7,<1"
-filelock = ">=3.12.2,<4"
-platformdirs = ">=3.9.1,<5"
-
-[package.extras]
-docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
-test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"]
-
-[metadata]
-lock-version = "2.1"
-python-versions = "^3.12"
-content-hash = "9b37f4de64a6d2fe3bbe179c2e5ebed39dc55df6681ebcabc23ced3739f16606"
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index fd7a6b18..00000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,53 +0,0 @@
-[build-system]
-requires = ['setuptools>=42']
-build-backend = 'setuptools.build_meta'
-
-[tool.poetry]
-name = "grndctl"
-version = "1.0.1"
-description = "A tool for syncing Jira tickets with local files"
-authors = ["Brad Edwards "]
-
-[tool.poetry.dependencies]
-python = "^3.12"
-jira = "^3.8.0"
-
-[tool.poetry.group.dev.dependencies]
-pytest = "^8.3.4"
-pytest-cov = "^6.0.0"
-pre-commit = "^4.1.0"
-black = "^24.10.0"
-isort = "^5.13.2"
-flake8 = "^7.1.1"
-bandit = "^1.8.2"
-
-[tool.poetry.scripts]
-gctl = "ground_control.main:main"
-
-[tool.black]
-line-length = 100
-target-version = ["py38"]
-include = '\.pyi?$'
-
-[tool.isort]
-profile = "black"
-line_length = 100
-multi_line_output = 3
-include_trailing_comma = true
-force_grid_wrap = 0
-use_parentheses = true
-ensure_newline_before_comments = true
-
-[tool.bandit]
-exclude_dirs = ["tests"]
-skips = ["B101"] # Skip assert warnings in tests
-
-[tool.flake8]
-max-line-length = 100
-extend-ignore = ["E203", "W503"]
-exclude = [".git", "__pycache__", "build", "dist"]
-per-file-ignores = [
- "__init__.py:F401",
- "tests/*:D100,D101,D102,D103",
-]
-docstring-convention = "google"
diff --git a/scripts/create-github-issues.sh b/scripts/create-github-issues.sh
new file mode 100644
index 00000000..0d297aea
--- /dev/null
+++ b/scripts/create-github-issues.sh
@@ -0,0 +1,170 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Create GitHub issues from .github/issues/**/*.md files.
+# Includes rate-limit delay between API calls.
+#
+# Usage:
+# ./scripts/create-github-issues.sh # dry-run (default)
+# ./scripts/create-github-issues.sh --execute # actually create issues
+# ./scripts/create-github-issues.sh --phase 0 # only phase 0 issues
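+#
+# Requires an authenticated GitHub CLI (verify with: gh auth status).
+#
+# Each issue file is assumed to carry frontmatter in the shape the parsing
+# below expects; the field values here are illustrative only:
+#   ---
+#   title: "Short issue title"
+#   labels: [example-label, another-label]
+#   phase: 0
+#   priority: P0
+#   ---
+#   Issue body in Markdown (everything after the second ---).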
+
+DELAY=4 # seconds between API calls; a conservative buffer against GitHub's secondary rate limits on issue/label creation
+DRY_RUN=true
+PHASE_FILTER=""
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --execute) DRY_RUN=false; shift ;;
+ --phase) PHASE_FILTER="$2"; shift 2 ;;
+ *) echo "Unknown arg: $1"; exit 1 ;;
+ esac
+done
+
+ISSUE_DIR=".github/issues"
+CREATED=0
+SKIPPED=0
+FAILED=0
+
+if [[ ! -d "$ISSUE_DIR" ]]; then
+ echo "Error: $ISSUE_DIR not found. Run from repo root."
+ exit 1
+fi
+
+# --- Ensure all labels exist ---
+echo "Checking labels..."
+
+# Collect all labels used across issue files
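+# (e.g. a line "labels: [api, security]" contributes the labels "api" and "security")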
+declare -A ALL_LABELS
+while IFS= read -r line; do
+ raw=$(echo "$line" | sed 's/^labels: *\[//;s/\]$//')
+ IFS=',' read -ra parts <<< "$raw"
+ for l in "${parts[@]}"; do
+ l=$(echo "$l" | xargs)
+ [[ -n "$l" ]] && ALL_LABELS["$l"]=1
+ done
+done < <(grep -rh '^labels:' "$ISSUE_DIR")
+
+# Add phase-N and priority labels
+while IFS= read -r p; do
+ p=$(echo "$p" | xargs)
+ [[ -n "$p" ]] && ALL_LABELS["phase-$p"]=1
+done < <(grep -rh '^phase:' "$ISSUE_DIR" | sed 's/^phase: *//')
+
+while IFS= read -r p; do
+ p=$(echo "$p" | xargs)
+ [[ -n "$p" ]] && ALL_LABELS["$p"]=1
+done < <(grep -rh '^priority:' "$ISSUE_DIR" | sed 's/^priority: *//')
+
+# Label colors by category
+label_color() {
+ case "$1" in
+ phase-*) echo "0e8a16" ;; # green
+ P0) echo "b60205" ;; # red
+ P1) echo "d93f0b" ;; # orange
+ P2) echo "fbca04" ;; # yellow
+ *) echo "c5def5" ;; # light blue
+ esac
+}
+
+# Fetch existing labels
+mapfile -t EXISTING < <(gh label list --limit 200 --json name -q '.[].name')
+declare -A EXISTING_SET
+for el in "${EXISTING[@]}"; do
+ EXISTING_SET["$el"]=1
+done
+
+LABELS_CREATED=0
+for label in "${!ALL_LABELS[@]}"; do
+ if [[ -z "${EXISTING_SET[$label]:-}" ]]; then
+ color=$(label_color "$label")
+ if [[ "$DRY_RUN" == false ]]; then
+ if gh label create "$label" --color "$color" 2>/dev/null; then
+ LABELS_CREATED=$((LABELS_CREATED + 1))
+ else
+ echo " Warning: failed to create label '$label'"
+ fi
+ sleep 1
+ else
+ LABELS_CREATED=$((LABELS_CREATED + 1))
+ fi
+ fi
+done
+echo "Labels: $LABELS_CREATED new (${#ALL_LABELS[@]} total needed, ${#EXISTING[@]} already existed)"
+echo "---"
+
+# Collect and sort issue files
+mapfile -t FILES < <(find "$ISSUE_DIR" -name '*.md' ! -name 'README.md' | sort)
+
+echo "Found ${#FILES[@]} issue files"
+if [[ "$DRY_RUN" == true ]]; then
+ echo "DRY RUN — pass --execute to create issues"
+fi
+echo "---"
+
+for f in "${FILES[@]}"; do
+ # Parse frontmatter
+ title=$(awk '/^title:/{gsub(/^title: *"?|"$/,""); print; exit}' "$f")
+ labels_raw=$(awk '/^labels:/{gsub(/^labels: *\[|\]$/,""); print; exit}' "$f")
+ phase=$(awk '/^phase:/{gsub(/^phase: */,""); print; exit}' "$f")
+ priority=$(awk '/^priority:/{gsub(/^priority: */,""); print; exit}' "$f")
+
+ # Filter by phase if requested
+ if [[ -n "$PHASE_FILTER" && "$phase" != "$PHASE_FILTER" ]]; then
+ continue
+ fi
+
+ # Build label list (strip spaces, add phase/priority labels)
+ labels=""
+ IFS=',' read -ra LABEL_ARR <<< "$labels_raw"
+ for l in "${LABEL_ARR[@]}"; do
+ l=$(echo "$l" | xargs) # trim whitespace
+ if [[ -n "$l" ]]; then
+ labels="${labels:+$labels,}$l"
+ fi
+ done
+ if [[ -n "$phase" ]]; then
+ labels="${labels:+$labels,}phase-$phase"
+ fi
+ if [[ -n "$priority" ]]; then
+ labels="${labels:+$labels,}$priority"
+ fi
+
+ # Extract body (everything after the second ---). Only the first two ---
+ # lines count as frontmatter delimiters, so horizontal rules in the body
+ # are preserved rather than silently dropped.
+ body=$(awk 'BEGIN{c=0} c<2 && /^---$/{c++; next} c>=2{print}' "$f")
+
+ if [[ -z "$title" ]]; then
+ echo "SKIP (no title): $f"
+ SKIPPED=$((SKIPPED + 1))
+ continue
+ fi
+
+ echo "[$((CREATED + SKIPPED + FAILED + 1))/${#FILES[@]}] $title"
+ echo " Labels: $labels"
+ echo " File: $f"
+
+ if [[ "$DRY_RUN" == false ]]; then
+ if gh issue create \
+ --title "$title" \
+ --label "$labels" \
+ --body "$body" 2>/tmp/gh-issue-err; then
+ CREATED=$((CREATED + 1))
+ echo " -> Created"
+ else
+ FAILED=$((FAILED + 1))
+ echo " -> FAILED: $(cat /tmp/gh-issue-err)"
+ fi
+ echo " (waiting ${DELAY}s...)"
+ sleep "$DELAY"
+ else
+ CREATED=$((CREATED + 1))
+ fi
+
+ echo ""
+done
+
+echo "---"
+echo "Done. Created: $CREATED | Skipped: $SKIPPED | Failed: $FAILED"
+if [[ "$DRY_RUN" == true ]]; then
+ echo "(Dry run — no issues were actually created)"
+fi
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index bebe6f4b..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-[flake8]
-max-line-length = 100
-extend-ignore = E203, W503
-exclude = .git,__pycache__,build,dist
-per-file-ignores =
- __init__.py:F401
- tests/*:D100,D101,D102,D103
-docstring-convention = google
diff --git a/sonar-project.properties b/sonar-project.properties
new file mode 100644
index 00000000..bf4f9041
--- /dev/null
+++ b/sonar-project.properties
@@ -0,0 +1,29 @@
+# SonarCloud Configuration
+# https://sonarcloud.io/project/overview?id=KeplerOps_Ground-Control
+
+sonar.projectKey=KeplerOps_Ground-Control
+sonar.organization=keplerops
+
+# Source and test directories (update as code is added)
+sonar.sources=src
+sonar.tests=tests
+
+# Exclusions
+sonar.exclusions=\
+ **/node_modules/**,\
+ **/__pycache__/**,\
+ **/.venv/**,\
+ **/dist/**,\
+ **/build/**,\
+ **/coverage/**,\
+ **/*.min.js,\
+ **/migrations/**
+
+# Python
+sonar.python.version=3.12
+
+# TypeScript/JavaScript coverage
+# sonar.javascript.lcov.reportPaths=coverage/lcov.info
+
+# Python coverage
+# sonar.python.coverage.reportPaths=coverage.xml
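+# (coverage.xml is typically produced with pytest-cov, e.g.: pytest --cov=src --cov-report=xml)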
diff --git a/src/grndctl/__init__.py b/src/grndctl/__init__.py
deleted file mode 100644
index 07915168..00000000
--- a/src/grndctl/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Ground Control - A tool for syncing Jira tickets with local files."""
diff --git a/src/grndctl/main.py b/src/grndctl/main.py
deleted file mode 100644
index adc75a2a..00000000
--- a/src/grndctl/main.py
+++ /dev/null
@@ -1,353 +0,0 @@
-#!/usr/bin/env python3
-
-"""Ground Control main module for syncing JIRA tickets to local filesystem.
-
-This module provides functionality to:
-- Connect to JIRA using API tokens
-- Download tickets and their relationships
-- Create a local file structure mirroring the ticket hierarchy
-- Save ticket content and metadata locally
-"""
-
-import argparse
-import json
-import os
-
-from jira import JIRA
-
-JIRA_URL = os.environ.get("JIRA_URL", "")
-BOARD_ID = os.environ.get("JIRA_PROJECT", "")
-DEFAULT_OUTPUT_DIR = "tickets"
-
-# Authentication: typically via an API token
-# Create tokens at: https://support.atlassian.com/atlassian-account/docs/
-USERNAME = os.environ.get("JIRA_USERNAME", "")
-API_TOKEN = os.environ.get("JIRA_API_TOKEN", "")
-
-
-def sanitize_filename(s):
- """Convert string to a valid filename by removing or replacing invalid characters."""
- # Replace invalid characters with underscores
- invalid_chars = '<>:"/\\|?*'
- for c in invalid_chars:
- s = s.replace(c, "_")
- # Remove any leading/trailing spaces or dots
- s = s.strip(". ")
- return s
-
-
-def cleanup_directory(directory):
- """Remove all contents of the specified directory."""
- if os.path.exists(directory):
- for item in os.listdir(directory):
- item_path = os.path.join(directory, item)
- if os.path.isdir(item_path):
- import shutil
-
- shutil.rmtree(item_path)
- else:
- os.remove(item_path)
-
-
-def get_issue_relationships(issue, jira):
- """Get parent and children relationships for an issue."""
- relationships = {"parent": None, "children": []}
-
- # Debug: print available fields
- print(f"\nDebug: Checking fields for {issue.key}")
- for field_name in dir(issue.fields):
- if not field_name.startswith("_"):
- try:
- value = getattr(issue.fields, field_name)
- if value is not None:
- print(f" {field_name}: {value}")
- except AttributeError:
- # Skip fields that can't be accessed
- continue
-
- # Get parent (could be epic link or initiative)
- if hasattr(issue.fields, "parent") and issue.fields.parent is not None:
- parent = issue.fields.parent
- relationships["parent"] = {"key": parent.key, "type": parent.fields.issuetype.name}
- elif (
- hasattr(issue.fields, "customfield_10014") and issue.fields.customfield_10014
- ): # Epic link field
- epic_key = issue.fields.customfield_10014
- epic = jira.issue(epic_key)
- relationships["parent"] = {"key": epic_key, "type": epic.fields.issuetype.name}
-
- return relationships
-
-
-def get_type_prefix(issue_type):
- """Get a short prefix for the issue type."""
- type_lower = issue_type.lower()
- if "initiative" in type_lower:
- return "INI"
- elif "epic" in type_lower:
- return "EPIC"
- elif "story" in type_lower:
- return "STORY"
- else:
- return "TASK"
-
-
-def create_ticket_directory(issue, jira, parent_dir=None):
- """Create directory for a ticket and write its contents."""
- # Get type prefix
- type_prefix = get_type_prefix(issue.fields.issuetype.name)
-
- # Create directory name with type prefix, key and truncated summary
- summary = sanitize_filename(issue.fields.summary)
- if len(summary) > 50: # Truncate long summaries
- summary = summary[:47] + "..."
-
- dir_name = f"{type_prefix}-{issue.key}-{summary}"
-
- # Use parent directory if provided, otherwise use OUTPUT_DIR
- base_dir = parent_dir if parent_dir else DEFAULT_OUTPUT_DIR
- issue_dir = os.path.join(base_dir, dir_name)
- os.makedirs(issue_dir, exist_ok=True)
-
- # Get relationships
- relationships = get_issue_relationships(issue, jira)
-
- # Build metadata
- metadata = {
- "key": issue.key,
- "id": issue.id,
- "url": f"{JIRA_URL}/browse/{issue.key}",
- "type": str(issue.fields.issuetype.name),
- "status": str(issue.fields.status.name),
- "summary": issue.fields.summary,
- "reporter": str(issue.fields.reporter),
- "assignee": str(issue.fields.assignee) if issue.fields.assignee else None,
- "updated": str(issue.fields.updated),
- }
-
- # Add parent info if exists
- if relationships["parent"]:
- metadata["parent"] = relationships["parent"]
-
- # Write metadata
- metadata_path = os.path.join(issue_dir, "metadata.json")
- with open(metadata_path, "w", encoding="utf-8") as f:
- json.dump(metadata, f, indent=2)
- f.write("\n")
-
- # Write the main ticket markdown file
- ticket_file = os.path.join(issue_dir, "ticket.md")
- with open(ticket_file, "w", encoding="utf-8") as f:
- # Write ticket header with metadata
- f.write(f"# {issue.key}: {issue.fields.summary}\n\n")
-
- # Write metadata section
- f.write("# Metadata\n\n")
- f.write(f"- Type: {issue.fields.issuetype.name}\n")
- f.write(f"- Status: {issue.fields.status.name}\n")
- f.write(f"- Reporter: {issue.fields.reporter}\n")
- f.write(f"- Assignee: {issue.fields.assignee if issue.fields.assignee else 'Unassigned'}\n")
- f.write(f"- Updated: {issue.fields.updated}\n")
- f.write(f"- URL: {JIRA_URL}/browse/{issue.key}\n")
-
- # Add parent info if exists
- if relationships["parent"]:
- parent = relationships["parent"]
- f.write(f"- Parent: [{parent['key']}]({JIRA_URL}/browse/{parent['key']})")
- if "summary" in parent:
- f.write(f" - {parent['summary']}")
- f.write("\n")
- f.write("\n")
-
- # Write description
- f.write("# Description\n\n")
- description = (
- issue.fields.description if issue.fields.description else "_No description provided_"
- )
- f.write(f"{description}\n\n")
-
- # Write comments if any
- comments = jira.comments(issue)
- if comments:
- f.write("# Comments\n\n")
- for c in comments:
- f.write(f"## {c.author.displayName} - {c.updated}\n\n")
- f.write(f"{c.body}\n\n")
- f.write("\n")
-
- return issue_dir
-
-
-def check_directory(directory):
- """Check if directory exists and is empty, create if missing."""
- if os.path.exists(directory):
- if os.path.isdir(directory):
- if os.listdir(directory):
- raise ValueError(
- f"Output directory '{directory}' exists and is not empty.\n"
- "Please specify a different directory or remove the existing content."
- )
- else:
- raise ValueError(
- f"'{directory}' exists but is not a directory.\n" "Please specify a different path."
- )
- else:
- os.makedirs(directory)
-
-
-def parse_args():
- """Parse command line arguments."""
- parser = argparse.ArgumentParser(
- description="Sync JIRA tickets to local filesystem with hierarchy."
- )
- parser.add_argument(
- "-o",
- "--output",
- default=DEFAULT_OUTPUT_DIR,
- help=f"Output directory (default: {DEFAULT_OUTPUT_DIR})",
- )
- parser.add_argument(
- "ticket",
- nargs="?",
- help="Specific ticket to fetch (e.g., SECOPS-123). If not provided, fetches all tickets.",
- )
- parser.add_argument(
- "-r",
- "--recursive",
- action="store_true",
- help="When fetching a specific ticket, also fetch its children recursively",
- )
- return parser.parse_args()
-
-
-def main():
- """Run the main JIRA ticket sync process.
-
- Returns:
- int: Exit code (0 for success, 1 for error)
- """
- # Parse command line arguments
- args = parse_args()
- output_dir = args.output
-
- # Check and prepare output directory
- try:
- check_directory(output_dir)
- except ValueError as e:
- print(f"Error: {e}")
- return 1
-
- # Check for credentials
- print("Debug: Reading environment variables...")
- print(f"JIRA_USERNAME: {os.environ.get('JIRA_USERNAME', 'not set')}")
- print(f"JIRA_API_TOKEN: {'[hidden]' if os.environ.get('JIRA_API_TOKEN') else 'not set'}")
-
- if not USERNAME or not API_TOKEN:
- print("Error: Please set JIRA_USERNAME and JIRA_API_TOKEN environment variables")
- print(
- "Visit: https://support.atlassian.com/atlassian-account/docs/"
- "manage-api-tokens-for-your-atlassian-account/"
- )
- return 1
-
- # Connect to Jira
- jira = JIRA(server=JIRA_URL, basic_auth=(USERNAME, API_TOKEN))
-
- # Build JQL query based on arguments
- if args.ticket:
- if args.recursive:
- # Get the ticket and all its children
- jql = f"""project = {BOARD_ID} AND (
- key = {args.ticket} OR
- parent = {args.ticket} OR
- "Epic Link" = {args.ticket}
- )"""
- else:
- # Get just the specific ticket
- jql = f"project = {BOARD_ID} AND key = {args.ticket}"
- else:
- # Default behavior: get all tickets with hierarchy rules
- jql = f"""project = {BOARD_ID} AND type != Sub-task AND (
- issuetype in (Initiative, Epic) OR
- parent is not empty OR
- "Epic Link" is not empty OR
- (issuetype not in (Initiative, Epic) AND statusCategory != Done AND status != Cancelled)
- )"""
-
- # Collect all issues (paging through results)
- start_at = 0
- max_results = 50
- all_issues = []
-
- while True:
- issues_batch = jira.search_issues(jql, startAt=start_at, maxResults=max_results)
- if not issues_batch:
- break
- all_issues.extend(issues_batch)
- start_at += max_results
- # Temporary limit: stop after 50 tickets
- if len(all_issues) >= 50:
- all_issues = all_issues[:50] # Trim to exactly 50 if we got more
- break
- if len(issues_batch) < max_results:
- break
-
- # Create output directory and unassigned directory
- os.makedirs(output_dir, exist_ok=True)
- unassigned_dir = os.path.join(output_dir, "0-UNASSIGNED")
- os.makedirs(unassigned_dir, exist_ok=True)
-
- # Sort issues by type to ensure proper hierarchy
- initiatives = []
- epics = []
- others = []
-
- for issue in all_issues:
- issue_type = issue.fields.issuetype.name.lower()
- if "initiative" in issue_type:
- initiatives.append(issue)
- elif "epic" in issue_type:
- epics.append(issue)
- else:
- others.append(issue)
-
- # Process in hierarchical order
- issue_dirs = {} # Keep track of created directories
-
- # First: Create initiatives
- for issue in initiatives:
- issue_dirs[issue.key] = create_ticket_directory(issue, jira)
-
- # Second: Create epics under their initiatives
- for issue in epics:
- relationships = get_issue_relationships(issue, jira)
- if relationships["parent"] and relationships["parent"]["key"] in issue_dirs:
- issue_dirs[issue.key] = create_ticket_directory(
- issue, jira, issue_dirs[relationships["parent"]["key"]]
- )
- else:
- issue_dirs[issue.key] = create_ticket_directory(issue, jira)
-
- # Finally: Create stories/tasks under their epics or in unassigned
- for issue in others:
- relationships = get_issue_relationships(issue, jira)
- if relationships["parent"] and relationships["parent"]["key"] in issue_dirs:
- issue_dirs[issue.key] = create_ticket_directory(
- issue, jira, issue_dirs[relationships["parent"]["key"]]
- )
- else:
- issue_dirs[issue.key] = create_ticket_directory(issue, jira, unassigned_dir)
-
- print(f"Synced {len(all_issues)} open issues into '{output_dir}/'")
- print(f"- Initiatives: {len(initiatives)}")
- print(f"- Epics: {len(epics)}")
- print(f"- Stories/Tasks: {len(others)}")
- print("Tickets are organized in a hierarchy based on their relationships")
-
- return 0
-
-
-if __name__ == "__main__":
- exit(main())
diff --git a/tests/test_ground_control.py b/tests/test_ground_control.py
deleted file mode 100644
index c4a7183e..00000000
--- a/tests/test_ground_control.py
+++ /dev/null
@@ -1,333 +0,0 @@
-import json
-import os
-from unittest.mock import Mock, patch
-
-import pytest
-
-from grndctl.main import (
- check_directory,
- cleanup_directory,
- create_ticket_directory,
- get_issue_relationships,
- get_type_prefix,
- main,
- parse_args,
- sanitize_filename,
-)
-
-
-def test_sanitize_filename():
- """Test filename sanitization."""
- # Test invalid characters
- assert sanitize_filename('test<>:"/\\|?*file') == "test_________file"
-
- # Test leading/trailing spaces and dots
- assert sanitize_filename(" test.file. ") == "test.file"
-
- # Test mixed case
- assert sanitize_filename("TEST file") == "TEST file"
-
- # Test empty string
- assert sanitize_filename("") == ""
-
-
-def test_get_type_prefix():
- """Test issue type prefix generation."""
- # Test various issue types
- assert get_type_prefix("Initiative") == "INI"
- assert get_type_prefix("Technical Initiative") == "INI"
- assert get_type_prefix("Epic") == "EPIC"
- assert get_type_prefix("Story") == "STORY"
- assert get_type_prefix("User Story") == "STORY"
- assert get_type_prefix("Task") == "TASK"
- assert get_type_prefix("Technical Task") == "TASK"
-
- # Test case insensitivity
- assert get_type_prefix("INITIATIVE") == "INI"
- assert get_type_prefix("epic") == "EPIC"
- assert get_type_prefix("Story") == "STORY"
-
- # Test unknown types default to TASK
- assert get_type_prefix("Unknown") == "TASK"
- assert get_type_prefix("Custom Type") == "TASK"
-
-
-def test_check_directory(tmp_path):
- """Test directory checking and creation."""
- # Test creating new directory
- new_dir = tmp_path / "new_dir"
- check_directory(str(new_dir))
- assert new_dir.exists()
- assert new_dir.is_dir()
-
- # Test empty existing directory
- empty_dir = tmp_path / "empty_dir"
- empty_dir.mkdir()
- check_directory(str(empty_dir)) # Should not raise
-
- # Test non-empty directory
- nonempty_dir = tmp_path / "nonempty_dir"
- nonempty_dir.mkdir()
- (nonempty_dir / "file.txt").write_text("content")
- with pytest.raises(ValueError) as exc:
- check_directory(str(nonempty_dir))
- assert "not empty" in str(exc.value)
-
- # Test file path
- file_path = tmp_path / "file.txt"
- file_path.write_text("content")
- with pytest.raises(ValueError) as exc:
- check_directory(str(file_path))
- assert "not a directory" in str(exc.value)
-
-
-def test_get_issue_relationships():
- """Test relationship detection with different issue types."""
- # Mock JIRA client
- mock_jira = Mock()
-
- # Mock issue with parent
- parent_issue = Mock()
- parent_issue.key = "SECOPS-100"
- parent_issue.fields.issuetype.name = "Epic"
-
- issue_with_parent = Mock()
- issue_with_parent.key = "SECOPS-101"
- issue_with_parent.fields.parent = parent_issue
- issue_with_parent.fields.customfield_10014 = None
-
- rels = get_issue_relationships(issue_with_parent, mock_jira)
- assert rels["parent"] == {"key": "SECOPS-100", "type": "Epic"}
-
- # Mock issue with epic link
- epic_issue = Mock()
- epic_issue.key = "SECOPS-200"
- epic_issue.fields.issuetype.name = "Epic"
- mock_jira.issue.return_value = epic_issue
-
- issue_with_epic = Mock()
- issue_with_epic.key = "SECOPS-201"
- issue_with_epic.fields.parent = None
- issue_with_epic.fields.customfield_10014 = "SECOPS-200"
-
- rels = get_issue_relationships(issue_with_epic, mock_jira)
- assert rels["parent"] == {"key": "SECOPS-200", "type": "Epic"}
-
- # Mock issue with no relationships
- issue_no_parent = Mock()
- issue_no_parent.key = "SECOPS-300"
- issue_no_parent.fields.parent = None
- issue_no_parent.fields.customfield_10014 = None
-
- rels = get_issue_relationships(issue_no_parent, mock_jira)
- assert rels["parent"] is None
-
-
-def test_create_ticket_directory(tmp_path):
- """Test ticket directory creation and content."""
- # Mock JIRA issue
- issue = Mock()
- issue.key = "SECOPS-123"
- issue.id = "10000"
- issue.fields.issuetype.name = "Story"
- issue.fields.status.name = "In Progress"
- issue.fields.summary = "Test ticket"
- issue.fields.reporter = "John Doe"
- issue.fields.assignee = "Jane Smith"
- issue.fields.updated = "2024-01-22T12:34:56"
- issue.fields.description = "Test description"
-
- # Mock JIRA client
- jira = Mock()
- jira.comments.return_value = [
- Mock(
- author=Mock(displayName="Commenter"), updated="2024-01-22T13:00:00", body="Test comment"
- )
- ]
-
- # Create ticket directory
- with patch("ground_control.main.get_issue_relationships") as mock_get_rels:
- mock_get_rels.return_value = {"parent": None, "children": []}
- dir_path = create_ticket_directory(issue, jira, tmp_path)
-
- # Check directory structure
- assert os.path.exists(dir_path)
- assert os.path.exists(os.path.join(dir_path, "metadata.json"))
- assert os.path.exists(os.path.join(dir_path, "ticket.md"))
-
- # Check metadata content
- with open(os.path.join(dir_path, "metadata.json")) as f:
- metadata = json.load(f)
- assert metadata["key"] == "SECOPS-123"
- assert metadata["type"] == "Story"
- assert metadata["status"] == "In Progress"
-
- # Check markdown content
- with open(os.path.join(dir_path, "ticket.md")) as f:
- content = f.read()
- assert "# SECOPS-123: Test ticket" in content
- assert "Test description" in content
- assert "Test comment" in content
-
-
-def test_parse_args():
- """Test command line argument parsing."""
- # Test default arguments
- with patch("sys.argv", ["ground-control"]):
- args = parse_args()
- assert args.output == "tickets"
- assert args.ticket is None
- assert not args.recursive
-
- # Test output directory
- with patch("sys.argv", ["ground-control", "-o", "custom_dir"]):
- args = parse_args()
- assert args.output == "custom_dir"
-
- # Test specific ticket
- with patch("sys.argv", ["ground-control", "SECOPS-123"]):
- args = parse_args()
- assert args.ticket == "SECOPS-123"
-
- # Test recursive flag
- with patch("sys.argv", ["ground-control", "SECOPS-123", "-r"]):
- args = parse_args()
- assert args.ticket == "SECOPS-123"
- assert args.recursive
-
-
-def test_cleanup_directory(tmp_path):
- """Test directory cleanup functionality."""
- # Create test directory structure
- test_dir = tmp_path / "test_cleanup"
- test_dir.mkdir()
-
- # Create some files
- (test_dir / "file1.txt").write_text("content1")
- (test_dir / "file2.txt").write_text("content2")
-
- # Create a subdirectory with files
- subdir = test_dir / "subdir"
- subdir.mkdir()
- (subdir / "file3.txt").write_text("content3")
-
- # Run cleanup
- cleanup_directory(str(test_dir))
-
- # Verify directory is empty
- assert os.path.exists(test_dir)
- assert len(os.listdir(test_dir)) == 0
-
-
-def test_create_ticket_directory_long_summary(tmp_path):
- """Test ticket directory creation with long summary."""
- # Mock JIRA issue with long summary
- issue = Mock()
- issue.key = "SECOPS-123"
- issue.id = "10000"
- issue.fields.issuetype.name = "Story"
- issue.fields.status.name = "In Progress"
- issue.fields.summary = (
- "This is a very long summary that should be truncated because it exceeds fifty characters"
- )
- issue.fields.reporter = "John Doe"
- issue.fields.assignee = None # Test None assignee
- issue.fields.updated = "2024-01-22T12:34:56"
- issue.fields.description = "Test description"
-
- # Mock JIRA client
- jira = Mock()
- jira.comments.return_value = [] # Test no comments
-
- # Create ticket directory
- with patch("ground_control.main.get_issue_relationships") as mock_get_rels:
- mock_get_rels.return_value = {"parent": None, "children": []}
- dir_path = create_ticket_directory(issue, jira, tmp_path)
-
- # Verify directory name is truncated
- dir_name = os.path.basename(dir_path)
- assert len(dir_name) < 100 # Reasonable length
- assert dir_name.startswith("STORY-SECOPS-123-This is a very long summary that")
- assert dir_name.endswith("...")
-
-
-@patch("ground_control.main.JIRA")
-@patch("ground_control.main.USERNAME", "test@example.com")
-@patch("ground_control.main.API_TOKEN", "test-token")
-@patch("ground_control.main.JIRA_URL", "https://jira.example.com")
-@patch("ground_control.main.BOARD_ID", "SECOPS")
-def test_main_jira_connection(mock_jira_class, *_):
- """Test main function's JIRA connection and error handling."""
- # Mock environment variables
- env_vars = {
- "JIRA_URL": "https://jira.example.com",
- "JIRA_PROJECT": "SECOPS",
- "JIRA_USERNAME": "test@example.com",
- "JIRA_API_TOKEN": "test-token",
- }
-
- with patch.dict(os.environ, env_vars, clear=True), patch(
- "sys.argv", ["ground-control", "-o", "test_output"]
- ), patch("grndctl.main.check_directory"), patch("grndctl.main.cleanup_directory"):
-
- # Mock JIRA client and search results
- mock_jira_instance = Mock()
- mock_jira_class.return_value = mock_jira_instance
-
- # Mock issues of different types
- initiative = Mock()
- initiative.fields = Mock()
- initiative.fields.issuetype = Mock()
- initiative.fields.issuetype.name = "Initiative"
- initiative.key = "SECOPS-1"
-
- epic = Mock()
- epic.fields = Mock()
- epic.fields.issuetype = Mock()
- epic.fields.issuetype.name = "Epic"
- epic.key = "SECOPS-2"
-
- story = Mock()
- story.fields = Mock()
- story.fields.issuetype = Mock()
- story.fields.issuetype.name = "Story"
- story.key = "SECOPS-3"
-
- # Set up mock search results
- mock_jira_instance.search_issues.return_value = [initiative, epic, story]
-
- # Run main
- with patch("ground_control.main.create_ticket_directory") as mock_create_dir:
- mock_create_dir.return_value = "test_dir"
- exit_code = main()
-
- # Verify JIRA client creation
- mock_jira_class.assert_called_once_with(
- server="https://jira.example.com", basic_auth=("test@example.com", "test-token")
- )
-
- # Verify issue search
- mock_jira_instance.search_issues.assert_called()
-
- # Verify exit code
- assert exit_code == 0
-
-
-@patch("ground_control.main.JIRA")
-def test_main_missing_credentials(mock_jira_class):
- """Test main function with missing credentials."""
- # Mock environment variables with missing credentials
- env_vars = {
- "JIRA_URL": "https://jira.example.com",
- "JIRA_PROJECT": "SECOPS",
- "JIRA_USERNAME": "", # Default value
- "JIRA_API_TOKEN": "", # Default value
- }
-
- with patch.dict(os.environ, env_vars), patch("sys.argv", ["ground-control"]):
- exit_code = main()
-
- # Should exit with error
- assert exit_code == 1
- # JIRA client should not be created
- mock_jira_class.assert_not_called()