diff --git a/.envrc b/.envrc index 2271d49..ca71928 100644 --- a/.envrc +++ b/.envrc @@ -1,2 +1,8 @@ -use nix export CODEX_HOME=$PWD/.codex + +if ! has nix_direnv_version || ! nix_direnv_version 3.1.0; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.1.0/direnvrc" \ + "sha256-yMJ2OVMzrFaDPn7q8nCBZFRYpL/f0RcHzhmw/i6btJM=" +fi + +use flake diff --git a/AGENTS.md b/AGENTS.md index cec272c..65ad009 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,10 +1,11 @@ # core-ops Development Guidelines -Auto-generated from all feature plans. Last updated: 2026-03-19 +Auto-generated from all feature plans. Last updated: 2026-03-20 ## Active Technologies - Rust (stable toolchain) + Git (CLI), systemd (systemctl), Quadlet generator, clap, thiserror, miette, journald logger (002-systemd-agent) - Files on disk (Quadlet unit files + optional reconciliation state) (002-systemd-agent) +- Files on disk (repository layout + evaluated desired state in memory) (003-layered-overrides) - Rust (stable toolchain) + Git (CLI), systemd (systemctl), Podman/Quadlet generator (001-gitops-quadlet-controller) @@ -24,8 +25,8 @@ cargo test [ONLY COMMANDS FOR ACTIVE TECHNOLOGIES][ONLY COMMANDS FOR ACTIVE TECH Rust (stable toolchain): Follow standard conventions ## Recent Changes +- 003-layered-overrides: Added Rust (stable toolchain) + Git (CLI), systemd (systemctl), Quadlet generator, clap, thiserror, miette, journald logger - 002-systemd-agent: Added Rust (stable toolchain) + Git (CLI), systemd (systemctl), Quadlet generator, clap, thiserror, miette, journald logger - - 001-gitops-quadlet-controller: Added Rust (stable toolchain) + Git (CLI), systemd (systemctl), Podman/Quadlet generator diff --git a/Cargo.lock b/Cargo.lock index 663ea6a..8fdd792 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -160,8 +160,11 @@ name = "core-ops" version = "0.1.0" dependencies = [ "clap", + "libc", "log", "miette", + "serde", + "serde_yaml", "systemd-journal-logger", "tempfile", "thiserror", @@ 
-420,6 +423,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + [[package]] name = "semver" version = "1.0.27" @@ -433,6 +442,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", + "serde_derive", ] [[package]] @@ -468,6 +478,19 @@ dependencies = [ "zmij", ] +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "strsim" version = "0.11.1" @@ -599,6 +622,12 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "utf8parse" version = "0.2.2" diff --git a/Cargo.toml b/Cargo.toml index 3fa151a..72017d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,8 +5,11 @@ edition = "2021" [dependencies] clap = { version = "4.5.4", features = ["derive"] } +libc = "0.2.153" log = { version = "0.4", features = ["kv"] } miette = { version = "7.2.0", features = ["fancy"] } +serde = { version = "1.0", features = ["derive"] } +serde_yaml = "0.9.34" systemd-journal-logger = "2.2.2" tempfile = "3.10.1" thiserror = "1.0.58" diff --git a/docs/development.md b/docs/development.md index 9f9f454..9f77eb4 100644 --- a/docs/development.md +++ b/docs/development.md @@ -47,3 +47,18 @@ Timer enablement 
example: ``` systemctl enable --now core-ops.timer ``` + +## Layered Overrides Development + +Use the layered overrides fixture in `tests/fixtures/layered_overrides/` for +local testing. The repository layout should include: + +- `services/<service>/` for base artifacts and base drop-ins +- `hosts/<host>/host.yaml` with explicit service selection +- `hosts/<host>/overrides/` for host-specific drop-ins + +Override host selection during development with: + +``` +CORE_OPS_HOST=<host> core-ops plan --repo <repo-path> --rev <rev> +``` diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000..16e5c1c --- /dev/null +++ b/flake.lock @@ -0,0 +1,63 @@ +{ + "nodes": { + "codex": { + "inputs": { + "nixpkgs": "nixpkgs" + }, + "locked": { + "lastModified": 1769115457, + "narHash": "sha256-VFbtxGOqX80qWqVo+BG+BnUr8DiLCfcJCrN9fwy7utY=", + "owner": "openai", + "repo": "codex", + "rev": "605d43719efa7f62160904d2dcecc46a64bcd32e", + "type": "github" + }, + "original": { + "owner": "openai", + "ref": "rust-v0.89.0", + "repo": "codex", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1758427187, + "narHash": "sha256-pHpxZ/IyCwoTQPtFIAG2QaxuSm8jWzrzBGjwQZIttJc=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "554be6495561ff07b6c724047bdd7e0716aa7b46", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1773821835, + "narHash": "sha256-TJ3lSQtW0E2JrznGVm8hOQGVpXjJyXY2guAxku2O9A4=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "b40629efe5d6ec48dd1efba650c797ddbd39ace0", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "codex": "codex", + "nixpkgs": "nixpkgs_2" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..59131c6 --- /dev/null +++ b/flake.nix @@ -0,0 +1,35 @@ +{ + 
description = "Project dev shell with recent Codex"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + codex.url = "github:openai/codex/rust-v0.89.0"; + }; + + outputs = { self, nixpkgs, codex, ... }: + let + system = "x86_64-linux"; # change as needed + pkgs = import nixpkgs { inherit system; }; + in { + devShells.${system}.default = pkgs.mkShell { + packages = with pkgs; [ + codex.packages.${system}.default + yaml-language-server + cargo + rustc + rust-analyzer + rustfmt + clippy + pkg-config + git + uv + python314 + podman + systemd + jq + ]; + + RUST_SRC_PATH = "${pkgs.rustPlatform.rustLibSrc}"; + }; + }; +} diff --git a/shell.nix b/shell.nix deleted file mode 100644 index 0b2326d..0000000 --- a/shell.nix +++ /dev/null @@ -1,27 +0,0 @@ -{ pkgs ? import {} }: - -pkgs.mkShell { - strictDeps = true; - - nativeBuildInputs = with pkgs; [ - cargo - rustc - rust-analyzer - rustfmt - clippy - pkg-config - git - codex - uv - python314 - ]; - - buildInputs = with pkgs; [ - podman - systemd - jq - ]; - - # Helpful for editors / rust-analyzer in Nix shells - RUST_SRC_PATH = "${pkgs.rustPlatform.rustLibSrc}"; -} diff --git a/specs/003-layered-overrides/checklists/requirements.md b/specs/003-layered-overrides/checklists/requirements.md new file mode 100644 index 0000000..a21cf2f --- /dev/null +++ b/specs/003-layered-overrides/checklists/requirements.md @@ -0,0 +1,34 @@ +# Specification Quality Checklist: Layered Overrides for Reusable Desired State + +**Purpose**: Validate specification completeness and quality before proceeding to planning +**Created**: 2026-03-20 +**Feature**: /home/outergod/code/github.com/outergod/core-ops/specs/003-layered-overrides/spec.md + +## Content Quality + +- [x] No implementation details (languages, frameworks, APIs) +- [x] Focused on user value and business needs +- [x] Written for non-technical stakeholders +- [x] All mandatory sections completed + +## Requirement Completeness + +- [x] No [NEEDS CLARIFICATION] markers 
remain +- [x] Requirements are testable and unambiguous +- [x] Success criteria are measurable +- [x] Success criteria are technology-agnostic (no implementation details) +- [x] All acceptance scenarios are defined +- [x] Edge cases are identified +- [x] Scope is clearly bounded +- [x] Dependencies and assumptions identified + +## Feature Readiness + +- [x] All functional requirements have clear acceptance criteria +- [x] User scenarios cover primary flows +- [x] Feature meets measurable outcomes defined in Success Criteria +- [x] No implementation details leak into specification + +## Notes + +- All checklist items passed on 2026-03-20 after adding host-level service selection. diff --git a/specs/003-layered-overrides/contracts/repo-layout.md b/specs/003-layered-overrides/contracts/repo-layout.md new file mode 100644 index 0000000..f295598 --- /dev/null +++ b/specs/003-layered-overrides/contracts/repo-layout.md @@ -0,0 +1,42 @@ +# Repository Layout Contract + +## Purpose +Define the repository structure and host selection inputs used for layered overrides. + +## Required Structure + +- `services/<service>/` contains base artifacts and base drop-ins. +- `hosts/<host>/host.yaml` declares host identity and explicit service selection. +- `hosts/<host>/overrides/` contains host-specific drop-ins layered after base drop-ins. + +## Host Declaration Schema + +`hosts/<host>/host.yaml`: + +```yaml +host: <host> +services: + - <service-1> + - <service-2> +``` + +### Rules + +- `host` must match the `<host>` directory name. +- `services` must reference existing `services/<service>/` directories. +- No groups/roles are supported; selection is an explicit list. + +## Drop-in Rules + +- Quadlet drop-ins: `artifact.container.d/*.conf`, `artifact.volume.d/*.conf`. +- Systemd socket drop-ins: `artifact.socket.d/*.conf`. +- Drop-ins are applied in lexicographic order by filename. +- Host overrides are applied after base drop-ins. +- For socket drop-ins, host override filenames must sort after base filenames + for the same target, or evaluation fails. 
+ +## Validation Failures + +- Undefined service selection → evaluation fails. +- Drop-in targets nonexistent artifact → evaluation fails. +- Unsupported file types/extensions → evaluation fails. diff --git a/specs/003-layered-overrides/data-model.md b/specs/003-layered-overrides/data-model.md new file mode 100644 index 0000000..d7b6be2 --- /dev/null +++ b/specs/003-layered-overrides/data-model.md @@ -0,0 +1,44 @@ +# Data Model: Layered Overrides for Reusable Desired State + +## Entities + +### Service Catalog +- **Purpose**: Shared base definitions under `services/`. +- **Fields**: + - `service_name` (string) + - `artifacts` (list of Quadlet/systemd unit files) + - `base_dropins` (list of drop-in files by artifact) + +### Host Declaration +- **Purpose**: Per-host selection and identity in `hosts//host.yaml`. +- **Fields**: + - `host` (string) + - `services` (list of service names) + +### Host Overlay Set +- **Purpose**: Host-specific drop-ins under `hosts//overrides/`. +- **Fields**: + - `host` (string) + - `overrides` (list of drop-in files by artifact) + +### Evaluated Artifact +- **Purpose**: Concrete artifact after base + drop-in evaluation. +- **Fields**: + - `artifact_name` (string) + - `artifact_type` (container, volume, socket) + - `contents` (string) + - `source_layers` (list of files applied in order) + +## Relationships + +- A **Service Catalog** contains many base artifacts and base drop-ins. +- A **Host Declaration** selects services from the Service Catalog. +- A **Host Overlay Set** provides drop-ins applied after base drop-ins. +- **Evaluated Artifacts** are produced by applying base artifacts + base drop-ins + host overlays. + +## Validation Rules + +- Host-selected services must exist in the Service Catalog. +- Drop-ins must target an existing base artifact. +- Drop-ins are applied in lexicographic order; host overlays are applied after base drop-ins. +- Evaluation must be deterministic and side-effect free. 
diff --git a/specs/003-layered-overrides/plan.md b/specs/003-layered-overrides/plan.md new file mode 100644 index 0000000..0755d73 --- /dev/null +++ b/specs/003-layered-overrides/plan.md @@ -0,0 +1,84 @@ +# Implementation Plan: Layered Overrides for Reusable Desired State + +**Branch**: `[003-layered-overrides]` | **Date**: 2026-03-20 | **Spec**: specs/003-layered-overrides/spec.md +**Input**: Feature specification from `/specs/003-layered-overrides/spec.md` + +**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/plan-template.md` for the execution workflow. + +## Summary + +Add a deterministic evaluation phase that composes shared base artifacts, +host-selected services, host-specific drop-ins, and bounded config payloads, +producing a concrete desired state before diff/plan/apply. Reuse is achieved +via native Quadlet and systemd drop-in semantics plus whole-file config layering, +without any templating language, and with explicit failures for invalid overlays +or undefined services. Managed config roots are authoritative for selected +services, and reconciliation removes stale config files that are no longer in +desired state by scanning those roots (no external index state). 
+ +## Technical Context + +**Language/Version**: Rust (stable toolchain) +**Primary Dependencies**: Git (CLI), systemd (systemctl), Quadlet generator, clap, thiserror, miette, journald logger, serde, serde_yaml, libc +**Storage**: Files on disk (repository layout + bounded config payloads + evaluated desired state in memory) +**Testing**: cargo test (unit + integration) +**Target Platform**: Fedora CoreOS (single host) +**Project Type**: CLI + systemd service/timer agent +**Performance Goals**: Evaluation overhead <= 1s per 50 artifacts (per SC-004) +**Constraints**: No templating language; no semantic config merging; deterministic evaluation; explicit failure diagnostics; native Quadlet/systemd drop-ins only; managed config roots are closed-world for selected services and observed by scanning the host filesystem (no sidecar state) +**Scale/Scope**: Single host, shared base definitions reused across multiple hosts + +## Constitution Check + +*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.* + +- Functional core and imperative shell boundaries are explicit; side effects are isolated. +- Desired/observed state, reconciliation plans, and outcomes are represented as data. +- Abstractions are minimal and justified; complexity tracking added if needed. +- Effects, assumptions, and failure modes are explicit in interfaces and returns. +- Idempotence and convergence strategy are defined, including retry behavior. +- Open standards and native interfaces are preferred; deviations justified. +- Observability plan covers diffs, plans, actions, failures, and dry-run/audit needs. +- Safe defaults are documented; destructive actions require explicit intent. +- Compatibility impact is assessed; breaking changes are documented with migration. +- Test strategy covers invariants, external behavior, convergence, and failures. +- Modules are structured to be regenerable from specs and tests. + +Status: PASS (pre-design). Post-design re-check: PASS. 
+ +## Project Structure + +### Documentation (this feature) + +```text +specs/003-layered-overrides/ +├── plan.md # This file (/speckit.plan command output) +├── research.md # Phase 0 output (/speckit.plan command) +├── data-model.md # Phase 1 output (/speckit.plan command) +├── quickstart.md # Phase 1 output (/speckit.plan command) +├── contracts/ # Phase 1 output (/speckit.plan command) +└── tasks.md # Phase 2 output (/speckit.tasks command - NOT created by /speckit.plan) +``` + +### Source Code (repository root) + +```text +src/ +├── cli/ +├── core/ +└── io/ + +tests/ +├── integration/ +└── unit/ +``` + +**Structure Decision**: Single project with `core` (evaluation, planning, validation), `io` (repo/systemd/Quadlet interactions), and `cli` (entrypoints/reporting). No additional services are introduced. + +## Complexity Tracking + +> **Fill ONLY if Constitution Check has violations that must be justified** + +| Violation | Why Needed | Simpler Alternative Rejected Because | +|-----------|------------|-------------------------------------| +| | | | diff --git a/specs/003-layered-overrides/quickstart.md b/specs/003-layered-overrides/quickstart.md new file mode 100644 index 0000000..8974c9d --- /dev/null +++ b/specs/003-layered-overrides/quickstart.md @@ -0,0 +1,39 @@ +# Quickstart: Layered Overrides for Reusable Desired State + +**Goal**: Reuse shared service definitions across hosts using native drop-ins and +host selection, without templating. + +## Repository Layout + +- Place shared services under `services/`. +- For each host, create `hosts//host.yaml` with a service list. +- Put host-specific drop-ins under `hosts//overrides/`. + +Example `hosts/kadath/host.yaml`: + +```yaml +host: kadath +services: + - traefik + - immich +``` + +## Host Identity Selection + +- Default: OS hostname. +- Override: supply a CLI/env host override when running the controller (e.g., `CORE_OPS_HOST=ulthar` or `--host ulthar`). + +## Evaluation Flow + +1. 
Load base artifacts from `services/<service>/` for selected services. +2. Apply base drop-ins in lexicographic order. +3. Apply host overrides in lexicographic order after base drop-ins. + For socket drop-ins, host filenames must sort after base filenames + (e.g., `90-host.conf`). +4. Produce a concrete desired state and proceed with normal diff/plan/apply. + +## Validation Rules + +- Undefined services in `host.yaml` fail evaluation. +- Drop-ins targeting missing artifacts fail evaluation. +- Unsupported file types/extensions fail evaluation. diff --git a/specs/003-layered-overrides/research.md b/specs/003-layered-overrides/research.md new file mode 100644 index 0000000..d7b118c --- /dev/null +++ b/specs/003-layered-overrides/research.md @@ -0,0 +1,25 @@ +# Research: Layered Overrides for Reusable Desired State + +## Decision: Host identity selection + +- **Decision**: Determine host identity from OS hostname by default, with an explicit CLI/env override. +- **Rationale**: Avoids circular dependency on repo contents and keeps evaluation deterministic. +- **Alternatives considered**: Host identity derived from host.yaml only (circular), required explicit override only (adds operator burden). + +## Decision: Repository layout + +- **Decision**: Use `services/` for shared base artifacts and `hosts/<host>/overrides/` for host-specific drop-ins; `hosts/<host>/host.yaml` declares service selection. + +- **Rationale**: Mirrors native drop-in semantics and keeps base artifacts reusable. +- **Alternatives considered**: Single flat quadlets directory (no reuse), custom templating directories (violates constraints). + +## Decision: Drop-in ordering + +- **Decision**: Apply native lexicographic ordering for drop-ins, with host overrides layered after base drop-ins. +- **Rationale**: Matches systemd/Quadlet expectations and ensures deterministic evaluation. +- **Alternatives considered**: Fixed override order without filename ordering (non-native), single override file (too restrictive). 
+ +## Decision: Validation rules + +- **Decision**: Fail evaluation when a host selects undefined services or overlays target nonexistent artifacts. +- **Rationale**: Prevents silent drift and keeps failures explicit. +- **Alternatives considered**: Ignore missing services (risk of hidden misconfigurations). diff --git a/specs/003-layered-overrides/spec.md b/specs/003-layered-overrides/spec.md new file mode 100644 index 0000000..5c06570 --- /dev/null +++ b/specs/003-layered-overrides/spec.md @@ -0,0 +1,297 @@ +# Feature Specification: Layered Overrides for Reusable Desired State + +**Feature Branch**: `[003-layered-overrides]` +**Created**: 2026-03-20 +**Status**: Draft +**Input**: User description: "Specify the next iteration of the Fedora CoreOS Quadlet GitOps controller around reusable desired state through native layered overrides rather than a custom templating language. Context: The current iteration already supports unattended systemd operation, journald audit integration, secure remote Git access, and reconciliation of container, volume, and socket artifacts on a single host. The next iteration should make desired state reusable across hosts while staying close to native Quadlet and systemd mechanisms. 
Goals: - allow shared service definitions to be reused across multiple hosts - support host-specific customization without introducing a general templating language - preserve native system semantics, deterministic evaluation, idempotence, and observability Core design direction: - reuse should be achieved through layered overrides - Quadlet-managed artifacts should support Quadlet-native drop-ins such as artifact.container.d/*.conf - native systemd artifacts should support systemd drop-ins such as artifact.socket.d/*.conf - the controller should evaluate shared base artifacts plus host-specific drop-ins into concrete desired state before diff/plan/apply - avoid custom placeholder syntax and avoid `${...}`-style substitution because it may collide with native systemd semantics Requirements: - define a repository model for shared artifacts and per-host overlays/drop-ins - define how the controller determines host identity for selecting host-specific overlays - define merge/evaluation semantics for base artifacts and drop-ins - distinguish clearly between Quadlet-native drop-ins and native systemd drop-ins - ensure the evaluation phase is deterministic, side-effect free, and testable - preserve the existing reconciliation model after evaluation produces concrete desired state - support common real-world reuse scenarios such as shared Traefik definitions with slight host-level variations Constraints: - do not introduce a general templating language - do not introduce arbitrary expression evaluation, loops, or conditionals - do not reuse `${...}` as a controller-specific parameter syntax - preserve functional core / imperative shell architecture - preserve explicit failure behavior and clear operator diagnostics - remain within the project’s existing scope of native host primitives rather than becoming a generic configuration management system Non-goals: - fleet coordination or cross-host orchestration - secret distribution systems - generic host file content 
management beyond the existing supported artifact model - arbitrary environment-file management as a first-class abstraction - full template engines or Helm-like rendering The specification should define: - the user/operator problem this iteration solves - the repository structure and desired-state model - evaluation/merge semantics - validation and failure rules for conflicting or invalid overlays - acceptance criteria for shared base definitions with host-specific overrides - risks and open questions for implementation" + +## Clarifications + +### Session 2026-03-20 +- Q: How should host service selection be expressed in host.yaml? → A: Explicit service list only (no groups/roles). +- Q: What ordering should be used when applying drop-ins? → A: Use native lexicographic drop-in ordering; host overrides apply after base drop-ins. +- Q: How should evaluation handle a host selecting a service that is not defined? → A: Fail evaluation with an explicit error. +- Q: How should the controller determine host identity for selecting hosts/? → A: Hostname default + explicit CLI/env override. + + +## User Scenarios & Testing *(mandatory)* + +### User Story 1 - Reuse shared base definitions across hosts (Priority: P1) + +As an operator, I want to define shared base artifacts once and reuse them across +multiple hosts while selecting which shared services each host should include so +that consistent services can be deployed without duplication or overreach. + +**Why this priority**: Reuse is the central value of this iteration and enables +consistent operations across hosts without a templating language. + +**Independent Test**: Apply the same repository to two hosts with different +service selections and verify each host converges only to its selected shared +services, with no host-specific overlays present. + +**Acceptance Scenarios**: + +1. 
**Given** a repository with base artifacts and service selections, **When** a + host evaluates desired state, **Then** the evaluation output contains only + the base artifacts selected for that host. +2. **Given** the same base repository, **When** two hosts evaluate desired + state with different selections, **Then** each produces a concrete desired + state limited to its selected services. + +--- + +### User Story 2 - Apply host-specific drop-ins without templating (Priority: P2) + +As an operator, I want host-specific drop-ins to override shared base artifacts +and config payloads without using a templating language so that each host can +adjust behavior using native Quadlet/systemd mechanisms and bounded host paths. + +**Why this priority**: Host-level customization is the key practical need for +reusable desired state while preserving native semantics. + +**Independent Test**: Add a host-specific drop-in and a config override and +verify only that host’s concrete desired state and config materialization change, +while other hosts remain unchanged. + +**Acceptance Scenarios**: + +1. **Given** base artifacts plus a host-specific drop-in directory, **When** the + host evaluates desired state after selecting services, **Then** the drop-in + is applied only to selected base artifacts in the resulting concrete desired + state. +2. **Given** multiple hosts with different overlays, **When** each host evaluates + desired state, **Then** each host’s concrete desired state includes only its + own overlays. +3. **Given** shared config payloads plus a host-specific config override, + **When** evaluation runs, **Then** the config materialized for that host + reflects base payloads with host overrides layered by replacement or + directory overlay rules. 
+ +--- + +### User Story 3 - Deterministic, testable evaluation (Priority: P3) + +As an operator, I want evaluation to be deterministic and side-effect free so +that results are testable, repeatable, and safe to run before apply. + +**Why this priority**: Deterministic evaluation preserves GitOps trust and makes +reconciliation outcomes predictable. + +**Independent Test**: Run evaluation twice for the same host and verify identical +outputs and stable diagnostics. + +**Acceptance Scenarios**: + +1. **Given** unchanged base and overlay inputs, **When** evaluation runs twice, + **Then** the resulting desired state is byte-for-byte identical. +2. **Given** an invalid overlay, **When** evaluation runs, **Then** it fails + explicitly with a clear diagnostic before planning or applying. + +### Edge Cases + +- Two overlays define conflicting keys for the same artifact. +- Host identity cannot be determined. +- Drop-in directory exists but contains no valid files. +- A drop-in targets a base artifact that does not exist. +- A host selects a service that is not defined in the base repository. +- Overlays introduce unsupported file types or extensions. +- A managed config path is renamed or removed between revisions. +- Apply fails after writing some config files but before completing all deletes. + +## Requirements *(mandatory)* + +### Functional Requirements + +- **FR-001**: The system MUST support reusable base artifacts that can be shared + across multiple hosts without duplication. +- **FR-002**: The system MUST support host-specific overlays using native drop-in + formats for Quadlet artifacts (`artifact.container.d/*.conf`, `artifact.volume.d/*.conf`) + and native systemd drop-ins for socket units (`artifact.socket.d/*.conf`). +- **FR-003**: The system MUST define a repository model that distinguishes base + artifacts from host-specific overlays and drop-ins. 
+- **FR-004**: The system MUST determine host identity using the OS hostname by + default, with an explicit CLI/env override for selecting hosts/. +- **FR-005**: The repository model MUST include a host-level service selection + mechanism where host.yaml declares an explicit service list (no groups/roles). +- **FR-006**: The system MUST evaluate base artifacts plus applicable overlays + into a concrete desired state before diff/plan/apply, applying native drop-in + ordering (lexicographic) with host overrides layered after base drop-ins. +- **FR-006a**: For socket drop-ins, host override filenames MUST sort after base + drop-ins for the same target; otherwise evaluation MUST fail with a clear + validation error. +- **FR-007**: Evaluation MUST be deterministic, side-effect free, and testable. +- **FR-008**: The system MUST reject overlays that target nonexistent base + artifacts, invalid file types, or host selections that reference undefined + services, with explicit diagnostics. +- **FR-009**: The system MUST preserve the existing reconciliation model once + evaluation produces concrete desired state. +- **FR-010**: The system MUST NOT introduce a templating language or custom + placeholder substitution. +- **FR-011**: The system MUST clearly distinguish Quadlet drop-ins from native + systemd drop-ins in validation and evaluation rules. +- **FR-012**: Selected services MAY include managed config files and directories + that are materialized to bounded host paths (e.g., `/etc/`) and then + mounted or referenced by Quadlets. +- **FR-013**: Config payloads MUST support shared base definitions plus + host-specific overrides using whole-file replacement and directory layering. +- **FR-014**: The system MUST NOT implement semantic merging of TOML/YAML/JSON + config formats and MUST preserve explicit boundaries to avoid becoming a + general configuration management system. 
+- **FR-015**: Managed config roots for selected services (e.g., `/etc//`) + are authoritative, closed-world directories for core-ops and MUST NOT contain + unmanaged files. +- **FR-016**: During reconciliation, any file within a managed config root that + is not present in the concrete desired state MUST be removed. +- **FR-017**: Deletions of managed config files MUST be explicit in plan/apply + output and constrained to the managed config roots for selected services. +- **FR-018**: Partial apply or failed reconciliation MUST leave the system in a + recoverable state; a subsequent apply MUST converge by re-applying desired + config files and re-attempting deletions. + +### Key Entities *(include if feature involves data)* + +- **Repository Model**: The structure that separates shared base artifacts and + per-host overlays. +- **Host Identity**: The identifier used to select host-specific overlays. +- **Host Declaration**: The `hosts//host.yaml` file that declares the + host identity and selected services. +- **Base Artifact**: A shared Quadlet or systemd unit definition. +- **Drop-in Overlay**: A host-specific override applied via native drop-in + mechanisms. +- **Service Selection**: A host-scoped declaration of which shared services are + included in desired state. +- **Evaluated Desired State**: The concrete output after base + drop-in + evaluation, passed to planning and apply. +- **Service Catalog**: The repository `services/` directory containing reusable + base artifacts and their native drop-ins. +- **Host Overlay Set**: A `hosts//overrides/` tree containing host-specific + drop-ins layered after service selection. +- **Config Payload**: Managed files/directories for a service that are + materialized to bounded host paths and referenced by Quadlets. +- **Config Overlay**: Host-specific config files/directories that replace or + layer on top of base config payloads. 
+ +## Repository Structure Example + +```text +repo/ +├── services/ +│ ├── traefik/ +│ │ ├── quadlet/ +│ │ │ ├── traefik.container +│ │ │ └── traefik.socket +│ │ ├── quadlet-overrides/ +│ │ │ └── traefik.container.d/ +│ │ │ └── 10-defaults.conf +│ │ └── config/ +│ │ └── etc/ +│ │ └── traefik/ +│ │ ├── traefik.toml +│ │ └── dynamic/ +│ │ └── routers.toml +│ │ +│ ├── immich/ +│ │ ├── quadlet/ +│ │ │ ├── immich.container +│ │ │ └── immich.volume +│ │ └── quadlet-overrides/ +│ │ └── immich.container.d/ +│ │ └── 10-defaults.conf +│ │ +│ ├── vector/ +│ │ └── quadlet/ +│ │ └── vector.container +│ │ +│ └── whoami/ +│ └── quadlet/ +│ └── whoami.container +│ +├── hosts/ +│ ├── kadath/ +│ │ ├── host.yaml +│ │ └── overrides/ +│ │ ├── quadlet/ +│ │ │ └── traefik.container.d/ +│ │ │ └── 20-host.conf +│ │ └── config/ +│ │ └── etc/ +│ │ └── traefik/ +│ │ ├── traefik.toml +│ │ └── dynamic/ +│ │ └── local.toml +│ │ +│ ├── rlyeh/ +│ │ ├── host.yaml +│ │ └── overrides/ +│ │ └── quadlet/ +│ │ └── vector.container.d/ +│ │ └── 20-host.conf +│ │ +│ └── ulthar/ +│ ├── host.yaml +│ └── overrides/ +│ └── quadlet/ +│ └── whoami.container.d/ +│ └── 20-host.conf +│ +└── README.md +``` + +Example `hosts/kadath/host.yaml`: + +```yaml +host: kadath +services: + - traefik + - immich +``` + +## Constitution Alignment *(mandatory)* + +- **Functional core vs. side effects**: Evaluation is pure and deterministic; + filesystem and systemd interactions remain in boundary layers. +- **Declarative state model**: Base, overlay, and evaluated desired state are + explicit data structures. +- **Idempotence & convergence**: Repeated evaluation and apply produce stable + outputs with no unintended changes. +- **Explicit effects/failures**: Overlay conflicts and invalid inputs fail with + clear diagnostics. +- **Observability**: Evaluation output and overlay application are reported in + plan/audit data. +- **Safe defaults**: No overlay is applied unless explicitly scoped to the host. 
+- **Compatibility**: Existing single-host reconciliation remains unchanged after + evaluation produces concrete state. +- **Test contract**: Tests cover overlay evaluation, conflict handling, and + deterministic output. +- **Regenerability**: Specs and tests define evaluation semantics explicitly. + +## Success Criteria *(mandatory)* + +### Measurable Outcomes + +- **SC-001**: 100% of evaluation runs on unchanged inputs produce identical + concrete desired state. +- **SC-002**: 100% of invalid overlay inputs fail during evaluation with explicit + diagnostics before planning. +- **SC-003**: Operators can apply a shared base plus host-specific service + selection and overlays to at least three hosts without duplicating base + artifacts. +- **SC-004**: Evaluation adds no more than 1 second of overhead per 50 artifacts + on a single host. + +## Assumptions + +- Host identity can be determined from a stable, operator-configurable source. +- Service selection can be declared in the repository per host. +- Operators will keep base artifacts and overlays in a single repository. + +## Risks & Open Questions + +- Determining a host identity source that is stable across reboots and + re-provisioning may require explicit operator configuration. +- Overlay conflicts may be common in real-world repos; diagnostics must remain + clear and actionable. +- Native drop-in semantics differ between Quadlet and systemd; evaluation rules + must avoid surprising merges. +- Managed config cleanup for deselected services requires a three-way state + model and is deferred to a later iteration. 
diff --git a/specs/003-layered-overrides/tasks.md b/specs/003-layered-overrides/tasks.md new file mode 100644 index 0000000..8b17b49 --- /dev/null +++ b/specs/003-layered-overrides/tasks.md @@ -0,0 +1,171 @@ +--- + +description: "Task list for layered overrides feature" +--- + +# Tasks: Layered Overrides for Reusable Desired State + +**Input**: Design documents from `/specs/003-layered-overrides/` +**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/ + +**Tests**: Tests are REQUIRED for this feature. + +**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story. + +## Format: `[ID] [P?] [Story] Description` + +- **[P]**: Can run in parallel (different files, no dependencies) +- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3) +- Include exact file paths in descriptions + +## Phase 1: Setup (Shared Infrastructure) + +**Purpose**: Project initialization and test fixture scaffolding + +- [X] T001 Create layered overrides fixture repo in `tests/fixtures/layered_overrides/README.md` +- [X] T002 [P] Add fixture service/host files under `tests/fixtures/layered_overrides/services/` and `tests/fixtures/layered_overrides/hosts/` + +--- + +## Phase 2: Foundational (Blocking Prerequisites) + +**Purpose**: Core data structures, repo loading, and evaluation pipeline primitives + +**⚠️ CRITICAL**: No user story work can begin until this phase is complete + +- [X] T003 Define layered override data structures in `src/core/types.rs` +- [X] T004 Implement host identity selection config (CLI/env) in `src/cli/args.rs` +- [X] T005 Wire host identity override into runtime config in `src/main.rs` +- [X] T006 Implement repository loader for `services/` and `hosts//host.yaml` in `src/io/repo.rs` +- [X] T007 Implement base/overlay drop-in discovery helpers in `src/io/repo.rs` +- [X] T008 Add validation rules for service selection + drop-in targets in 
`src/core/validation.rs` +- [X] T009 Add evaluation pipeline module scaffold in `src/core/evaluate.rs` +- [X] T010 Export evaluation module from `src/core/mod.rs` + +**Checkpoint**: Foundation ready - user story implementation can now begin + +--- + +## Phase 3: User Story 1 - Reuse shared base definitions across hosts (Priority: P1) 🎯 MVP + +**Goal**: Host-level service selection and base artifacts evaluation without overlays + +**Independent Test**: Two hosts with different service selections produce different concrete desired states limited to their selections. + +### Tests for User Story 1 ⚠️ + +- [X] T011 [P] [US1] Unit test for host selection parsing in `tests/unit/test_repo_selection.rs` +- [X] T012 [P] [US1] Integration test for host-specific service selection in `tests/integration/test_service_selection.rs` + +### Implementation for User Story 1 + +- [X] T013 [US1] Load host declaration and selected services in `src/io/repo.rs` +- [X] T014 [US1] Build service catalog from `services/` in `src/io/repo.rs` +- [X] T015 [US1] Produce evaluated desired state from selected base artifacts in `src/core/evaluate.rs` +- [X] T016 [US1] Integrate evaluation output into planner inputs in `src/core/planner.rs` +- [X] T017 [US1] Update reconcile flow to use evaluated desired state in `src/core/reconcile.rs` + +**Checkpoint**: User Story 1 functional and testable + +--- + +## Phase 4: User Story 2 - Apply host-specific drop-ins without templating (Priority: P2) + +**Goal**: Apply host-specific drop-ins after base drop-ins using native ordering rules + +**Independent Test**: A host-specific drop-in changes only the target host’s concrete desired state. 
+ +### Tests for User Story 2 ⚠️ + +- [X] T018 [P] [US2] Unit test for drop-in ordering/precedence in `tests/unit/test_dropin_order.rs` +- [X] T019 [P] [US2] Integration test for host overlays application in `tests/integration/test_host_overrides.rs` +- [X] T041 [P] [US2] Integration test for stale managed config removal via root scanning in `tests/integration/test_config_cleanup.rs` +- [X] T042 [P] [US2] Integration test for managed config root closure behavior in `tests/integration/test_config_roots.rs` + +### Implementation for User Story 2 + +- [X] T020 [US2] Apply base drop-ins in lexicographic order in `src/core/evaluate.rs` +- [X] T021 [US2] Apply host overrides after base drop-ins in `src/core/evaluate.rs` +- [X] T022 [US2] Validate drop-in targets and file types in `src/core/validation.rs` +- [X] T023 [US2] Track applied source layers in evaluated artifacts in `src/core/types.rs` + + +- [X] T035 [US2] Define config payload model (base + host overrides) in `src/core/types.rs` +- [X] T036 [US2] Load config payloads from `services//config/` in `src/io/repo.rs` +- [X] T037 [US2] Load host config overrides from `hosts//overrides/config/` in `src/io/repo.rs` +- [X] T038 [US2] Apply config layering rules (whole-file replacement + directory overlay) in `src/core/evaluate.rs` +- [X] T039 [US2] Materialize config payloads to bounded host paths in `src/io/apply.rs` +- [X] T040 [US2] Validate bounded config roots and reject unsupported formats in `src/core/validation.rs` +- [X] T043 [US2] Add managed config roots to desired state in `src/core/types.rs` +- [X] T044 [US2] Read managed config roots when building observed state in `src/io/observed.rs` + +**Checkpoint**: User Story 2 functional and testable + +--- + +## Phase 5: User Story 3 - Deterministic, testable evaluation (Priority: P3) + +**Goal**: Deterministic evaluation with explicit failure behavior + +**Independent Test**: Repeated evaluation yields identical outputs; invalid inputs fail before planning. 
+ +### Tests for User Story 3 ⚠️ + +- [X] T024 [P] [US3] Unit test for deterministic evaluation ordering in `tests/unit/test_evaluation_determinism.rs` +- [X] T025 [P] [US3] Integration test for invalid overlay failure in `tests/integration/test_overlay_validation.rs` + +### Implementation for User Story 3 + +- [X] T026 [US3] Ensure stable ordering of artifacts/drop-ins in `src/core/evaluate.rs` +- [X] T027 [US3] Surface evaluation diagnostics in `src/core/errors.rs` +- [X] T028 [US3] Add evaluation audit output for plan/apply in `src/core/audit.rs` + +**Checkpoint**: User Story 3 functional and testable + +--- + +## Phase 6: Polish & Cross-Cutting Concerns + +**Purpose**: Documentation, validation, and maintenance tasks + +- [X] T029 [P] Add performance benchmark for evaluation overhead in `tests/integration/test_performance.rs` +- [X] T030 [P] Add test that rejects templating placeholders in configs in `tests/unit/test_no_templating.rs` +- [X] T031 [P] Add validation test for Quadlet vs systemd drop-in distinction in `tests/unit/test_dropin_types.rs` +- [X] T032 [P] Update quickstart validation to include layered override fixtures in `tests/integration/test_quickstart_validation.rs` +- [X] T033 [P] Update developer documentation for layered overrides in `docs/development.md` +- [X] T034 Run quickstart validation and integration tests referenced in `specs/003-layered-overrides/quickstart.md` + +--- + +## Dependencies & Execution Order + +### Phase Dependencies + +- **Setup (Phase 1)**: No dependencies - can start immediately +- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories +- **User Stories (Phase 3+)**: All depend on Foundational phase completion +- **Polish (Phase 6)**: Depends on all desired user stories being complete + +### User Story Dependencies + +- **User Story 1 (P1)**: Can start after Foundational (Phase 2) +- **User Story 2 (P2)**: Can start after Foundational (Phase 2), builds on US1 evaluation output +- **User 
Story 3 (P3)**: Can start after Foundational (Phase 2), validates US1/US2 behavior + +### Within Each User Story + +- Tests MUST be written and FAIL before implementation +- Repository load/validation before evaluation logic +- Evaluation logic before planner/reconcile integration + +### Parallel Opportunities + +- T002 can run in parallel with T001 +- Unit tests (T011, T018, T024) can run in parallel with their paired integration tests (T012, T019, T025) +- Documentation updates (T032, T033) can run in parallel + +--- + +## Parallel Example: User Story 1 + +- Parallel set: T011 + T012 (tests), then T013 + T014, then T015, then T016 + T017 diff --git a/src/cli/apply.rs b/src/cli/apply.rs index ea5536a..04e802a 100644 --- a/src/cli/apply.rs +++ b/src/cli/apply.rs @@ -17,7 +17,9 @@ pub fn apply( let repo_source = repo_source.to_string(); let deps = ReconcileDependencies { load_desired: &|| load_desired_state(&repo_source, revision).map_err(map_plan_error), - read_observed: &|| read_observed_state(quadlet_dir, None).map_err(map_plan_error), + read_observed: &|desired| { + read_observed_state(quadlet_dir, Some(desired), None).map_err(map_plan_error) + }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, quadlet_dir, reload_systemd) .map(|_| ()) @@ -40,7 +42,9 @@ pub fn apply_with_report( let repo_source = repo_source.to_string(); let deps = ReconcileDependencies { load_desired: &|| load_desired_state(&repo_source, revision).map_err(map_plan_error), - read_observed: &|| read_observed_state(quadlet_dir, None).map_err(map_plan_error), + read_observed: &|desired| { + read_observed_state(quadlet_dir, Some(desired), None).map_err(map_plan_error) + }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, quadlet_dir, reload_systemd) .map(|_| ()) diff --git a/src/cli/args.rs b/src/cli/args.rs index 73c5c2e..a3512d2 100644 --- a/src/cli/args.rs +++ b/src/cli/args.rs @@ -29,6 +29,9 @@ pub struct PlanArgs { /// Git revision (branch, tag, or 
commit). #[arg(long)] pub rev: String, + /// Host identity override for selecting hosts/. + #[arg(long)] + pub host: Option, /// System-level Quadlet directory. #[arg(long, default_value = "/etc/containers/systemd")] pub quadlet_dir: PathBuf, @@ -48,6 +51,9 @@ pub struct ApplyArgs { /// Git revision (branch, tag, or commit). #[arg(long)] pub rev: String, + /// Host identity override for selecting hosts/. + #[arg(long)] + pub host: Option, /// System-level Quadlet directory. #[arg(long, default_value = "/etc/containers/systemd")] pub quadlet_dir: PathBuf, @@ -70,6 +76,9 @@ pub struct AgentArgs { /// Git revision (branch, tag, or commit). #[arg(long)] pub rev: Option, + /// Host identity override for selecting hosts/. + #[arg(long)] + pub host: Option, /// System-level Quadlet directory. #[arg(long)] pub quadlet_dir: Option, diff --git a/src/cli/plan.rs b/src/cli/plan.rs index 53a277f..8c14863 100644 --- a/src/cli/plan.rs +++ b/src/cli/plan.rs @@ -1,5 +1,5 @@ use crate::cli::report::format_plan_report; -use crate::core::audit::{build_audit_event, build_audit_record, AuditEvent}; +use crate::core::audit::{build_audit_event, build_audit_record, summarize_evaluation, AuditEvent}; use crate::core::errors::CoreError; use crate::core::reconcile::{reconcile_plan, ReconcileDependencies}; use crate::core::types::AuditRecord; @@ -13,7 +13,10 @@ pub struct PlanOutput { pub fn plan(deps: &ReconcileDependencies<'_>) -> Result { let result = reconcile_plan(deps)?; let diffs = result.diffs; - let audit = build_audit_record(&result.run.run_id, diffs.clone(), &result.plan, Vec::new()); + let mut audit = build_audit_record(&result.run.run_id, diffs.clone(), &result.plan, Vec::new()); + audit + .operator_messages + .push(summarize_evaluation(&result.desired)); let event = build_audit_event(&result.run, Some(&result.plan), &[]); Ok(PlanOutput { diff --git a/src/cli/report.rs b/src/cli/report.rs index c157aec..957d18e 100644 --- a/src/cli/report.rs +++ b/src/cli/report.rs @@ -31,6 +31,8 
@@ fn quadlet_type_label(quadlet_type: Option) -> &'static str { match quadlet_type { Some(QuadletType::Container) => "container", Some(QuadletType::Socket) => "socket", + Some(QuadletType::SocketDropIn) => "socket-dropin", + Some(QuadletType::ConfigFile) => "config", Some(QuadletType::Volume) => "volume", Some(QuadletType::Pod) => "pod", Some(QuadletType::Network) => "network", @@ -44,14 +46,20 @@ fn action_label( ) -> String { match action { crate::core::types::PlanActionType::WriteQuadlet => { - if matches!(quadlet_type, Some(QuadletType::Socket)) { + if matches!( + quadlet_type, + Some(QuadletType::Socket) | Some(QuadletType::SocketDropIn) + ) { "WriteUnit".to_string() } else { "WriteQuadlet".to_string() } } crate::core::types::PlanActionType::RemoveQuadlet => { - if matches!(quadlet_type, Some(QuadletType::Socket)) { + if matches!( + quadlet_type, + Some(QuadletType::Socket) | Some(QuadletType::SocketDropIn) + ) { "RemoveUnit".to_string() } else { "RemoveQuadlet".to_string() diff --git a/src/core/audit.rs b/src/core/audit.rs index 2fb9841..2f879e7 100644 --- a/src/core/audit.rs +++ b/src/core/audit.rs @@ -1,6 +1,6 @@ use crate::core::types::{ AuditRecord, DiffItem, FailureClass, PlanAction, ReconcileRun, ReconciliationPlan, RunStatus, - VerificationResult, VerificationStatus, + DesiredState, QuadletType, VerificationResult, VerificationStatus, }; pub fn build_audit_record( @@ -52,6 +52,30 @@ pub fn format_audit_record(record: &AuditRecord) -> String { output } +pub fn summarize_evaluation(desired: &DesiredState) -> String { + let mut socket_dropins = 0; + let mut sockets = 0; + let mut containers = 0; + let mut volumes = 0; + for workload in &desired.workloads { + match workload.quadlet_type { + QuadletType::SocketDropIn => socket_dropins += 1, + QuadletType::Socket => sockets += 1, + QuadletType::Container => containers += 1, + QuadletType::Volume => volumes += 1, + _ => {} + } + } + format!( + "evaluation: workloads={}, containers={}, volumes={}, sockets={}, 
socket_dropins={}", + desired.workloads.len(), + containers, + volumes, + sockets, + socket_dropins + ) +} + #[derive(Clone, Debug, PartialEq, Eq)] pub struct AuditEvent { pub run_id: String, diff --git a/src/core/boundaries.rs b/src/core/boundaries.rs index 7926271..26d72b6 100644 --- a/src/core/boundaries.rs +++ b/src/core/boundaries.rs @@ -26,6 +26,7 @@ fn is_supported_action(action_type: &PlanActionType) -> bool { | PlanActionType::RemoveQuadlet | PlanActionType::ReloadSystemd | PlanActionType::StartUnit + | PlanActionType::RestartUnit | PlanActionType::StopUnit ) } diff --git a/src/core/errors.rs b/src/core/errors.rs index b4a3098..600847c 100644 --- a/src/core/errors.rs +++ b/src/core/errors.rs @@ -28,6 +28,9 @@ pub enum ValidationErrorKind { DuplicateWorkload, DuplicateUnitName, UnsupportedQuadletType, + UndefinedServiceSelection, + MissingArtifactTarget, + InvalidDropInOrdering, } #[derive(Clone, Debug, PartialEq, Eq, Error)] @@ -46,6 +49,20 @@ impl ValidationError { } } +#[derive(Clone, Debug, PartialEq, Eq, Error)] +#[error("{message}")] +pub struct EvaluationError { + pub message: String, +} + +impl EvaluationError { + pub fn new(message: impl Into) -> Self { + Self { + message: message.into(), + } + } +} + #[derive(Clone, Debug, PartialEq, Eq, Error)] pub enum RunLockError { #[error("run lock already held")] diff --git a/src/core/evaluate.rs b/src/core/evaluate.rs new file mode 100644 index 0000000..81633cb --- /dev/null +++ b/src/core/evaluate.rs @@ -0,0 +1,148 @@ +use crate::core::errors::EvaluationError; +use crate::core::types::{ + ConfigFileSource, DropInSource, EvaluatedArtifact, EvaluatedConfigFile, EvaluatedDropIn, + EvaluationInput, QuadletType, +}; +use std::path::Path; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct EvaluationOutput { + pub artifacts: Vec, + pub socket_dropins: Vec, + pub config_files: Vec, +} + +pub fn evaluate_desired_state(input: &EvaluationInput) -> Result { + let mut artifacts = Vec::new(); + let mut socket_dropins 
= Vec::new(); + for service_name in &input.host.services { + let service = input + .catalog + .services + .get(service_name) + .ok_or_else(|| EvaluationError::new(format!("missing service: {}", service_name)))?; + for artifact in &service.artifacts { + let mut contents = artifact.contents.clone(); + let mut source_layers = vec![artifact.source_path.clone()]; + + let base_dropins = collect_dropins(&service.base_dropins, &artifact.name); + let host_dropins = collect_dropins(&input.overlays.overrides, &artifact.name); + + if artifact.quadlet_type == QuadletType::Socket { + socket_dropins.extend(to_socket_dropins(&artifact.name, &base_dropins)); + socket_dropins.extend(to_socket_dropins(&artifact.name, &host_dropins)); + } else { + apply_dropins(&mut contents, &mut source_layers, &base_dropins); + apply_dropins(&mut contents, &mut source_layers, &host_dropins); + } + + artifacts.push(EvaluatedArtifact { + name: artifact.name.clone(), + quadlet_type: artifact.quadlet_type.clone(), + contents, + source_layers, + }); + } + + } + let base_configs: Vec = input + .host + .services + .iter() + .filter_map(|service_name| input.catalog.services.get(service_name)) + .flat_map(|service| service.config_files.iter().cloned()) + .collect(); + let host_configs = input + .overlays + .config_overrides + .iter() + .filter(|cfg| cfg.target_path.starts_with("/etc/")) + .cloned() + .collect::>(); + let config_files = overlay_config_files(&base_configs, &host_configs); + artifacts.sort_by(|a, b| a.name.cmp(&b.name)); + socket_dropins.sort_by(|a, b| { + (a.target.clone(), a.file_name.clone()).cmp(&(b.target.clone(), b.file_name.clone())) + }); + Ok(EvaluationOutput { + artifacts, + socket_dropins, + config_files, + }) +} + +fn collect_dropins(dropins: &[DropInSource], target: &str) -> Vec { + let mut matches: Vec = dropins + .iter() + .filter(|dropin| dropin.target == target) + .cloned() + .collect(); + matches.sort_by(|a, b| 
dropin_order_key(&a.source_path).cmp(&dropin_order_key(&b.source_path))); + matches +} + +fn apply_dropins(contents: &mut String, sources: &mut Vec, dropins: &[DropInSource]) { + for dropin in dropins { + if !contents.ends_with('\n') { + contents.push('\n'); + } + contents.push_str(&dropin.contents); + sources.push(dropin.source_path.clone()); + } +} + +fn dropin_order_key(path: &str) -> String { + Path::new(path) + .file_name() + .and_then(|name| name.to_str()) + .unwrap_or(path) + .to_string() +} + +fn to_socket_dropins(target: &str, dropins: &[DropInSource]) -> Vec { + dropins + .iter() + .map(|dropin| EvaluatedDropIn { + target: target.to_string(), + file_name: dropin_file_name(&dropin.source_path), + contents: dropin.contents.clone(), + source_path: dropin.source_path.clone(), + }) + .collect() +} + +fn dropin_file_name(path: &str) -> String { + Path::new(path) + .file_name() + .and_then(|name| name.to_str()) + .unwrap_or(path) + .to_string() +} + +fn overlay_config_files( + base_files: &[ConfigFileSource], + host_files: &[ConfigFileSource], +) -> Vec { + let mut map: std::collections::BTreeMap = std::collections::BTreeMap::new(); + for cfg in base_files { + map.insert( + cfg.target_path.clone(), + EvaluatedConfigFile { + target_path: cfg.target_path.clone(), + contents: cfg.contents.clone(), + source_layers: vec![cfg.source_path.clone()], + }, + ); + } + for cfg in host_files { + map.insert( + cfg.target_path.clone(), + EvaluatedConfigFile { + target_path: cfg.target_path.clone(), + contents: cfg.contents.clone(), + source_layers: vec![cfg.source_path.clone()], + }, + ); + } + map.into_values().collect() +} diff --git a/src/core/mod.rs b/src/core/mod.rs index 0e88be7..a8848ff 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1,6 +1,7 @@ pub mod audit; pub mod boundaries; pub mod diff; +pub mod evaluate; pub mod errors; pub mod planner; pub mod reconcile; diff --git a/src/core/planner.rs b/src/core/planner.rs index 0335eac..7ed3205 100644 --- 
a/src/core/planner.rs +++ b/src/core/planner.rs @@ -69,6 +69,8 @@ fn actions_for_diff( container_stems: &HashSet, ) -> Vec { let manage_unit = match quadlet_type { + Some(QuadletType::SocketDropIn) => false, + Some(QuadletType::ConfigFile) => false, Some(QuadletType::Volume) => false, Some(QuadletType::Container) => { let stem = stem_for_unit_name(name); @@ -79,15 +81,23 @@ fn actions_for_diff( } _ => true, }; + let reload_systemd = !matches!(quadlet_type, Some(QuadletType::ConfigFile)); match kind { DiffKind::Add => { let mut actions = vec![ action(PlanActionType::WriteQuadlet, name), - action(PlanActionType::ReloadSystemd, name), ]; + if reload_systemd { + actions.push(action(PlanActionType::ReloadSystemd, name)); + } if manage_unit { actions.push(action(PlanActionType::StartUnit, name)); } + if should_restart_socket_for_dropin(quadlet_type.as_ref(), name) { + if let Some(socket_unit) = socket_unit_from_dropin_name(name) { + actions.push(action(PlanActionType::RestartUnit, &socket_unit)); + } + } if should_start_service_for_socket(quadlet_type.as_ref(), name, container_stems) { actions.push(action( PlanActionType::StartUnit, @@ -102,16 +112,36 @@ fn actions_for_diff( actions.push(action(PlanActionType::StopUnit, name)); } actions.push(action(PlanActionType::RemoveQuadlet, name)); - actions.push(action(PlanActionType::ReloadSystemd, name)); + if reload_systemd { + actions.push(action(PlanActionType::ReloadSystemd, name)); + } + if should_restart_socket_for_dropin(quadlet_type.as_ref(), name) { + if let Some(socket_unit) = socket_unit_from_dropin_name(name) { + actions.push(action(PlanActionType::RestartUnit, &socket_unit)); + } + } actions } DiffKind::Change => { let mut actions = vec![ action(PlanActionType::WriteQuadlet, name), - action(PlanActionType::ReloadSystemd, name), ]; + if reload_systemd { + actions.push(action(PlanActionType::ReloadSystemd, name)); + } if manage_unit { - actions.push(action(PlanActionType::StartUnit, name)); + 
actions.push(action(PlanActionType::RestartUnit, name)); + } + if should_restart_socket_for_dropin(quadlet_type.as_ref(), name) { + if let Some(socket_unit) = socket_unit_from_dropin_name(name) { + actions.push(action(PlanActionType::RestartUnit, &socket_unit)); + } + } + if should_restart_service_for_container(quadlet_type.as_ref(), name, socket_stems) { + actions.push(action( + PlanActionType::RestartUnit, + &format!("{}.service", stem_for_unit_name(name).unwrap_or(name)), + )); } if should_start_service_for_socket(quadlet_type.as_ref(), name, container_stems) { actions.push(action( @@ -158,6 +188,33 @@ fn should_start_service_for_socket( } } +fn should_restart_service_for_container( + quadlet_type: Option<&QuadletType>, + name: &str, + socket_stems: &HashSet, +) -> bool { + if !matches!(quadlet_type, Some(QuadletType::Container)) { + return false; + } + match stem_for_unit_name(name) { + Some(stem) => socket_stems.contains(stem), + None => false, + } +} + +fn should_restart_socket_for_dropin( + quadlet_type: Option<&QuadletType>, + name: &str, +) -> bool { + matches!(quadlet_type, Some(QuadletType::SocketDropIn)) + && socket_unit_from_dropin_name(name).is_some() +} + +fn socket_unit_from_dropin_name(name: &str) -> Option { + let marker = ".socket.d/"; + name.find(marker).map(|idx| name[..idx + ".socket".len()].to_string()) +} + fn order_diffs(diffs: &mut [DiffItem]) { diffs.sort_by(|a, b| { let a_key = ordering_key(a); @@ -183,23 +240,27 @@ fn ordering_key(diff: &DiffItem) -> (u8, String) { fn order_for_type(quadlet_type: Option) -> u8 { match quadlet_type { - Some(QuadletType::Volume) => 0, - Some(QuadletType::Container) => 1, - Some(QuadletType::Socket) => 2, - Some(QuadletType::Pod) => 3, - Some(QuadletType::Network) => 4, - None => 5, + Some(QuadletType::ConfigFile) => 0, + Some(QuadletType::Volume) => 1, + Some(QuadletType::Network) => 2, + Some(QuadletType::Container) => 3, + Some(QuadletType::SocketDropIn) => 4, + Some(QuadletType::Socket) => 5, + 
Some(QuadletType::Pod) => 6, + None => 7, } } fn reverse_order_for_type(quadlet_type: Option) -> u8 { match quadlet_type { - Some(QuadletType::Socket) => 0, - Some(QuadletType::Container) => 1, - Some(QuadletType::Volume) => 2, - Some(QuadletType::Pod) => 3, + Some(QuadletType::SocketDropIn) => 0, + Some(QuadletType::Socket) => 1, + Some(QuadletType::Container) => 2, + Some(QuadletType::Volume) => 3, Some(QuadletType::Network) => 4, - None => 5, + Some(QuadletType::ConfigFile) => 5, + Some(QuadletType::Pod) => 6, + None => 7, } } diff --git a/src/core/reconcile.rs b/src/core/reconcile.rs index baf5b6d..c55ad1c 100644 --- a/src/core/reconcile.rs +++ b/src/core/reconcile.rs @@ -2,14 +2,16 @@ use crate::core::diff::diff_workloads; use crate::core::errors::CoreError; use crate::core::planner::plan; use crate::core::types::{ - DiffItem, FailureClass, ReconcileMode, ReconcileRun, ReconciliationPlan, RunStatus, - VerificationResult, VerificationStatus, + DesiredState, DiffItem, FailureClass, ReconcileMode, ReconcileRun, ReconciliationPlan, + RunStatus, VerificationResult, VerificationStatus, }; use crate::core::verify::verify_state; pub struct ReconcileDependencies<'a> { pub load_desired: &'a dyn Fn() -> Result, - pub read_observed: &'a dyn Fn() -> Result, + pub read_observed: &'a dyn Fn( + &crate::core::types::DesiredState, + ) -> Result, pub apply_plan: &'a dyn Fn(&crate::core::types::ReconciliationPlan, &crate::core::types::DesiredState) -> Result<(), CoreError>, @@ -19,16 +21,18 @@ pub struct PlanResult { pub run: ReconcileRun, pub plan: ReconciliationPlan, pub diffs: Vec, + pub desired: DesiredState, } pub struct ApplyResult { pub run: ReconcileRun, pub verification_results: Vec, + pub desired: DesiredState, } pub fn reconcile_plan(deps: &ReconcileDependencies<'_>) -> Result { let desired = (deps.load_desired)()?; - let observed = (deps.read_observed)()?; + let observed = (deps.read_observed)(&desired)?; let plan = plan(&desired, &observed)?; let diffs = 
diff_workloads(&desired.workloads, &observed.workloads); @@ -41,12 +45,17 @@ pub fn reconcile_plan(deps: &ReconcileDependencies<'_>) -> Result) -> Result { let desired = (deps.load_desired)()?; - let observed = (deps.read_observed)()?; + let observed = (deps.read_observed)(&desired)?; let plan = plan(&desired, &observed)?; @@ -54,7 +63,7 @@ pub fn reconcile_apply(deps: &ReconcileDependencies<'_>) -> Result) -> Result, + pub managed_config_paths: Vec, + pub managed_config_roots: Vec, pub invariants: Vec, pub boundaries: Boundaries, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ServiceCatalog { + pub services: BTreeMap, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ServiceDefinition { + pub name: String, + pub artifacts: Vec, + pub base_dropins: Vec, + pub config_files: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HostDeclaration { + pub host: String, + pub services: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HostOverlaySet { + pub host: String, + pub overrides: Vec, + pub config_overrides: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ArtifactSource { + pub name: String, + pub quadlet_type: QuadletType, + pub contents: String, + pub source_path: String, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct DropInSource { + pub target: String, + pub contents: String, + pub source_path: String, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ConfigFileSource { + pub target_path: String, + pub contents: String, + pub source_path: String, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct EvaluationInput { + pub host: HostDeclaration, + pub catalog: ServiceCatalog, + pub overlays: HostOverlaySet, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct EvaluatedArtifact { + pub name: String, + pub quadlet_type: QuadletType, + pub contents: String, + pub source_layers: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct EvaluatedDropIn { + pub target: String, + pub 
file_name: String, + pub contents: String, + pub source_path: String, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct EvaluatedConfigFile { + pub target_path: String, + pub contents: String, + pub source_layers: Vec, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub struct Workload { pub name: String, @@ -104,6 +184,8 @@ pub enum DiffKind { pub enum QuadletType { Container, Socket, + SocketDropIn, + ConfigFile, Pod, Volume, Network, @@ -137,6 +219,7 @@ pub enum PlanActionType { DisableUnit, ReloadSystemd, StartUnit, + RestartUnit, StopUnit, Unknown(String), } diff --git a/src/core/validation.rs b/src/core/validation.rs index f8f693a..d9e7b08 100644 --- a/src/core/validation.rs +++ b/src/core/validation.rs @@ -1,7 +1,11 @@ -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; +use std::path::Path; use crate::core::errors::{ValidationError, ValidationErrorKind}; -use crate::core::types::{Boundaries, BoundaryScope, DesiredState, Invariant, Workload}; +use crate::core::types::{ + ArtifactSource, Boundaries, BoundaryScope, DesiredState, DropInSource, HostDeclaration, + Invariant, ServiceCatalog, Workload, +}; pub fn validate_desired_state(desired: &DesiredState) -> Result<(), ValidationError> { validate_invariants(&desired.invariants)?; @@ -10,6 +14,105 @@ pub fn validate_desired_state(desired: &DesiredState) -> Result<(), ValidationEr Ok(()) } +pub fn validate_service_selection( + host: &HostDeclaration, + catalog: &ServiceCatalog, +) -> Result<(), ValidationError> { + for service in &host.services { + if !catalog.services.contains_key(service) { + return Err(ValidationError::new( + ValidationErrorKind::UndefinedServiceSelection, + format!("undefined service selection: {}", service), + )); + } + } + Ok(()) +} + +pub fn validate_dropin_targets( + dropins: &[DropInSource], + artifacts: &[ArtifactSource], +) -> Result<(), ValidationError> { + let targets: HashSet = artifacts.iter().map(|a| a.name.clone()).collect(); + for dropin in dropins { + 
if !targets.contains(&dropin.target) { + return Err(ValidationError::new( + ValidationErrorKind::MissingArtifactTarget, + format!("drop-in target does not exist: {}", dropin.target), + )); + } + } + Ok(()) +} + +pub fn validate_socket_dropin_precedence( + base_dropins: &[DropInSource], + host_dropins: &[DropInSource], +) -> Result<(), ValidationError> { + let mut base_max: HashMap<&str, String> = HashMap::new(); + for dropin in base_dropins { + if !dropin.target.ends_with(".socket") { + continue; + } + let file_name = dropin_file_name(&dropin.source_path); + base_max + .entry(dropin.target.as_str()) + .and_modify(|current| { + if file_name > *current { + *current = file_name.clone(); + } + }) + .or_insert(file_name); + } + + for dropin in host_dropins { + if !dropin.target.ends_with(".socket") { + continue; + } + let Some(base_name) = base_max.get(dropin.target.as_str()) else { + continue; + }; + let file_name = dropin_file_name(&dropin.source_path); + if file_name <= *base_name { + return Err(ValidationError::new( + ValidationErrorKind::InvalidDropInOrdering, + format!( + "host socket drop-in must sort after base drop-ins: target={} host={} base_max={}", + dropin.target, file_name, base_name + ), + )); + } + } + + Ok(()) +} + +pub fn validate_config_paths(paths: &[String]) -> Result<(), ValidationError> { + for path in paths { + if !path.starts_with("/etc/") { + return Err(ValidationError::new( + ValidationErrorKind::MissingArtifactTarget, + format!("config path outside allowed root: {}", path), + )); + } + if path.contains("..") { + return Err(ValidationError::new( + ValidationErrorKind::MissingArtifactTarget, + format!("config path traversal not allowed: {}", path), + )); + } + } + Ok(()) +} + +fn dropin_file_name(path: &str) -> String { + Path::new(path) + .file_name() + .and_then(|name| name.to_str()) + .unwrap_or(path) + .to_string() +} + fn validate_invariants(invariants: &[Invariant]) -> Result<(), ValidationError> { if 
!invariants.contains(&Invariant::BoundariesDeclared) { return Err(ValidationError::new( diff --git a/src/core/verify.rs b/src/core/verify.rs index 9d0becc..ec3b8eb 100644 --- a/src/core/verify.rs +++ b/src/core/verify.rs @@ -8,6 +8,12 @@ pub fn verify_state(desired: &DesiredState, observed: &ObservedState) -> Vec = Vec::new(); + for action in &plan.actions { match &action.action_type { PlanActionType::WriteQuadlet => { let workload = find_workload(desired_workloads, &action.target)?; - let path = target_dir_for_workload(quadlet_dir, workload) - .join(&workload.systemd_unit_name); + let path = if workload.quadlet_type == QuadletType::ConfigFile { + PathBuf::from(&workload.systemd_unit_name) + } else { + target_dir_for_workload(quadlet_dir, workload).join(&workload.systemd_unit_name) + }; + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } fs::write(&path, &workload.quadlet_contents)?; files_written.push(path.display().to_string()); } PlanActionType::RemoveQuadlet => { - let target_dir = target_dir_for_name(quadlet_dir, &action.target); - let mut removed = false; - for entry in fs::read_dir(&target_dir)? 
{ - let entry = entry?; - let path = entry.path(); - if let Some(file_name) = path.file_name().and_then(|name| name.to_str()) { - if file_name == action.target - || file_name.starts_with(&format!("{}.", action.target)) - { - fs::remove_file(&path)?; - files_removed.push(path.display().to_string()); - removed = true; + if action.target.contains(".socket.d/") { + let path = systemd_unit_dir().join(&action.target); + if path.exists() { + fs::remove_file(&path)?; + files_removed.push(path.display().to_string()); + if let Some(parent) = path.parent() { + if parent.read_dir()?.next().is_none() { + let _ = fs::remove_dir(parent); + } } + } else { + return Err(ApplyError::MissingWorkload(action.target.clone())); + } + } else if action.target.starts_with("/etc/") { + let path = PathBuf::from(&action.target); + if path.exists() { + fs::remove_file(&path)?; + files_removed.push(path.display().to_string()); + } else { + return Err(ApplyError::MissingWorkload(action.target.clone())); + } + } else { + let target_dir = target_dir_for_name(quadlet_dir, &action.target); + let mut removed = false; + for entry in fs::read_dir(&target_dir)? { + let entry = entry?; + let path = entry.path(); + if let Some(file_name) = path.file_name().and_then(|name| name.to_str()) { + if file_name == action.target + || file_name.starts_with(&format!("{}.", action.target)) + { + fs::remove_file(&path)?; + files_removed.push(path.display().to_string()); + removed = true; + } + } + } + if !removed { + return Err(ApplyError::MissingWorkload(action.target.clone())); } - } - if !removed { - return Err(ApplyError::MissingWorkload(action.target.clone())); } } PlanActionType::EnableUnit => { @@ -89,12 +120,14 @@ pub fn apply_plan( // Quadlet-generated units rely on [Install] processing; no disable call is needed. 
} PlanActionType::StartUnit => { - let unit = unit_name_for_start_stop(desired_workloads, quadlet_dir, &action.target)?; - run_systemctl(&["start", &unit])?; + deferred_units.push((PlanActionType::StartUnit, action.target.clone())); + } + PlanActionType::RestartUnit => { + deferred_units.push((PlanActionType::RestartUnit, action.target.clone())); } PlanActionType::StopUnit => { let unit = unit_name_for_start_stop(desired_workloads, quadlet_dir, &action.target)?; - run_systemctl(&["stop", &unit])?; + run_systemctl_allow_not_loaded(&["stop", &unit])?; } PlanActionType::ReloadSystemd => { if reload_systemd { @@ -110,6 +143,31 @@ pub fn apply_plan( } } + let mut restarted = std::collections::HashSet::new(); + let mut started = std::collections::HashSet::new(); + for (action_type, target) in deferred_units { + match action_type { + PlanActionType::RestartUnit => { + let unit = unit_name_for_start_stop(desired_workloads, quadlet_dir, &target)?; + run_systemctl(&["restart", &unit])?; + restarted.insert(target.clone()); + started.insert(target); + } + PlanActionType::StartUnit => { + if restarted.contains(&target) { + continue; + } + if started.contains(&target) { + continue; + } + let unit = unit_name_for_start_stop(desired_workloads, quadlet_dir, &target)?; + run_systemctl(&["start", &unit])?; + started.insert(target); + } + _ => {} + } + } + Ok(ApplyOutcome { actions_applied: plan.actions.clone(), files_written, @@ -119,18 +177,22 @@ pub fn apply_plan( fn target_dir_for_workload(quadlet_dir: &Path, workload: &Workload) -> PathBuf { match workload.quadlet_type { - QuadletType::Socket => systemd_unit_dir(), + QuadletType::Socket | QuadletType::SocketDropIn => systemd_unit_dir(), + QuadletType::ConfigFile => PathBuf::from("/"), _ => quadlet_dir.to_path_buf(), } } fn target_dir_for_name(quadlet_dir: &Path, target: &str) -> PathBuf { - if Path::new(target) - .extension() - .and_then(|ext| ext.to_str()) - == Some("socket") + if target.contains(".socket.d/") + || 
Path::new(target) + .extension() + .and_then(|ext| ext.to_str()) + == Some("socket") { systemd_unit_dir() + } else if target.starts_with("/etc/") { + PathBuf::from("/") } else { quadlet_dir.to_path_buf() } @@ -159,6 +221,21 @@ fn run_systemctl(args: &[&str]) -> Result<(), ApplyError> { Ok(()) } +fn run_systemctl_allow_not_loaded(args: &[&str]) -> Result<(), ApplyError> { + let output = std::process::Command::new("systemctl") + .args(args) + .output() + .map_err(|err| ApplyError::SystemdCommandFailed(err.to_string()))?; + if output.status.success() { + return Ok(()); + } + let stderr = String::from_utf8_lossy(&output.stderr); + if stderr.contains("not loaded") || stderr.contains("not found") { + return Ok(()); + } + Err(ApplyError::SystemdCommandFailed(stderr.to_string())) +} + fn unit_name_for_start_stop( workloads: &[Workload], diff --git a/src/io/observed.rs b/src/io/observed.rs index 48a87ad..7b00c02 100644 --- a/src/io/observed.rs +++ b/src/io/observed.rs @@ -1,9 +1,10 @@ +use std::collections::HashSet; use std::path::{Path, PathBuf}; use std::process::Command; use crate::core::types::{ - EnabledState, ObservedState, ObservedUnit, QuadletType, RestartPolicy, UnitActiveState, - Workload, + DesiredState, EnabledState, ObservedState, ObservedUnit, QuadletType, RestartPolicy, + UnitActiveState, Workload, }; use crate::io::quadlet::{ normalize_socket_contents, parse_quadlet_name, read_quadlet_dir, QuadletError, @@ -49,6 +50,7 @@ impl std::error::Error for ObservedError {} pub fn read_observed_state( quadlet_dir: &Path, + desired: Option<&DesiredState>, observed_revision_id: Option, ) -> Result { if !quadlet_dir.exists() { @@ -57,7 +59,16 @@ pub fn read_observed_state( let mut workloads: Vec = read_quadlet_dir(quadlet_dir)?; let socket_dir = systemd_unit_dir(); - workloads.extend(read_socket_units(&socket_dir)?); + let socket_units = read_socket_units(&socket_dir)?; + let allowed_socket_dropins = desired + .map(desired_socket_dropins) + .unwrap_or_default(); + let 
socket_dropins = read_socket_dropins(&socket_dir, &socket_units, &allowed_socket_dropins)?; + workloads.extend(socket_units); + workloads.extend(socket_dropins); + if let Some(desired) = desired { + workloads.extend(read_config_files(&desired.managed_config_roots)?); + } let units = read_systemd_units(&workloads)?; Ok(ObservedState { @@ -113,6 +124,60 @@ fn read_socket_units(dir: &Path) -> Result, ObservedError> { Ok(workloads) } +fn read_socket_dropins( + dir: &Path, + sockets: &[Workload], + allowed_dropins: &HashSet, +) -> Result, ObservedError> { + if allowed_dropins.is_empty() { + return Ok(Vec::new()); + } + let mut workloads = Vec::new(); + for socket in sockets { + let dropin_dir = dir.join(format!("{}.d", socket.systemd_unit_name)); + if !dropin_dir.exists() { + continue; + } + for entry in std::fs::read_dir(dropin_dir)? { + let entry = entry?; + let path = entry.path(); + if path.is_dir() { + continue; + } + let file_name = match path.file_name().and_then(|name| name.to_str()) { + Some(name) if !name.starts_with('.') => name.to_string(), + _ => continue, + }; + if !file_name.ends_with(".conf") { + continue; + } + let contents = std::fs::read_to_string(&path)?; + let unit_name = format!("{}.d/{}", socket.systemd_unit_name, file_name); + if !allowed_dropins.contains(&unit_name) { + continue; + } + workloads.push(Workload { + name: unit_name.clone(), + quadlet_type: QuadletType::SocketDropIn, + quadlet_contents: contents, + systemd_unit_name: unit_name, + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + }); + } + } + Ok(workloads) +} + +fn desired_socket_dropins(desired: &DesiredState) -> HashSet { + desired + .workloads + .iter() + .filter(|workload| workload.quadlet_type == QuadletType::SocketDropIn) + .map(|workload| workload.systemd_unit_name.clone()) + .collect() +} + fn read_systemd_units(workloads: &[Workload]) -> Result, ObservedError> { if !systemctl_available() { log::warn!("systemctl unavailable; skipping unit 
discovery"); @@ -121,6 +186,9 @@ fn read_systemd_units(workloads: &[Workload]) -> Result, Obser let mut units = Vec::new(); for workload in workloads { + if matches!(workload.quadlet_type, QuadletType::SocketDropIn | QuadletType::ConfigFile) { + continue; + } let unit_name = systemd_unit_for_quadlet_file(&workload.systemd_unit_name); match query_unit_state(&unit_name)? { Some(unit) => units.push(unit), @@ -131,6 +199,55 @@ fn read_systemd_units(workloads: &[Workload]) -> Result, Obser Ok(units) } +fn read_config_files(paths: &[String]) -> Result, ObservedError> { + let mut workloads = Vec::new(); + for config_path in paths { + let path = Path::new(config_path); + if !path.exists() { + continue; + } + if path.is_dir() { + read_config_dir(path, &mut workloads)?; + } else if path.is_file() { + let contents = std::fs::read_to_string(path)?; + workloads.push(Workload { + name: config_path.clone(), + quadlet_type: QuadletType::ConfigFile, + quadlet_contents: contents, + systemd_unit_name: config_path.clone(), + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + }); + } + } + Ok(workloads) +} + +fn read_config_dir(dir: &Path, workloads: &mut Vec) -> Result<(), ObservedError> { + for entry in std::fs::read_dir(dir)? 
{ + let entry = entry?; + let path = entry.path(); + if path.is_dir() { + read_config_dir(&path, workloads)?; + continue; + } + if !path.is_file() { + continue; + } + let contents = std::fs::read_to_string(&path)?; + let path_str = path.display().to_string(); + workloads.push(Workload { + name: path_str.clone(), + quadlet_type: QuadletType::ConfigFile, + quadlet_contents: contents, + systemd_unit_name: path_str, + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + }); + } + Ok(()) +} + fn systemctl_available() -> bool { let output = Command::new("systemctl") .arg("is-system-running") diff --git a/src/io/repo.rs b/src/io/repo.rs index 0807851..2fd3b04 100644 --- a/src/io/repo.rs +++ b/src/io/repo.rs @@ -1,11 +1,33 @@ +use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; use tempfile::TempDir; +use crate::core::evaluate::{evaluate_desired_state, EvaluationOutput}; use crate::core::types::{ - Boundaries, BoundaryScope, DesiredState, Invariant, Workload, + ArtifactSource, Boundaries, BoundaryScope, ConfigFileSource, DesiredState, DropInSource, + EnabledState, EvaluationInput, EvaluatedArtifact, EvaluatedConfigFile, EvaluatedDropIn, + HostDeclaration, HostOverlaySet, Invariant, QuadletType, RestartPolicy, ServiceCatalog, + ServiceDefinition, Workload, }; -use crate::io::quadlet::{read_quadlet_dir, QuadletError}; +use crate::io::quadlet::{parse_quadlet_name, read_quadlet_dir, QuadletError}; +use crate::core::validation::{ + validate_config_paths, validate_dropin_targets as validate_dropin_targets_fn, + validate_service_selection, +}; +use serde::Deserialize; + +pub const HOST_OVERRIDE_ENV: &str = "CORE_OPS_HOST"; + +#[derive(Debug)] +pub struct LayeredRepo { + _repo_temp: TempDir, + pub repo_path: PathBuf, + pub revision_id: String, + pub host: HostDeclaration, + pub catalog: ServiceCatalog, + pub overlays: HostOverlaySet, +} #[derive(Debug)] pub enum RepoError { @@ -14,7 +36,16 @@ pub enum RepoError { 
GitCheckoutFailed(String), InvalidRepoSource(String), MissingQuadletDir(PathBuf), + MissingServicesDir(PathBuf), + MissingHostsDir(PathBuf), + MissingHostDeclaration(PathBuf), + InvalidHostDeclaration(String), + MissingHostIdentity, Quadlet(QuadletError), + Io(String), + EvaluationFailed(String), + ValidationFailed(String), + InvalidDropIn(String), } impl From for RepoError { @@ -33,7 +64,24 @@ impl std::fmt::Display for RepoError { RepoError::MissingQuadletDir(path) => { write!(f, "missing quadlet dir: {}", path.display()) } + RepoError::MissingServicesDir(path) => { + write!(f, "missing services dir: {}", path.display()) + } + RepoError::MissingHostsDir(path) => { + write!(f, "missing hosts dir: {}", path.display()) + } + RepoError::MissingHostDeclaration(path) => { + write!(f, "missing host declaration: {}", path.display()) + } + RepoError::InvalidHostDeclaration(msg) => { + write!(f, "invalid host declaration: {}", msg) + } + RepoError::MissingHostIdentity => write!(f, "missing host identity"), RepoError::Quadlet(err) => write!(f, "quadlet error: {}", err), + RepoError::Io(err) => write!(f, "repo io error: {}", err), + RepoError::EvaluationFailed(err) => write!(f, "evaluation failed: {}", err), + RepoError::ValidationFailed(err) => write!(f, "validation failed: {}", err), + RepoError::InvalidDropIn(err) => write!(f, "invalid drop-in: {}", err), } } } @@ -56,6 +104,10 @@ pub fn load_desired_state(repo_source: &str, revision_id: &str) -> Result Result Result { + let temp = TempDir::new().map_err(|err| RepoError::GitCloneFailed(err.to_string()))?; + if looks_like_url(repo_source) { + git_clone(repo_source, temp.path())?; + } else { + let repo_path = Path::new(repo_source); + if !repo_path.exists() { + return Err(RepoError::InvalidRepoSource(repo_source.to_string())); + } + git_clone(repo_source, temp.path())?; + } + + git_fetch_revision(temp.path(), revision_id)?; + git_checkout_revision(temp.path())?; + + let repo_path = temp.path().to_path_buf(); + let 
services_dir = repo_path.join("services"); + if !services_dir.exists() { + return Err(RepoError::MissingServicesDir(services_dir)); + } + let hosts_dir = repo_path.join("hosts"); + if !hosts_dir.exists() { + return Err(RepoError::MissingHostsDir(hosts_dir)); + } + + let host_id = resolve_host_identity()?; + let host_dir = hosts_dir.join(&host_id); + let host_decl = load_host_declaration(&host_dir)?; + let catalog = load_service_catalog(&services_dir)?; + validate_service_selection(&host_decl, &catalog) + .map_err(|err| RepoError::ValidationFailed(err.to_string()))?; + let mut overlays = load_host_overrides(&host_dir)?; + let allowed_prefixes = config_prefixes_for_services(&host_decl.services); + if let Some(err) = validate_config_overrides(&overlays.config_overrides, &allowed_prefixes) { + return Err(RepoError::ValidationFailed(err)); + } + overlays.config_overrides = filter_config_overrides(&overlays.config_overrides, &allowed_prefixes); + let all_artifacts = selected_service_artifacts(&host_decl.services, &catalog); + validate_dropin_targets(&host_decl.services, &catalog, &overlays, &all_artifacts)?; + + Ok(LayeredRepo { + _repo_temp: temp, + repo_path, + revision_id: revision_id.to_string(), + host: host_decl, + catalog, + overlays, + }) +} + +pub fn load_host_declaration(host_dir: &Path) -> Result { + load_host_declaration_inner(host_dir) +} + +fn load_layered_desired_state( + repo_path: &Path, + revision_id: &str, +) -> Result { + let services_dir = repo_path.join("services"); + let hosts_dir = repo_path.join("hosts"); + if !hosts_dir.exists() { + return Err(RepoError::MissingHostsDir(hosts_dir)); + } + let host_id = resolve_host_identity()?; + let host_dir = hosts_dir.join(&host_id); + let host_decl = load_host_declaration_inner(&host_dir)?; + let catalog = load_service_catalog(&services_dir)?; + validate_service_selection(&host_decl, &catalog) + .map_err(|err| RepoError::ValidationFailed(err.to_string()))?; + let mut overlays = 
load_host_overrides(&host_dir)?; + let allowed_prefixes = config_prefixes_for_services(&host_decl.services); + if let Some(err) = validate_config_overrides(&overlays.config_overrides, &allowed_prefixes) { + return Err(RepoError::ValidationFailed(err)); + } + overlays.config_overrides = filter_config_overrides(&overlays.config_overrides, &allowed_prefixes); + let all_artifacts = selected_service_artifacts(&host_decl.services, &catalog); + validate_dropin_targets(&host_decl.services, &catalog, &overlays, &all_artifacts)?; + let mut config_paths = collect_config_paths(&host_decl.services, &catalog, &overlays); + config_paths.sort(); + config_paths.dedup(); + let mut config_roots = config_roots_for_services(&host_decl.services); + config_roots.sort(); + config_roots.dedup(); + validate_config_paths(&config_paths) + .map_err(|err| RepoError::ValidationFailed(err.to_string()))?; + + let input = EvaluationInput { + host: host_decl, + catalog, + overlays, + }; + let output = evaluate_desired_state(&input) + .map_err(|err| RepoError::EvaluationFailed(err.to_string()))?; + let workloads = workloads_from_evaluation(&output); + Ok(desired_state_from_workloads( + repo_path, + revision_id, + workloads, + config_paths, + config_roots, )) } @@ -72,11 +229,15 @@ pub fn desired_state_from_workloads( repo_path: &Path, revision_id: &str, workloads: Vec, + managed_config_paths: Vec, + managed_config_roots: Vec, ) -> DesiredState { DesiredState { repository_ref: repo_path.display().to_string(), revision_id: revision_id.to_string(), workloads, + managed_config_paths, + managed_config_roots, invariants: vec![Invariant::BoundariesDeclared, Invariant::DeterministicPlan], boundaries: Boundaries { scopes: vec![BoundaryScope::QuadletSystemd], @@ -92,6 +253,456 @@ fn looks_like_url(value: &str) -> bool { || value.contains('@') && value.contains(':') } +fn resolve_host_identity() -> Result { + if let Ok(value) = std::env::var(HOST_OVERRIDE_ENV) { + if !value.trim().is_empty() { + return 
Ok(value); + } + } + let hostname = read_hostname().map_err(|err| RepoError::Io(err.to_string()))?; + if hostname.trim().is_empty() { + return Err(RepoError::MissingHostIdentity); + } + Ok(hostname) +} + +fn load_host_declaration_inner(host_dir: &Path) -> Result { + let host_yaml_path = host_dir.join("host.yaml"); + if !host_yaml_path.exists() { + return Err(RepoError::MissingHostDeclaration(host_yaml_path)); + } + let contents = fs::read_to_string(&host_yaml_path) + .map_err(|err| RepoError::Io(err.to_string()))?; + let parsed: HostYaml = + serde_yaml::from_str(&contents).map_err(|err| RepoError::InvalidHostDeclaration(err.to_string()))?; + let host_name = host_dir + .file_name() + .and_then(|name| name.to_str()) + .ok_or_else(|| RepoError::InvalidHostDeclaration("invalid host directory".to_string()))?; + if parsed.host != host_name { + return Err(RepoError::InvalidHostDeclaration(format!( + "host field '{}' does not match directory '{}'", + parsed.host, host_name + ))); + } + Ok(HostDeclaration { + host: parsed.host, + services: parsed.services, + }) +} + +fn load_service_catalog(services_dir: &Path) -> Result { + let mut services = std::collections::BTreeMap::new(); + for entry in fs::read_dir(services_dir).map_err(|err| RepoError::Io(err.to_string()))? 
{ + let entry = entry.map_err(|err| RepoError::Io(err.to_string()))?; + let path = entry.path(); + if !path.is_dir() { + continue; + } + let service_name = match path.file_name().and_then(|name| name.to_str()) { + Some(name) if !name.starts_with('.') => name.to_string(), + _ => continue, + }; + let service = load_service_definition(&service_name, &path)?; + services.insert(service_name, service); + } + + Ok(ServiceCatalog { services }) +} + +fn load_service_definition( + service_name: &str, + service_dir: &Path, +) -> Result { + let mut artifacts = Vec::new(); + let mut base_dropins = Vec::new(); + let mut config_files = Vec::new(); + for entry in fs::read_dir(service_dir).map_err(|err| RepoError::Io(err.to_string()))? { + let entry = entry.map_err(|err| RepoError::Io(err.to_string()))?; + let path = entry.path(); + let file_name = match path.file_name().and_then(|name| name.to_str()) { + Some(name) if !name.starts_with('.') => name.to_string(), + _ => continue, + }; + if path.is_dir() { + if file_name == "quadlet" { + artifacts.extend(read_quadlet_files(&path)?); + continue; + } + if file_name == "quadlet-overrides" { + base_dropins.extend(read_dropins_from_root(&path)?); + continue; + } + if let Some(target) = dropin_target_from_dir(&file_name) { + base_dropins.extend(read_dropins(&path, &target)?); + continue; + } + if file_name == "config" { + config_files.extend(read_config_files(&path)?); + continue; + } + continue; + } + if let Ok((_, quadlet_type)) = parse_quadlet_name(&file_name) { + let contents = + fs::read_to_string(&path).map_err(|err| RepoError::Io(err.to_string()))?; + artifacts.push(ArtifactSource { + name: file_name, + quadlet_type, + contents, + source_path: path.display().to_string(), + }); + } + } + + Ok(ServiceDefinition { + name: service_name.to_string(), + artifacts, + base_dropins, + config_files, + }) +} + +fn load_host_overrides(host_dir: &Path) -> Result { + let overrides_dir = host_dir.join("overrides"); + let host_name = host_dir + 
.file_name() + .and_then(|name| name.to_str()) + .ok_or_else(|| RepoError::InvalidHostDeclaration("invalid host directory".to_string()))?; + if !overrides_dir.exists() { + return Ok(HostOverlaySet { + host: host_name.to_string(), + overrides: Vec::new(), + config_overrides: Vec::new(), + }); + } + + let mut overrides = Vec::new(); + let mut config_overrides = Vec::new(); + for entry in fs::read_dir(&overrides_dir).map_err(|err| RepoError::Io(err.to_string()))? { + let entry = entry.map_err(|err| RepoError::Io(err.to_string()))?; + let path = entry.path(); + if !path.is_dir() { + continue; + } + let file_name = match path.file_name().and_then(|name| name.to_str()) { + Some(name) if !name.starts_with('.') => name.to_string(), + _ => continue, + }; + if file_name == "quadlet" { + overrides.extend(read_dropins_from_root(&path)?); + continue; + } + if let Some(target) = dropin_target_from_dir(&file_name) { + overrides.extend(read_dropins(&path, &target)?); + continue; + } + if file_name == "config" { + config_overrides.extend(read_config_files(&path)?); + } + } + + Ok(HostOverlaySet { + host: host_name.to_string(), + overrides, + config_overrides, + }) +} + +fn dropin_target_from_dir(dir_name: &str) -> Option { + dir_name.strip_suffix(".d").map(|name| name.to_string()) +} + +fn read_dropins(dir: &Path, target: &str) -> Result, RepoError> { + let mut dropins = Vec::new(); + for entry in fs::read_dir(dir).map_err(|err| RepoError::Io(err.to_string()))? 
{ + let entry = entry.map_err(|err| RepoError::Io(err.to_string()))?; + let path = entry.path(); + if path.is_dir() { + continue; + } + let file_name = match path.file_name().and_then(|name| name.to_str()) { + Some(name) if !name.starts_with('.') => name, + _ => continue, + }; + if !file_name.ends_with(".conf") { + return Err(RepoError::InvalidDropIn(format!( + "unsupported drop-in extension: {}", + file_name + ))); + } + let contents = + fs::read_to_string(&path).map_err(|err| RepoError::Io(err.to_string()))?; + dropins.push(DropInSource { + target: target.to_string(), + contents, + source_path: path.display().to_string(), + }); + } + Ok(dropins) +} + +fn read_dropins_from_root(root: &Path) -> Result, RepoError> { + let mut dropins = Vec::new(); + if !root.exists() { + return Ok(dropins); + } + for entry in fs::read_dir(root).map_err(|err| RepoError::Io(err.to_string()))? { + let entry = entry.map_err(|err| RepoError::Io(err.to_string()))?; + let path = entry.path(); + if !path.is_dir() { + continue; + } + let file_name = match path.file_name().and_then(|name| name.to_str()) { + Some(name) if !name.starts_with('.') => name.to_string(), + _ => continue, + }; + if let Some(target) = dropin_target_from_dir(&file_name) { + dropins.extend(read_dropins(&path, &target)?); + } + } + Ok(dropins) +} + +fn read_hostname() -> Result { + let mut buf = [0u8; 256]; + let result = unsafe { libc::gethostname(buf.as_mut_ptr().cast(), buf.len()) }; + if result != 0 { + return Err(std::io::Error::last_os_error()); + } + let len = buf.iter().position(|b| *b == 0).unwrap_or(buf.len()); + Ok(String::from_utf8_lossy(&buf[..len]).trim().to_string()) +} + +fn workloads_from_evaluation(output: &EvaluationOutput) -> Vec { + let mut workloads: Vec = output + .artifacts + .iter() + .map(workload_from_artifact) + .collect(); + workloads.extend( + output + .socket_dropins + .iter() + .map(workload_from_socket_dropin), + ); + workloads.extend( + output + .config_files + .iter() + 
.map(workload_from_config_file), + ); + workloads +} + +fn workload_from_artifact(artifact: &EvaluatedArtifact) -> Workload { + let contents = if artifact.quadlet_type == QuadletType::Socket { + crate::io::quadlet::normalize_socket_contents(&artifact.contents) + } else { + artifact.contents.clone() + }; + let name = Path::new(&artifact.name) + .file_stem() + .and_then(|stem| stem.to_str()) + .unwrap_or(&artifact.name) + .to_string(); + Workload { + name, + quadlet_type: artifact.quadlet_type.clone(), + quadlet_contents: contents, + systemd_unit_name: artifact.name.clone(), + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + } +} + +fn workload_from_socket_dropin(dropin: &EvaluatedDropIn) -> Workload { + Workload { + name: format!("{}.d/{}", dropin.target, dropin.file_name), + quadlet_type: QuadletType::SocketDropIn, + quadlet_contents: dropin.contents.clone(), + systemd_unit_name: format!("{}.d/{}", dropin.target, dropin.file_name), + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + } +} + +fn workload_from_config_file(file: &EvaluatedConfigFile) -> Workload { + Workload { + name: file.target_path.clone(), + quadlet_type: QuadletType::ConfigFile, + quadlet_contents: file.contents.clone(), + systemd_unit_name: file.target_path.clone(), + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + } +} + +fn selected_service_artifacts( + selected_services: &[String], + catalog: &ServiceCatalog, +) -> Vec { + let mut artifacts = Vec::new(); + for service_name in selected_services { + if let Some(service) = catalog.services.get(service_name) { + artifacts.extend(service.artifacts.iter().cloned()); + } + } + artifacts +} + +fn validate_dropin_targets( + selected_services: &[String], + catalog: &ServiceCatalog, + overlays: &HostOverlaySet, + artifacts: &[ArtifactSource], +) -> Result<(), RepoError> { + let mut dropins = Vec::new(); + let mut base_dropins = Vec::new(); + for service_name 
in selected_services { + if let Some(service) = catalog.services.get(service_name) { + base_dropins.extend(service.base_dropins.iter().cloned()); + } + } + dropins.extend(base_dropins.iter().cloned()); + dropins.extend(overlays.overrides.iter().cloned()); + validate_dropin_targets_fn(&dropins, artifacts) + .and_then(|_| { + crate::core::validation::validate_socket_dropin_precedence( + &base_dropins, + &overlays.overrides, + ) + }) + .map_err(|err| RepoError::ValidationFailed(err.to_string())) +} + +fn read_config_files(config_root: &Path) -> Result, RepoError> { + let mut files = Vec::new(); + for entry in walk_config_dir(config_root)? { + let rel = entry.strip_prefix(config_root).map_err(|err| RepoError::Io(err.to_string()))?; + let rel_str = rel.to_string_lossy(); + if rel_str.starts_with("etc/") { + let contents = fs::read_to_string(&entry).map_err(|err| RepoError::Io(err.to_string()))?; + let target_path = format!("/{}", rel_str); + files.push(ConfigFileSource { + target_path, + contents, + source_path: entry.display().to_string(), + }); + } + } + Ok(files) +} + +fn walk_config_dir(root: &Path) -> Result, RepoError> { + let mut files = Vec::new(); + if !root.exists() { + return Ok(files); + } + for entry in fs::read_dir(root).map_err(|err| RepoError::Io(err.to_string()))? 
{ + let entry = entry.map_err(|err| RepoError::Io(err.to_string()))?; + let path = entry.path(); + if path.is_dir() { + files.extend(walk_config_dir(&path)?); + } else { + files.push(path); + } + } + Ok(files) +} + +fn collect_config_paths( + selected_services: &[String], + catalog: &ServiceCatalog, + overlays: &HostOverlaySet, +) -> Vec { + let mut paths = Vec::new(); + for service_name in selected_services { + if let Some(service) = catalog.services.get(service_name) { + paths.extend(service.config_files.iter().map(|f| f.target_path.clone())); + } + } + paths.extend( + overlays + .config_overrides + .iter() + .map(|f| f.target_path.clone()), + ); + paths +} + +fn config_prefixes_for_services(services: &[String]) -> Vec { + services + .iter() + .map(|service| format!("/etc/{service}/")) + .collect() +} + +fn config_roots_for_services(services: &[String]) -> Vec { + services + .iter() + .map(|service| format!("/etc/{service}")) + .collect() +} + +fn validate_config_overrides( + overrides: &[ConfigFileSource], + allowed_prefixes: &[String], +) -> Option { + let invalid: Vec<&ConfigFileSource> = overrides + .iter() + .filter(|cfg| !allowed_prefixes.iter().any(|prefix| cfg.target_path.starts_with(prefix))) + .collect(); + if invalid.is_empty() { + return None; + } + Some(format!( + "host config override outside selected services: {}", + invalid + .iter() + .map(|cfg| cfg.target_path.as_str()) + .collect::>() + .join(", ") + )) +} + +fn filter_config_overrides( + overrides: &[ConfigFileSource], + allowed_prefixes: &[String], +) -> Vec { + overrides + .iter() + .filter(|cfg| allowed_prefixes.iter().any(|prefix| cfg.target_path.starts_with(prefix))) + .cloned() + .collect() +} + +fn read_quadlet_files(dir: &Path) -> Result, RepoError> { + let mut artifacts = Vec::new(); + for entry in fs::read_dir(dir).map_err(|err| RepoError::Io(err.to_string()))? 
{ + let entry = entry.map_err(|err| RepoError::Io(err.to_string()))?; + let path = entry.path(); + if path.is_dir() { + continue; + } + let file_name = match path.file_name().and_then(|name| name.to_str()) { + Some(name) if !name.starts_with('.') => name.to_string(), + _ => continue, + }; + if let Ok((_, quadlet_type)) = parse_quadlet_name(&file_name) { + let contents = + fs::read_to_string(&path).map_err(|err| RepoError::Io(err.to_string()))?; + artifacts.push(ArtifactSource { + name: file_name, + quadlet_type, + contents, + source_path: path.display().to_string(), + }); + } + } + Ok(artifacts) +} + fn git_clone(repo: &str, dest: &Path) -> Result<(), RepoError> { let output = Command::new("git") .arg("clone") @@ -110,6 +721,12 @@ fn git_clone(repo: &str, dest: &Path) -> Result<(), RepoError> { Ok(()) } +#[derive(Debug, Deserialize)] +struct HostYaml { + host: String, + services: Vec, +} + fn git_fetch_revision(repo_path: &Path, revision: &str) -> Result<(), RepoError> { let output = Command::new("git") .arg("-C") diff --git a/src/main.rs b/src/main.rs index 3e8e4bc..06f9ed2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,11 +28,13 @@ fn run(cli: Cli) -> Result<(), CoreError> { let quadlet_dir = args.quadlet_dir; let audit_dir = args.audit_dir; set_systemd_unit_dir(&args.systemd_unit_dir); + set_host_override(&args.host); let deps = ReconcileDependencies { load_desired: &|| repo::load_desired_state(&repo_source, &rev).map_err(map_plan_error), - read_observed: &|| { - observed::read_observed_state(&quadlet_dir, None).map_err(map_plan_error) + read_observed: &|desired| { + observed::read_observed_state(&quadlet_dir, Some(desired), None) + .map_err(map_plan_error) }, apply_plan: &|_, _| Ok(()), }; @@ -55,6 +57,7 @@ fn run(cli: Cli) -> Result<(), CoreError> { let audit_dir = args.audit_dir; let no_reload = args.no_reload; set_systemd_unit_dir(&args.systemd_unit_dir); + set_host_override(&args.host); let (result, report, plan) = 
apply_cmd::apply_with_report(&repo_source, &rev, &quadlet_dir, !no_reload)?; @@ -66,12 +69,15 @@ fn run(cli: Cli) -> Result<(), CoreError> { ); audit_io::emit_journal_event(&event).map_err(map_apply_error)?; if let Some(dir) = audit_dir { - let record = core_ops::core::audit::build_audit_record( + let mut record = core_ops::core::audit::build_audit_record( &run.run_id, Vec::new(), &plan, result.verification_results, ); + record + .operator_messages + .push(core_ops::core::audit::summarize_evaluation(&result.desired)); let _ = audit_io::write_audit_record(&dir, &record).map_err(map_apply_error)?; } @@ -92,6 +98,12 @@ fn run(cli: Cli) -> Result<(), CoreError> { { std::env::set_var(SYSTEMD_UNIT_DIR_ENV, systemd_unit_dir); } + if let Some(host_override) = args + .host + .or_else(|| std::env::var("CORE_OPS_HOST").ok()) + { + std::env::set_var("CORE_OPS_HOST", host_override); + } let audit_dir = args .audit_dir .or_else(|| std::env::var_os("CORE_OPS_AUDIT_DIR").map(PathBuf::from)); @@ -159,3 +171,9 @@ fn set_systemd_unit_dir(value: &Option) { std::env::set_var(SYSTEMD_UNIT_DIR_ENV, dir); } } + +fn set_host_override(value: &Option) { + if let Some(host) = value { + std::env::set_var("CORE_OPS_HOST", host); + } +} diff --git a/tests/fixtures/layered_overrides/README.md b/tests/fixtures/layered_overrides/README.md new file mode 100644 index 0000000..a7e508d --- /dev/null +++ b/tests/fixtures/layered_overrides/README.md @@ -0,0 +1,5 @@ +# Layered Overrides Fixture + +This fixture mirrors the layered overrides repository layout used by the feature. +It contains base services under `services/` and host-specific selections plus +overrides under `hosts//`. 
diff --git a/tests/fixtures/layered_overrides/hosts/kadath/host.yaml b/tests/fixtures/layered_overrides/hosts/kadath/host.yaml new file mode 100644 index 0000000..95a5383 --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/kadath/host.yaml @@ -0,0 +1,4 @@ +host: kadath +services: + - traefik + - immich diff --git a/tests/fixtures/layered_overrides/hosts/kadath/overrides/immich.container.d/20-host.conf b/tests/fixtures/layered_overrides/hosts/kadath/overrides/immich.container.d/20-host.conf new file mode 100644 index 0000000..bcdc29f --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/kadath/overrides/immich.container.d/20-host.conf @@ -0,0 +1,2 @@ +[Container] +Environment=IMMICH_HOST=kadath diff --git a/tests/fixtures/layered_overrides/hosts/kadath/overrides/immich.volume.d/20-host.conf b/tests/fixtures/layered_overrides/hosts/kadath/overrides/immich.volume.d/20-host.conf new file mode 100644 index 0000000..28c7fc5 --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/kadath/overrides/immich.volume.d/20-host.conf @@ -0,0 +1,2 @@ +[Volume] +Label=host=kadath diff --git a/tests/fixtures/layered_overrides/hosts/kadath/overrides/traefik.container.d/20-host.conf b/tests/fixtures/layered_overrides/hosts/kadath/overrides/traefik.container.d/20-host.conf new file mode 100644 index 0000000..7fa91c8 --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/kadath/overrides/traefik.container.d/20-host.conf @@ -0,0 +1,2 @@ +[Container] +Environment=TRAEFIK_HOST=kadath diff --git a/tests/fixtures/layered_overrides/hosts/kadath/overrides/traefik.socket.d/20-host.conf b/tests/fixtures/layered_overrides/hosts/kadath/overrides/traefik.socket.d/20-host.conf new file mode 100644 index 0000000..94bf056 --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/kadath/overrides/traefik.socket.d/20-host.conf @@ -0,0 +1,2 @@ +[Socket] +ListenStream=127.0.0.1:8081 diff --git a/tests/fixtures/layered_overrides/hosts/rlyeh/host.yaml 
b/tests/fixtures/layered_overrides/hosts/rlyeh/host.yaml new file mode 100644 index 0000000..275b9d3 --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/rlyeh/host.yaml @@ -0,0 +1,4 @@ +host: rlyeh +services: + - traefik + - vector diff --git a/tests/fixtures/layered_overrides/hosts/rlyeh/overrides/traefik.container.d/20-host.conf b/tests/fixtures/layered_overrides/hosts/rlyeh/overrides/traefik.container.d/20-host.conf new file mode 100644 index 0000000..1433b35 --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/rlyeh/overrides/traefik.container.d/20-host.conf @@ -0,0 +1,2 @@ +[Container] +Environment=TRAEFIK_HOST=rlyeh diff --git a/tests/fixtures/layered_overrides/hosts/rlyeh/overrides/vector.container.d/20-host.conf b/tests/fixtures/layered_overrides/hosts/rlyeh/overrides/vector.container.d/20-host.conf new file mode 100644 index 0000000..8ea84c2 --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/rlyeh/overrides/vector.container.d/20-host.conf @@ -0,0 +1,2 @@ +[Container] +Environment=VECTOR_HOST=rlyeh diff --git a/tests/fixtures/layered_overrides/hosts/ulthar/host.yaml b/tests/fixtures/layered_overrides/hosts/ulthar/host.yaml new file mode 100644 index 0000000..016728b --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/ulthar/host.yaml @@ -0,0 +1,4 @@ +host: ulthar +services: + - traefik + - whoami diff --git a/tests/fixtures/layered_overrides/hosts/ulthar/overrides/traefik.socket.d/20-host.conf b/tests/fixtures/layered_overrides/hosts/ulthar/overrides/traefik.socket.d/20-host.conf new file mode 100644 index 0000000..b70688d --- /dev/null +++ b/tests/fixtures/layered_overrides/hosts/ulthar/overrides/traefik.socket.d/20-host.conf @@ -0,0 +1,2 @@ +[Socket] +ListenStream=127.0.0.1:8082 diff --git a/tests/fixtures/layered_overrides/hosts/ulthar/overrides/whoami.container.d/20-host.conf b/tests/fixtures/layered_overrides/hosts/ulthar/overrides/whoami.container.d/20-host.conf new file mode 100644 index 0000000..4c2dd70 --- /dev/null +++ 
b/tests/fixtures/layered_overrides/hosts/ulthar/overrides/whoami.container.d/20-host.conf @@ -0,0 +1,2 @@ +[Container] +Environment=WHOAMI_HOST=ulthar diff --git a/tests/fixtures/layered_overrides/services/immich/immich.container b/tests/fixtures/layered_overrides/services/immich/immich.container new file mode 100644 index 0000000..9f82f6c --- /dev/null +++ b/tests/fixtures/layered_overrides/services/immich/immich.container @@ -0,0 +1,3 @@ +[Container] +Image=example/immich:latest +ContainerName=immich diff --git a/tests/fixtures/layered_overrides/services/immich/immich.container.d/10-defaults.conf b/tests/fixtures/layered_overrides/services/immich/immich.container.d/10-defaults.conf new file mode 100644 index 0000000..ff7489a --- /dev/null +++ b/tests/fixtures/layered_overrides/services/immich/immich.container.d/10-defaults.conf @@ -0,0 +1,2 @@ +[Container] +Environment=IMMICH_LOG_LEVEL=info diff --git a/tests/fixtures/layered_overrides/services/immich/immich.volume b/tests/fixtures/layered_overrides/services/immich/immich.volume new file mode 100644 index 0000000..3c54469 --- /dev/null +++ b/tests/fixtures/layered_overrides/services/immich/immich.volume @@ -0,0 +1,2 @@ +[Volume] +VolumeName=immich-data diff --git a/tests/fixtures/layered_overrides/services/traefik/traefik.container b/tests/fixtures/layered_overrides/services/traefik/traefik.container new file mode 100644 index 0000000..9774ec4 --- /dev/null +++ b/tests/fixtures/layered_overrides/services/traefik/traefik.container @@ -0,0 +1,3 @@ +[Container] +Image=example/traefik:latest +ContainerName=traefik diff --git a/tests/fixtures/layered_overrides/services/traefik/traefik.container.d/10-defaults.conf b/tests/fixtures/layered_overrides/services/traefik/traefik.container.d/10-defaults.conf new file mode 100644 index 0000000..28ac4e8 --- /dev/null +++ b/tests/fixtures/layered_overrides/services/traefik/traefik.container.d/10-defaults.conf @@ -0,0 +1,2 @@ +[Container] +Environment=TRAEFIK_LOG_LEVEL=INFO diff 
--git a/tests/fixtures/layered_overrides/services/traefik/traefik.socket b/tests/fixtures/layered_overrides/services/traefik/traefik.socket new file mode 100644 index 0000000..5cb4119 --- /dev/null +++ b/tests/fixtures/layered_overrides/services/traefik/traefik.socket @@ -0,0 +1,3 @@ +[Socket] +ListenStream=127.0.0.1:8080 +Service=traefik.service diff --git a/tests/fixtures/layered_overrides/services/traefik/traefik.socket.d/10-defaults.conf b/tests/fixtures/layered_overrides/services/traefik/traefik.socket.d/10-defaults.conf new file mode 100644 index 0000000..f6fbda2 --- /dev/null +++ b/tests/fixtures/layered_overrides/services/traefik/traefik.socket.d/10-defaults.conf @@ -0,0 +1,2 @@ +[Socket] +NoDelay=true diff --git a/tests/fixtures/layered_overrides/services/vector/vector.container b/tests/fixtures/layered_overrides/services/vector/vector.container new file mode 100644 index 0000000..d94472b --- /dev/null +++ b/tests/fixtures/layered_overrides/services/vector/vector.container @@ -0,0 +1,3 @@ +[Container] +Image=example/vector:latest +ContainerName=vector diff --git a/tests/fixtures/layered_overrides/services/vector/vector.container.d/10-defaults.conf b/tests/fixtures/layered_overrides/services/vector/vector.container.d/10-defaults.conf new file mode 100644 index 0000000..966e290 --- /dev/null +++ b/tests/fixtures/layered_overrides/services/vector/vector.container.d/10-defaults.conf @@ -0,0 +1,2 @@ +[Container] +Environment=VECTOR_LOG_LEVEL=info diff --git a/tests/fixtures/layered_overrides/services/whoami/whoami.container b/tests/fixtures/layered_overrides/services/whoami/whoami.container new file mode 100644 index 0000000..fa42548 --- /dev/null +++ b/tests/fixtures/layered_overrides/services/whoami/whoami.container @@ -0,0 +1,3 @@ +[Container] +Image=example/whoami:latest +ContainerName=whoami diff --git a/tests/integration/mod.rs b/tests/integration/mod.rs index 3cf4c6e..89607fe 100644 --- a/tests/integration/mod.rs +++ b/tests/integration/mod.rs @@ -7,6 
+7,9 @@ mod test_plan; mod test_apply_report; mod test_reconcile_apply; mod test_quickstart_validation; +mod test_service_selection; +mod test_host_overrides; +mod test_overlay_validation; mod test_idempotence; mod test_performance; mod test_repo_unavailable; @@ -21,3 +24,4 @@ mod test_validation_fail; mod test_systemd_units; mod test_verification; mod test_journald_audit; +mod test_socket_dropins; diff --git a/tests/integration/test_config_cleanup.rs b/tests/integration/test_config_cleanup.rs new file mode 100644 index 0000000..109da82 --- /dev/null +++ b/tests/integration/test_config_cleanup.rs @@ -0,0 +1,67 @@ +use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; + +use core_ops::core::planner::plan; +use core_ops::core::types::{ + Boundaries, BoundaryScope, DesiredState, EnabledState, Invariant, QuadletType, RestartPolicy, + Workload, +}; +use core_ops::io::observed::read_observed_state; + +fn temp_dir(prefix: &str) -> PathBuf { + let mut path = std::env::temp_dir(); + let nanos = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time") + .as_nanos(); + path.push(format!("{prefix}_{nanos}")); + path +} + +fn config_workload(path: &str, contents: &str) -> Workload { + Workload { + name: path.to_string(), + quadlet_type: QuadletType::ConfigFile, + quadlet_contents: contents.to_string(), + systemd_unit_name: path.to_string(), + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + } +} + +#[test] +fn removes_stale_config_files_under_managed_root() { + let quadlet_dir = temp_dir("core_ops_config_cleanup_quadlets"); + std::fs::create_dir_all(&quadlet_dir).expect("create quadlet dir"); + + let root = temp_dir("core_ops_config_root").join("etc/service"); + std::fs::create_dir_all(&root).expect("create config root"); + + let keep_path = root.join("keep.conf"); + let stale_path = root.join("stale.conf"); + std::fs::write(&keep_path, "keep").expect("write keep"); + std::fs::write(&stale_path, "stale").expect("write 
stale"); + + let desired = DesiredState { + repository_ref: "repo".to_string(), + revision_id: "rev".to_string(), + workloads: vec![config_workload( + keep_path.to_string_lossy().as_ref(), + "keep", + )], + managed_config_paths: vec![keep_path.to_string_lossy().to_string()], + managed_config_roots: vec![root.to_string_lossy().to_string()], + invariants: vec![Invariant::BoundariesDeclared, Invariant::DeterministicPlan], + boundaries: Boundaries { + scopes: vec![BoundaryScope::QuadletSystemd], + }, + }; + + let observed = read_observed_state(&quadlet_dir, Some(&desired), Some("obs".to_string())) + .expect("observed"); + let plan = plan(&desired, &observed).expect("plan"); + + let targets: Vec<_> = plan.actions.iter().map(|a| a.target.as_str()).collect(); + assert!(targets.contains(&stale_path.to_string_lossy().as_ref())); + assert!(!targets.contains(&keep_path.to_string_lossy().as_ref())); +} diff --git a/tests/integration/test_config_roots.rs b/tests/integration/test_config_roots.rs new file mode 100644 index 0000000..d22ea43 --- /dev/null +++ b/tests/integration/test_config_roots.rs @@ -0,0 +1,67 @@ +use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; + +use core_ops::core::planner::plan; +use core_ops::core::types::{ + Boundaries, BoundaryScope, DesiredState, EnabledState, Invariant, QuadletType, RestartPolicy, + Workload, +}; +use core_ops::io::observed::read_observed_state; + +fn temp_dir(prefix: &str) -> PathBuf { + let mut path = std::env::temp_dir(); + let nanos = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time") + .as_nanos(); + path.push(format!("{prefix}_{nanos}")); + path +} + +fn config_workload(path: &str, contents: &str) -> Workload { + Workload { + name: path.to_string(), + quadlet_type: QuadletType::ConfigFile, + quadlet_contents: contents.to_string(), + systemd_unit_name: path.to_string(), + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + } +} + +#[test] +fn 
does_not_manage_files_outside_config_roots() { + let quadlet_dir = temp_dir("core_ops_config_roots_quadlets"); + std::fs::create_dir_all(&quadlet_dir).expect("create quadlet dir"); + + let root = temp_dir("core_ops_config_root").join("etc/service"); + std::fs::create_dir_all(&root).expect("create config root"); + + let keep_path = root.join("keep.conf"); + std::fs::write(&keep_path, "keep").expect("write keep"); + + let outside_path = temp_dir("core_ops_unmanaged").join("unmanaged.conf"); + std::fs::write(&outside_path, "outside").expect("write outside"); + + let desired = DesiredState { + repository_ref: "repo".to_string(), + revision_id: "rev".to_string(), + workloads: vec![config_workload( + keep_path.to_string_lossy().as_ref(), + "keep", + )], + managed_config_paths: vec![keep_path.to_string_lossy().to_string()], + managed_config_roots: vec![root.to_string_lossy().to_string()], + invariants: vec![Invariant::BoundariesDeclared, Invariant::DeterministicPlan], + boundaries: Boundaries { + scopes: vec![BoundaryScope::QuadletSystemd], + }, + }; + + let observed = read_observed_state(&quadlet_dir, Some(&desired), Some("obs".to_string())) + .expect("observed"); + let plan = plan(&desired, &observed).expect("plan"); + + let targets: Vec<_> = plan.actions.iter().map(|a| a.target.as_str()).collect(); + assert!(!targets.contains(&outside_path.to_string_lossy().as_ref())); +} diff --git a/tests/integration/test_host_overrides.rs b/tests/integration/test_host_overrides.rs new file mode 100644 index 0000000..66c703a --- /dev/null +++ b/tests/integration/test_host_overrides.rs @@ -0,0 +1,123 @@ +use std::path::{Path, PathBuf}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use core_ops::io::repo::load_desired_state; + +fn temp_repo() -> PathBuf { + let mut path = std::env::temp_dir(); + let nanos = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time") + .as_nanos(); + path.push(format!("core_ops_layered_overrides_{}", nanos)); + path +} + +fn copy_dir_all(src: 
&Path, dst: &Path) { + std::fs::create_dir_all(dst).expect("create dir"); + for entry in std::fs::read_dir(src).expect("read dir") { + let entry = entry.expect("entry"); + let path = entry.path(); + let dest = dst.join(entry.file_name()); + if path.is_dir() { + copy_dir_all(&path, &dest); + } else { + std::fs::copy(&path, &dest).expect("copy file"); + } + } +} + +fn init_layered_repo(repo: &PathBuf) -> String { + std::process::Command::new("git") + .arg("init") + .arg(repo) + .output() + .expect("git init"); + + let fixtures = PathBuf::from("tests/fixtures/layered_overrides"); + copy_dir_all(&fixtures.join("services"), &repo.join("services")); + copy_dir_all(&fixtures.join("hosts"), &repo.join("hosts")); + + std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("add") + .arg(".") + .output() + .expect("git add"); + std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("commit") + .arg("-m") + .arg("fixture") + .env("GIT_AUTHOR_NAME", "fixture") + .env("GIT_AUTHOR_EMAIL", "fixture@example.com") + .env("GIT_COMMITTER_NAME", "fixture") + .env("GIT_COMMITTER_EMAIL", "fixture@example.com") + .output() + .expect("git commit"); + + let output = std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("rev-parse") + .arg("HEAD") + .output() + .expect("git rev-parse"); + + String::from_utf8_lossy(&output.stdout).trim().to_string() +} + +#[test] +fn applies_host_overrides_after_base_dropins() { + let repo = temp_repo(); + let rev = init_layered_repo(&repo); + + std::env::set_var("CORE_OPS_HOST", "kadath"); + let desired = load_desired_state(repo.to_str().unwrap(), &rev).expect("load desired"); + + let traefik = desired + .workloads + .iter() + .find(|w| w.systemd_unit_name == "traefik.container") + .expect("traefik container"); + + assert!(traefik.quadlet_contents.contains("TRAEFIK_LOG_LEVEL=INFO")); + assert!(traefik.quadlet_contents.contains("TRAEFIK_HOST=kadath")); + + let base_pos = traefik + .quadlet_contents + 
.find("TRAEFIK_LOG_LEVEL=INFO") + .expect("base drop-in"); + let host_pos = traefik + .quadlet_contents + .find("TRAEFIK_HOST=kadath") + .expect("host drop-in"); + assert!(base_pos < host_pos); + + let traefik_socket = desired + .workloads + .iter() + .find(|w| w.systemd_unit_name == "traefik.socket") + .expect("traefik socket"); + + assert!(traefik_socket.quadlet_contents.contains("ListenStream=127.0.0.1:8080")); + assert!(!traefik_socket.quadlet_contents.contains("ListenStream=127.0.0.1:8081")); + + let socket_base_dropin = desired + .workloads + .iter() + .find(|w| w.systemd_unit_name == "traefik.socket.d/10-defaults.conf") + .expect("socket base drop-in"); + assert!(socket_base_dropin.quadlet_contents.contains("NoDelay=true")); + + let socket_host_dropin = desired + .workloads + .iter() + .find(|w| w.systemd_unit_name == "traefik.socket.d/20-host.conf") + .expect("socket host drop-in"); + assert!(socket_host_dropin.quadlet_contents.contains("ListenStream=127.0.0.1:8081")); + + std::env::remove_var("CORE_OPS_HOST"); +} diff --git a/tests/integration/test_idempotence.rs b/tests/integration/test_idempotence.rs index a19b3b3..6b5048a 100644 --- a/tests/integration/test_idempotence.rs +++ b/tests/integration/test_idempotence.rs @@ -109,8 +109,8 @@ fn repeated_runs_remain_converged() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| { - read_observed_state(&host_quadlets, Some("obs".to_string())).map_err(map_io_error) + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), Some("obs".to_string())).map_err(map_io_error) }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, true) diff --git a/tests/integration/test_ordering.rs b/tests/integration/test_ordering.rs index 87faca2..5847c66 100644 --- a/tests/integration/test_ordering.rs +++ b/tests/integration/test_ordering.rs @@ -74,8 +74,8 @@ fn 
plan_orders_volume_before_container_before_socket() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| { - read_observed_state(&host_quadlets, Some("obs".to_string())).map_err(map_io_error) + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), Some("obs".to_string())).map_err(map_io_error) }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, false) diff --git a/tests/integration/test_overlay_validation.rs b/tests/integration/test_overlay_validation.rs new file mode 100644 index 0000000..e9f345f --- /dev/null +++ b/tests/integration/test_overlay_validation.rs @@ -0,0 +1,92 @@ +use std::path::{Path, PathBuf}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use core_ops::io::repo::load_desired_state; + +fn temp_repo() -> PathBuf { + let mut path = std::env::temp_dir(); + let nanos = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time") + .as_nanos(); + path.push(format!("core_ops_overlay_invalid_{}", nanos)); + path +} + +fn copy_dir_all(src: &Path, dst: &Path) { + std::fs::create_dir_all(dst).expect("create dir"); + for entry in std::fs::read_dir(src).expect("read dir") { + let entry = entry.expect("entry"); + let path = entry.path(); + let dest = dst.join(entry.file_name()); + if path.is_dir() { + copy_dir_all(&path, &dest); + } else { + std::fs::copy(&path, &dest).expect("copy file"); + } + } +} + +fn init_layered_repo(repo: &PathBuf) -> String { + std::process::Command::new("git") + .arg("init") + .arg(repo) + .output() + .expect("git init"); + + let fixtures = PathBuf::from("tests/fixtures/layered_overrides"); + copy_dir_all(&fixtures.join("services"), &repo.join("services")); + copy_dir_all(&fixtures.join("hosts"), &repo.join("hosts")); + + let invalid_dir = repo + .join("hosts") + .join("kadath") + .join("overrides") + .join("missing.container.d"); + 
std::fs::create_dir_all(&invalid_dir).expect("invalid dir"); + std::fs::write(invalid_dir.join("10-invalid.conf"), "[Container]\nImage=bad") + .expect("write invalid drop-in"); + + std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("add") + .arg(".") + .output() + .expect("git add"); + std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("commit") + .arg("-m") + .arg("fixture") + .env("GIT_AUTHOR_NAME", "fixture") + .env("GIT_AUTHOR_EMAIL", "fixture@example.com") + .env("GIT_COMMITTER_NAME", "fixture") + .env("GIT_COMMITTER_EMAIL", "fixture@example.com") + .output() + .expect("git commit"); + + let output = std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("rev-parse") + .arg("HEAD") + .output() + .expect("git rev-parse"); + + String::from_utf8_lossy(&output.stdout).trim().to_string() +} + +#[test] +fn fails_on_dropin_target_missing() { + let repo = temp_repo(); + let rev = init_layered_repo(&repo); + + std::env::set_var("CORE_OPS_HOST", "kadath"); + let err = load_desired_state(repo.to_str().unwrap(), &rev).expect_err("should fail"); + + assert!(err.to_string().contains("drop-in target does not exist")); + + std::env::remove_var("CORE_OPS_HOST"); +} diff --git a/tests/integration/test_performance.rs b/tests/integration/test_performance.rs index 9513e42..43fe99c 100644 --- a/tests/integration/test_performance.rs +++ b/tests/integration/test_performance.rs @@ -113,8 +113,8 @@ fn reconcile_apply_completes_under_budget() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| { - read_observed_state(&host_quadlets, Some("obs".to_string())).map_err(map_io_error) + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), Some("obs".to_string())).map_err(map_io_error) }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, true) diff --git a/tests/integration/test_plan.rs 
b/tests/integration/test_plan.rs index 6db166b..1688509 100644 --- a/tests/integration/test_plan.rs +++ b/tests/integration/test_plan.rs @@ -71,8 +71,10 @@ fn plan_does_not_apply_changes() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| read_observed_state(&host_quadlets, Some("obs".to_string())) - .map_err(map_io_error), + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), Some("obs".to_string())) + .map_err(map_io_error) + }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, false) .map(|_| ()) diff --git a/tests/integration/test_quadlet_artifacts.rs b/tests/integration/test_quadlet_artifacts.rs index 0cb60b2..c5ad855 100644 --- a/tests/integration/test_quadlet_artifacts.rs +++ b/tests/integration/test_quadlet_artifacts.rs @@ -117,8 +117,8 @@ fn reconcile_apply_supports_socket_and_volume_quadlets() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| { - read_observed_state(&host_quadlets, Some("obs".to_string())).map_err(map_io_error) + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), Some("obs".to_string())).map_err(map_io_error) }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, true) diff --git a/tests/integration/test_quickstart_validation.rs b/tests/integration/test_quickstart_validation.rs index 94eb2a1..8dd9739 100644 --- a/tests/integration/test_quickstart_validation.rs +++ b/tests/integration/test_quickstart_validation.rs @@ -12,3 +12,15 @@ fn quickstart_mentions_systemd_units_and_env() { assert!(contents.contains("CORE_OPS_REPO")); assert!(contents.contains("CORE_OPS_REV")); } + +#[test] +fn quickstart_mentions_layered_overrides_flow() { + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + let quickstart = 
root.join("specs/003-layered-overrides/quickstart.md"); + let contents = fs::read_to_string(&quickstart).expect("read quickstart"); + + assert!(contents.contains("services/")); + assert!(contents.contains("hosts/")); + assert!(contents.contains("CORE_OPS_HOST")); + assert!(contents.contains("Evaluation Flow")); +} diff --git a/tests/integration/test_reboot_recovery.rs b/tests/integration/test_reboot_recovery.rs index 5c01cf2..b91944e 100644 --- a/tests/integration/test_reboot_recovery.rs +++ b/tests/integration/test_reboot_recovery.rs @@ -109,8 +109,8 @@ fn reconcile_recovers_after_reboot() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| { - read_observed_state(&host_quadlets, None).map_err(map_io_error) + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), None).map_err(map_io_error) }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, true) diff --git a/tests/integration/test_reconcile_apply.rs b/tests/integration/test_reconcile_apply.rs index ea8cedf..864cc5f 100644 --- a/tests/integration/test_reconcile_apply.rs +++ b/tests/integration/test_reconcile_apply.rs @@ -126,8 +126,10 @@ fn reconcile_apply_converges_to_desired_state() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| read_observed_state(&host_quadlets, Some("obs".to_string())) - .map_err(map_io_error), + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), Some("obs".to_string())) + .map_err(map_io_error) + }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, false) .map(|_| ()) diff --git a/tests/integration/test_service_selection.rs b/tests/integration/test_service_selection.rs new file mode 100644 index 0000000..d39b8be --- /dev/null +++ b/tests/integration/test_service_selection.rs 
@@ -0,0 +1,107 @@ +use std::path::{Path, PathBuf}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use core_ops::io::repo::load_desired_state; + +fn temp_repo() -> PathBuf { + let mut path = std::env::temp_dir(); + let nanos = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time") + .as_nanos(); + path.push(format!("core_ops_layered_{}", nanos)); + path +} + +fn copy_dir_all(src: &Path, dst: &Path) { + std::fs::create_dir_all(dst).expect("create dir"); + for entry in std::fs::read_dir(src).expect("read dir") { + let entry = entry.expect("entry"); + let path = entry.path(); + let dest = dst.join(entry.file_name()); + if path.is_dir() { + copy_dir_all(&path, &dest); + } else { + std::fs::copy(&path, &dest).expect("copy file"); + } + } +} + +fn init_layered_repo(repo: &PathBuf) -> String { + std::process::Command::new("git") + .arg("init") + .arg(repo) + .output() + .expect("git init"); + + let fixtures = PathBuf::from("tests/fixtures/layered_overrides"); + copy_dir_all(&fixtures.join("services"), &repo.join("services")); + copy_dir_all(&fixtures.join("hosts"), &repo.join("hosts")); + + std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("add") + .arg(".") + .output() + .expect("git add"); + std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("commit") + .arg("-m") + .arg("fixture") + .env("GIT_AUTHOR_NAME", "fixture") + .env("GIT_AUTHOR_EMAIL", "fixture@example.com") + .env("GIT_COMMITTER_NAME", "fixture") + .env("GIT_COMMITTER_EMAIL", "fixture@example.com") + .output() + .expect("git commit"); + + let output = std::process::Command::new("git") + .arg("-C") + .arg(repo) + .arg("rev-parse") + .arg("HEAD") + .output() + .expect("git rev-parse"); + + String::from_utf8_lossy(&output.stdout).trim().to_string() +} + +#[test] +fn selects_services_per_host() { + let repo = temp_repo(); + let rev = init_layered_repo(&repo); + + std::env::set_var("CORE_OPS_HOST", "kadath"); + let desired = load_desired_state(repo.to_str().unwrap(), 
&rev).expect("load desired"); + let names: Vec<_> = desired + .workloads + .iter() + .map(|w| w.systemd_unit_name.as_str()) + .collect(); + + assert!(names.contains(&"traefik.container")); + assert!(names.contains(&"traefik.socket")); + assert!(names.contains(&"immich.container")); + assert!(names.contains(&"immich.volume")); + assert!(!names.contains(&"vector.container")); + assert!(!names.contains(&"whoami.container")); + + std::env::set_var("CORE_OPS_HOST", "rlyeh"); + let desired = load_desired_state(repo.to_str().unwrap(), &rev).expect("load desired"); + let names: Vec<_> = desired + .workloads + .iter() + .map(|w| w.systemd_unit_name.as_str()) + .collect(); + + assert!(names.contains(&"traefik.container")); + assert!(names.contains(&"traefik.socket")); + assert!(names.contains(&"vector.container")); + assert!(!names.contains(&"immich.container")); + assert!(!names.contains(&"immich.volume")); + + std::env::remove_var("CORE_OPS_HOST"); +} diff --git a/tests/integration/test_socket_dropins.rs b/tests/integration/test_socket_dropins.rs new file mode 100644 index 0000000..2bee9a6 --- /dev/null +++ b/tests/integration/test_socket_dropins.rs @@ -0,0 +1,91 @@ +use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; + +use core_ops::core::types::{ + Boundaries, BoundaryScope, DesiredState, EnabledState, Invariant, QuadletType, RestartPolicy, + Workload, +}; +use core_ops::io::observed::read_observed_state; +use crate::integration::env_lock::path_lock; + +fn temp_dir(prefix: &str) -> PathBuf { + let mut path = std::env::temp_dir(); + let nanos = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time") + .as_nanos(); + path.push(format!("{prefix}_{nanos}")); + path +} + +fn socket_dropin_workload(name: &str, contents: &str) -> Workload { + Workload { + name: name.to_string(), + quadlet_type: QuadletType::SocketDropIn, + quadlet_contents: contents.to_string(), + systemd_unit_name: name.to_string(), + enabled_state: EnabledState::Enabled, + 
restart_policy: RestartPolicy::Always, + } +} + +#[test] +fn observed_state_ignores_unmanaged_socket_dropins() { + let _lock = path_lock().lock().expect("path lock"); + let quadlet_dir = temp_dir("core_ops_socket_dropins_quadlets"); + std::fs::create_dir_all(&quadlet_dir).expect("create quadlet dir"); + + let systemd_dir = temp_dir("core_ops_socket_dropins_systemd"); + std::fs::create_dir_all(&systemd_dir).expect("create systemd dir"); + std::env::set_var("CORE_OPS_SYSTEMD_UNIT_DIR", &systemd_dir); + let _guard = EnvGuard { + key: "CORE_OPS_SYSTEMD_UNIT_DIR", + }; + + let socket_path = systemd_dir.join("alpha.socket"); + let socket_contents = "# managed-by: core-ops\n[Socket]\nListenStream=127.0.0.1:8080\n"; + std::fs::write(&socket_path, socket_contents).expect("write socket"); + + let dropin_dir = systemd_dir.join("alpha.socket.d"); + std::fs::create_dir_all(&dropin_dir).expect("create dropin dir"); + let known_path = dropin_dir.join("10-known.conf"); + let unknown_path = dropin_dir.join("20-unknown.conf"); + std::fs::write(&known_path, "Known=1").expect("write known dropin"); + std::fs::write(&unknown_path, "Unknown=1").expect("write unknown dropin"); + + let desired = DesiredState { + repository_ref: "repo".to_string(), + revision_id: "rev".to_string(), + workloads: vec![socket_dropin_workload( + "alpha.socket.d/10-known.conf", + "Known=1", + )], + managed_config_paths: Vec::new(), + managed_config_roots: Vec::new(), + invariants: vec![Invariant::BoundariesDeclared, Invariant::DeterministicPlan], + boundaries: Boundaries { + scopes: vec![BoundaryScope::QuadletSystemd], + }, + }; + + let observed = read_observed_state(&quadlet_dir, Some(&desired), Some("obs".to_string())) + .expect("observed"); + + let names: Vec<_> = observed + .workloads + .iter() + .map(|w| w.systemd_unit_name.as_str()) + .collect(); + assert!(names.contains(&"alpha.socket.d/10-known.conf")); + assert!(!names.contains(&"alpha.socket.d/20-unknown.conf")); +} + +struct EnvGuard { + key: 
&'static str, +} + +impl Drop for EnvGuard { + fn drop(&mut self) { + std::env::remove_var(self.key); + } +} diff --git a/tests/integration/test_unit_lifecycle.rs b/tests/integration/test_unit_lifecycle.rs index 3f4d7a1..a619267 100644 --- a/tests/integration/test_unit_lifecycle.rs +++ b/tests/integration/test_unit_lifecycle.rs @@ -85,6 +85,65 @@ fn apply_executes_unit_lifecycle_actions() { assert!(log_contents.contains("stop alpha.service")); } +#[test] +fn apply_preserves_start_restart_order() { + let _lock = path_lock().lock().expect("path lock"); + let temp = temp_dir("core_ops_order"); + fs::create_dir_all(&temp).expect("temp dir"); + + let log_path = temp.join("systemctl.log"); + write_systemctl_stub(&temp, &log_path); + + let old_path = std::env::var("PATH").unwrap_or_default(); + let new_path = format!("{}:{}", temp.display(), old_path); + std::env::set_var("PATH", new_path); + let _guard = PathGuard { previous: old_path }; + + let quadlet_dir = temp.join("quadlets"); + fs::create_dir_all(&quadlet_dir).expect("quadlet dir"); + + let dep = Workload { + name: "dep".to_string(), + quadlet_type: core_ops::core::types::QuadletType::Container, + quadlet_contents: "[Container]\nImage=alpine".to_string(), + systemd_unit_name: "dep.container".to_string(), + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + }; + let svc = Workload { + name: "svc".to_string(), + quadlet_type: core_ops::core::types::QuadletType::Container, + quadlet_contents: "[Container]\nImage=alpine".to_string(), + systemd_unit_name: "svc.container".to_string(), + enabled_state: EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + }; + + let plan = ReconciliationPlan { + plan_id: "plan:test".to_string(), + desired_revision_id: "rev".to_string(), + observed_revision_id: None, + actions: vec![ + action(PlanActionType::WriteQuadlet, "dep.container"), + action(PlanActionType::WriteQuadlet, "svc.container"), + action(PlanActionType::ReloadSystemd, 
"dep.container"), + action(PlanActionType::ReloadSystemd, "svc.container"), + action(PlanActionType::StartUnit, "dep.container"), + action(PlanActionType::RestartUnit, "svc.container"), + ], + safety_checks: Vec::new(), + expected_outcomes: Vec::new(), + }; + + let result = apply_plan(&plan, &[dep, svc], &quadlet_dir, true); + assert!(result.is_ok()); + + let log_contents = fs::read_to_string(&log_path).expect("read log"); + let start_idx = log_contents.find("start dep.service").expect("start"); + let restart_idx = log_contents.find("restart svc.service").expect("restart"); + assert!(start_idx < restart_idx); +} + fn action(action_type: PlanActionType, target: &str) -> PlanAction { PlanAction { action_type, diff --git a/tests/integration/test_validation_fail.rs b/tests/integration/test_validation_fail.rs index 83350d4..1e52fca 100644 --- a/tests/integration/test_validation_fail.rs +++ b/tests/integration/test_validation_fail.rs @@ -9,6 +9,8 @@ fn validation_failure_is_reported_as_validation_class() { repository_ref: "fixture".to_string(), revision_id: "rev".to_string(), workloads: Vec::new(), + managed_config_paths: Vec::new(), + managed_config_roots: Vec::new(), invariants: vec![Invariant::DeterministicPlan], boundaries: Boundaries { scopes: Vec::new() }, }; @@ -23,7 +25,7 @@ fn validation_failure_is_reported_as_validation_class() { let deps = ReconcileDependencies { load_desired: &|| Ok(desired.clone()), - read_observed: &|| Ok(observed.clone()), + read_observed: &|_desired| Ok(observed.clone()), apply_plan: &|_, _| Ok(()), }; diff --git a/tests/integration/test_verification.rs b/tests/integration/test_verification.rs index fd1c348..408b115 100644 --- a/tests/integration/test_verification.rs +++ b/tests/integration/test_verification.rs @@ -109,8 +109,8 @@ fn reconcile_apply_reports_verification_failure() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| { - 
read_observed_state(&host_quadlets, Some("obs".to_string())).map_err(map_io_error) + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), Some("obs".to_string())).map_err(map_io_error) }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, true) diff --git a/tests/integration/test_verification_rules.rs b/tests/integration/test_verification_rules.rs index 5b52471..99ad804 100644 --- a/tests/integration/test_verification_rules.rs +++ b/tests/integration/test_verification_rules.rs @@ -123,8 +123,8 @@ fn verification_rules_accept_volume_inactive() { let deps = ReconcileDependencies { load_desired: &|| load_desired_state(repo.to_str().unwrap(), &rev).map_err(map_io_error), - read_observed: &|| { - read_observed_state(&host_quadlets, Some("obs".to_string())).map_err(map_io_error) + read_observed: &|desired| { + read_observed_state(&host_quadlets, Some(desired), Some("obs".to_string())).map_err(map_io_error) }, apply_plan: &|plan, desired| { apply_plan(plan, &desired.workloads, &host_quadlets, true) diff --git a/tests/unit/mod.rs b/tests/unit/mod.rs index 56e8c52..4a50340 100644 --- a/tests/unit/mod.rs +++ b/tests/unit/mod.rs @@ -1,6 +1,10 @@ mod test_audit; mod test_invariants; mod test_planner; +mod test_repo_selection; +mod test_dropin_order; +mod test_evaluation_determinism; +mod test_socket_dropin_precedence; mod test_types; mod test_verification; mod test_validation; diff --git a/tests/unit/test_audit.rs b/tests/unit/test_audit.rs index 5543b78..7c97cd0 100644 --- a/tests/unit/test_audit.rs +++ b/tests/unit/test_audit.rs @@ -17,6 +17,8 @@ fn desired_state() -> DesiredState { enabled_state: EnabledState::Enabled, restart_policy: RestartPolicy::Always, }], + managed_config_paths: Vec::new(), + managed_config_roots: Vec::new(), invariants: vec![Invariant::BoundariesDeclared, Invariant::DeterministicPlan], boundaries: Boundaries { scopes: vec![BoundaryScope::QuadletSystemd], diff --git a/tests/unit/test_dropin_order.rs
b/tests/unit/test_dropin_order.rs new file mode 100644 index 0000000..7aeeaf7 --- /dev/null +++ b/tests/unit/test_dropin_order.rs @@ -0,0 +1,66 @@ +use core_ops::core::evaluate::evaluate_desired_state; +use core_ops::core::types::{ + ArtifactSource, DropInSource, EvaluationInput, HostDeclaration, HostOverlaySet, QuadletType, + ServiceCatalog, ServiceDefinition, +}; +use std::collections::BTreeMap; + +#[test] +fn applies_dropins_in_lexicographic_order_with_host_overrides_last() { + let service = ServiceDefinition { + name: "alpha".to_string(), + artifacts: vec![ArtifactSource { + name: "alpha.container".to_string(), + quadlet_type: QuadletType::Container, + contents: "BASE".to_string(), + source_path: "/services/alpha/alpha.container".to_string(), + }], + base_dropins: vec![ + DropInSource { + target: "alpha.container".to_string(), + contents: "B".to_string(), + source_path: "/services/alpha/alpha.container.d/20-b.conf".to_string(), + }, + DropInSource { + target: "alpha.container".to_string(), + contents: "A".to_string(), + source_path: "/services/alpha/alpha.container.d/10-a.conf".to_string(), + }, + ], + config_files: Vec::new(), + }; + + let mut services = BTreeMap::new(); + services.insert("alpha".to_string(), service); + + let input = EvaluationInput { + host: HostDeclaration { + host: "kadath".to_string(), + services: vec!["alpha".to_string()], + }, + catalog: ServiceCatalog { services }, + overlays: HostOverlaySet { + host: "kadath".to_string(), + overrides: vec![DropInSource { + target: "alpha.container".to_string(), + contents: "HOST".to_string(), + source_path: "/hosts/kadath/overrides/alpha.container.d/20-host.conf".to_string(), + }], + config_overrides: Vec::new(), + }, + }; + + let output = evaluate_desired_state(&input).expect("evaluate"); + let artifact = &output.artifacts[0]; + + assert_eq!(artifact.contents, "BASE\nA\nB\nHOST"); + assert_eq!( + artifact.source_layers, + vec![ + "/services/alpha/alpha.container", + 
"/services/alpha/alpha.container.d/10-a.conf", + "/services/alpha/alpha.container.d/20-b.conf", + "/hosts/kadath/overrides/alpha.container.d/20-host.conf", + ] + ); +} diff --git a/tests/unit/test_evaluation_determinism.rs b/tests/unit/test_evaluation_determinism.rs new file mode 100644 index 0000000..687c14a --- /dev/null +++ b/tests/unit/test_evaluation_determinism.rs @@ -0,0 +1,60 @@ +use core_ops::core::evaluate::evaluate_desired_state; +use core_ops::core::types::{ + ArtifactSource, DropInSource, EvaluationInput, HostDeclaration, HostOverlaySet, QuadletType, + ServiceCatalog, ServiceDefinition, +}; +use std::collections::BTreeMap; + +#[test] +fn evaluation_is_deterministic_for_same_input() { + let service = ServiceDefinition { + name: "alpha".to_string(), + artifacts: vec![ + ArtifactSource { + name: "beta.container".to_string(), + quadlet_type: QuadletType::Container, + contents: "B".to_string(), + source_path: "/services/alpha/beta.container".to_string(), + }, + ArtifactSource { + name: "alpha.container".to_string(), + quadlet_type: QuadletType::Container, + contents: "A".to_string(), + source_path: "/services/alpha/alpha.container".to_string(), + }, + ], + base_dropins: vec![DropInSource { + target: "alpha.container".to_string(), + contents: "X".to_string(), + source_path: "/services/alpha/alpha.container.d/10-x.conf".to_string(), + }], + config_files: Vec::new(), + }; + + let mut services = BTreeMap::new(); + services.insert("alpha".to_string(), service); + + let input = EvaluationInput { + host: HostDeclaration { + host: "ulthar".to_string(), + services: vec!["alpha".to_string()], + }, + catalog: ServiceCatalog { services }, + overlays: HostOverlaySet { + host: "ulthar".to_string(), + overrides: vec![DropInSource { + target: "alpha.container".to_string(), + contents: "Y".to_string(), + source_path: "/hosts/ulthar/overrides/alpha.container.d/20-y.conf".to_string(), + }], + config_overrides: Vec::new(), + }, + }; + + let first = 
evaluate_desired_state(&input).expect("evaluate"); + let second = evaluate_desired_state(&input).expect("evaluate"); + + assert_eq!(first, second); + assert_eq!(first.artifacts[0].name, "alpha.container"); + assert_eq!(first.artifacts[1].name, "beta.container"); +} diff --git a/tests/unit/test_invariants.rs b/tests/unit/test_invariants.rs index 259db46..ba77e1b 100644 --- a/tests/unit/test_invariants.rs +++ b/tests/unit/test_invariants.rs @@ -9,6 +9,8 @@ fn idempotent_apply_invariant_is_allowed() { repository_ref: "fixture".to_string(), revision_id: "rev".to_string(), workloads: Vec::new(), + managed_config_paths: Vec::new(), + managed_config_roots: Vec::new(), invariants: vec![ Invariant::BoundariesDeclared, Invariant::DeterministicPlan, diff --git a/tests/unit/test_planner.rs b/tests/unit/test_planner.rs index 5dc9e10..76a42c6 100644 --- a/tests/unit/test_planner.rs +++ b/tests/unit/test_planner.rs @@ -12,6 +12,8 @@ fn workload_with_type(name: &str, quadlet_type: QuadletType) -> Workload { let extension = match quadlet_type { QuadletType::Container => "container", QuadletType::Socket => "socket", + QuadletType::SocketDropIn => "socket-dropin", + QuadletType::ConfigFile => "config", QuadletType::Volume => "volume", QuadletType::Pod => "pod", QuadletType::Network => "network", @@ -31,6 +33,8 @@ fn desired_state(workloads: Vec) -> DesiredState { repository_ref: "repo".to_string(), revision_id: "rev".to_string(), workloads, + managed_config_paths: Vec::new(), + managed_config_roots: Vec::new(), invariants: vec![Invariant::BoundariesDeclared, Invariant::DeterministicPlan], boundaries: Boundaries { scopes: vec![BoundaryScope::QuadletSystemd], @@ -48,6 +52,17 @@ fn observed_state(workloads: Vec) -> ObservedState { } } +fn socket_dropin_workload(name: &str) -> Workload { + Workload { + name: name.to_string(), + quadlet_type: QuadletType::SocketDropIn, + quadlet_contents: "[Socket]".to_string(), + systemd_unit_name: name.to_string(), + enabled_state: 
EnabledState::Enabled, + restart_policy: RestartPolicy::Always, + } +} + #[test] fn plan_is_deterministic_by_name_order() { let desired = desired_state(vec![workload("beta"), workload("alpha")]); @@ -81,6 +96,7 @@ fn plan_orders_actions_by_quadlet_type() { let desired = desired_state(vec![ workload_with_type("socket", QuadletType::Socket), workload_with_type("container", QuadletType::Container), + workload_with_type("network", QuadletType::Network), workload_with_type("volume", QuadletType::Volume), ]); let observed = observed_state(Vec::new()); @@ -97,6 +113,11 @@ fn plan_orders_actions_by_quadlet_type() { "container.container".to_string(), "container.container".to_string(), ]; + let network_prefix = vec![ + "network.network".to_string(), + "network.network".to_string(), + "network.network".to_string(), + ]; let socket_prefix = vec![ "socket.socket".to_string(), "socket.socket".to_string(), @@ -104,6 +125,35 @@ fn plan_orders_actions_by_quadlet_type() { ]; assert_eq!(&targets[..2], &volume_prefix[..]); - assert_eq!(&targets[2..5], &container_prefix[..]); - assert_eq!(&targets[5..8], &socket_prefix[..]); + assert_eq!(&targets[2..5], &network_prefix[..]); + assert_eq!(&targets[5..8], &container_prefix[..]); + assert_eq!(&targets[8..11], &socket_prefix[..]); +} + +#[test] +fn plan_restarts_socket_when_socket_dropin_removed() { + let desired = desired_state(Vec::new()); + let observed = observed_state(vec![socket_dropin_workload( + "alpha.socket.d/10-host.conf", + )]); + + let plan = plan(&desired, &observed).expect("plan should succeed"); + + let actions: Vec<_> = plan + .actions + .iter() + .map(|a| (a.action_type.clone(), a.target.as_str())) + .collect(); + assert!(actions.iter().any(|(action, target)| { + *action == core_ops::core::types::PlanActionType::RemoveQuadlet + && *target == "alpha.socket.d/10-host.conf" + })); + assert!(actions.iter().any(|(action, target)| { + *action == core_ops::core::types::PlanActionType::ReloadSystemd + && *target == 
"alpha.socket.d/10-host.conf" + })); + assert!(actions.iter().any(|(action, target)| { + *action == core_ops::core::types::PlanActionType::RestartUnit + && *target == "alpha.socket" + })); } diff --git a/tests/unit/test_repo_selection.rs b/tests/unit/test_repo_selection.rs new file mode 100644 index 0000000..3aeb9e3 --- /dev/null +++ b/tests/unit/test_repo_selection.rs @@ -0,0 +1,36 @@ +use std::path::Path; + +use core_ops::io::repo::load_host_declaration; + +#[test] +fn loads_host_declaration_when_dir_and_field_match() { + let temp = tempfile::tempdir().expect("tempdir"); + let host_dir = temp.path().join("kadath"); + std::fs::create_dir_all(&host_dir).expect("host dir"); + std::fs::write( + host_dir.join("host.yaml"), + "host: kadath\nservices:\n - traefik\n", + ) + .expect("write host.yaml"); + + let host = load_host_declaration(Path::new(&host_dir)).expect("load host declaration"); + + assert_eq!(host.host, "kadath"); + assert_eq!(host.services, vec!["traefik".to_string()]); +} + +#[test] +fn rejects_host_declaration_when_host_mismatch() { + let temp = tempfile::tempdir().expect("tempdir"); + let host_dir = temp.path().join("ulthar"); + std::fs::create_dir_all(&host_dir).expect("host dir"); + std::fs::write( + host_dir.join("host.yaml"), + "host: kadath\nservices:\n - traefik\n", + ) + .expect("write host.yaml"); + + let err = load_host_declaration(Path::new(&host_dir)).expect_err("should fail"); + + assert!(format!("{err}").contains("does not match")); +} diff --git a/tests/unit/test_socket_dropin_precedence.rs b/tests/unit/test_socket_dropin_precedence.rs new file mode 100644 index 0000000..27d5d87 --- /dev/null +++ b/tests/unit/test_socket_dropin_precedence.rs @@ -0,0 +1,37 @@ +use core_ops::core::validation::validate_socket_dropin_precedence; +use core_ops::core::types::DropInSource; + +#[test] +fn rejects_host_socket_dropins_that_sort_before_base() { + let base = vec![DropInSource { + target: "alpha.socket".to_string(), + contents: "BASE".to_string(), + 
source_path: "/services/alpha/alpha.socket.d/10-defaults.conf".to_string(), + }]; + let host = vec![DropInSource { + target: "alpha.socket".to_string(), + contents: "HOST".to_string(), + source_path: "/hosts/kadath/overrides/alpha.socket.d/05-host.conf".to_string(), + }]; + + let err = validate_socket_dropin_precedence(&base, &host).expect_err("should fail"); + assert!(err + .message + .contains("host socket drop-in must sort after base drop-ins")); +} + +#[test] +fn accepts_host_socket_dropins_that_sort_after_base() { + let base = vec![DropInSource { + target: "alpha.socket".to_string(), + contents: "BASE".to_string(), + source_path: "/services/alpha/alpha.socket.d/10-defaults.conf".to_string(), + }]; + let host = vec![DropInSource { + target: "alpha.socket".to_string(), + contents: "HOST".to_string(), + source_path: "/hosts/kadath/overrides/alpha.socket.d/90-host.conf".to_string(), + }]; + + validate_socket_dropin_precedence(&base, &host).expect("should pass"); +} diff --git a/tests/unit/test_validation.rs b/tests/unit/test_validation.rs index 5ae0f48..8927a97 100644 --- a/tests/unit/test_validation.rs +++ b/tests/unit/test_validation.rs @@ -17,6 +17,8 @@ fn base_desired() -> DesiredState { enabled_state: EnabledState::Enabled, restart_policy: RestartPolicy::Always, }], + managed_config_paths: Vec::new(), + managed_config_roots: Vec::new(), invariants: vec![Invariant::BoundariesDeclared, Invariant::DeterministicPlan], boundaries: Boundaries { scopes: vec![BoundaryScope::QuadletSystemd], diff --git a/tests/unit/test_verification.rs b/tests/unit/test_verification.rs index 5bd2680..2144940 100644 --- a/tests/unit/test_verification.rs +++ b/tests/unit/test_verification.rs @@ -20,6 +20,8 @@ fn desired_state(workloads: Vec) -> DesiredState { repository_ref: "repo".to_string(), revision_id: "rev".to_string(), workloads, + managed_config_paths: Vec::new(), + managed_config_roots: Vec::new(), invariants: vec![Invariant::BoundariesDeclared, Invariant::DeterministicPlan], 
boundaries: Boundaries { scopes: vec![BoundaryScope::QuadletSystemd],