diff --git a/.gitignore b/.gitignore index d7715538..f3893486 100644 --- a/.gitignore +++ b/.gitignore @@ -275,3 +275,4 @@ Web/Resgrid.WebCore/wwwroot/lib/* .claude/settings.local.json .claude/settings.local.json /Web/Resgrid.Web/wwwroot/js/ng/chunks +opencode.json diff --git a/.opencode/skills/80-20-review/SKILL.md b/.opencode/skills/80-20-review/SKILL.md new file mode 100644 index 00000000..e90c711b --- /dev/null +++ b/.opencode/skills/80-20-review/SKILL.md @@ -0,0 +1,276 @@ +--- +name: 80-20-review +description: > + Focus code review effort on the 20% of code that causes 80% of issues. + Prioritizes data access, security, concurrency, and integration boundaries + over formatting and style. Uses blast radius scoring to determine review + depth. Includes checkpoint schedules, critical path identification, and + a batch review checklist. Load this skill when reviewing code, PRs, or + architecture, or when the user mentions "review", "code review", "PR review", + "what should I review", "review priorities", "blast radius", or "critical path". +--- + +# 80/20 Review + +## Core Principles + +1. **Review at checkpoints, not continuously** — Constant review interrupts flow. Schedule reviews at natural breakpoints: post-implementation, pre-PR, post-integration, and post-deploy. Each checkpoint has a different focus. + +2. **Focus on data access, security, concurrency, integration** — These are the 20% of code areas that cause 80% of production incidents. A missing `CancellationToken` is more dangerous than a misnamed variable. Review depth should match risk. + +3. **Blast radius determines depth** — A utility function used in one place gets a glance. A middleware change that affects every request gets a thorough review. Score changes by blast radius and invest review time proportionally. + +4. **Automate the trivial** — Formatting, import ordering, naming conventions, and basic anti-patterns should be caught by tools (formatters, analyzers, hooks), not humans. 
Save human attention for things tools can't catch: logic errors, design flaws, and missing edge cases. + +## Patterns + +### Checkpoint Schedule + +Review at these natural breakpoints, each with a specific focus: + +``` +CHECKPOINT 1: Post-Implementation (self-review) +WHEN: After completing a feature or fix, before committing +FOCUS: Does it work? Does it compile? Do tests pass? +DEPTH: Quick — 5 minutes +CHECKLIST: +□ dotnet build passes +□ dotnet test passes (all existing + new tests) +□ get_diagnostics shows no new warnings +□ No obvious anti-patterns (DateTime.Now, new HttpClient, async void) + +CHECKPOINT 2: Pre-PR (focused review) +WHEN: Before creating a pull request +FOCUS: Would a staff engineer approve this? +DEPTH: Thorough on critical paths, glance at routine code — 15-30 minutes +CHECKLIST: +□ Data access: N+1 queries, missing Include, no tracking where possible +□ Security: Auth checks, input validation, no secrets in code +□ Concurrency: CancellationToken propagated, no deadlocks, thread-safe state +□ Error handling: Result pattern used, no swallowed exceptions +□ API surface: TypedResults, proper status codes, response DTOs (not entities) +□ Integration: Events published correctly, consumer idempotency +□ Tests: Integration tests cover the happy path + main error case +□ Breaking changes: Public API surface unchanged (or intentionally changed) + +CHECKPOINT 3: Post-Integration (system review) +WHEN: After merging to main or integrating with other modules +FOCUS: Does it play well with the rest of the system? +DEPTH: Targeted — check integration points — 10 minutes +CHECKLIST: +□ Cross-module events consumed correctly +□ Database migrations applied cleanly +□ No circular dependencies introduced +□ CI pipeline passes + +CHECKPOINT 4: Post-Deploy (production readiness) +WHEN: Before or immediately after deploying +FOCUS: Is it safe in production? 
+DEPTH: Quick but critical — 5 minutes +CHECKLIST: +□ Health checks pass +□ Logs produce structured output (no PII) +□ Retry/circuit breaker policies configured for external calls +□ Feature flags in place for risky changes (if applicable) +``` + +### Critical Path Identification + +Use MCP tools to identify the code that matters most: + +``` +HIGH-RISK CODE (review thoroughly): +1. Data access layer + → find_references for DbContext usage + → Check for N+1 (missing Include/AsSplitQuery), missing CancellationToken + → Check for raw SQL injection risks + +2. Authentication & authorization + → find_implementations of IAuthorizationHandler + → Check every endpoint has [Authorize] or explicit [AllowAnonymous] + → Verify token validation configuration + +3. External service integration + → find_references for HttpClient, IHttpClientFactory + → Check for retry policies (Polly), timeout configuration + → Verify error handling for external failures + +4. Concurrency & shared state + → find_references for static fields, ConcurrentDictionary + → Check BackgroundService implementations for scope management + → Verify CancellationToken propagation in async chains + +5. 
Message consumers + → find_implementations of IConsumer + → Check for idempotency (handle duplicate messages) + → Verify error handling and dead letter configuration + +LOW-RISK CODE (glance or skip): +- DTOs and record definitions +- Extension method registration (AddXxx pattern) +- Configuration binding (Options pattern) +- Simple CRUD with no business logic +- Test helper/fixture code +``` + +### Blast Radius Scoring + +Score each change to determine review investment: + +``` +CRITICAL (30+ min review): +- Middleware changes (affects every request) +- Authentication/authorization changes +- Database schema changes (migrations) +- Shared kernel / cross-cutting concern changes +- CI/CD pipeline changes +Signal: Many dependents, hard to roll back, security implications + +HIGH (15-30 min review): +- New module or subsystem +- Public API surface changes +- Message consumer changes (affects async workflows) +- EF Core configuration changes (query behavior, indexes) +Signal: Multiple consumers, behavioral changes, data integrity + +MEDIUM (5-15 min review): +- New feature within existing module (follows patterns) +- Test additions or modifications +- New endpoint following established conventions +Signal: Localized impact, follows existing patterns + +LOW (glance or auto-approve): +- Documentation updates +- Formatting / import ordering +- Adding logging statements +- Renaming internal variables +Signal: No behavioral change, cosmetic only +``` + +### Batch Review Checklist + +The 10 highest-value checks for any .NET code review: + +``` +THE TOP 10 (in priority order): + +1. SQL INJECTION — Any raw SQL or string-interpolated queries? + → EF parameterizes by default, but check for FromSqlRaw with user input + +2. AUTH GAPS — Every endpoint has explicit auth? No open endpoints by accident? + → Check for missing [Authorize] on new controllers/endpoint groups + +3. N+1 QUERIES — Loading collections without Include/join? 
+ → Check any LINQ that accesses navigation properties after the query + +4. CANCELLATION PROPAGATION — CancellationToken passed through the full chain? + → From endpoint → handler → service → EF query. Breaking the chain = uninterruptible + +5. SECRET EXPOSURE — Any connection strings, API keys, or tokens in code? + → Check for hardcoded strings that look like credentials + +6. EXCEPTION SWALLOWING — Catch blocks that silently discard errors? + → Empty catch, catch with only a log, catch(Exception) without rethrow + +7. ASYNC DEADLOCKS — .Result, .Wait(), .GetAwaiter().GetResult()? + → Any synchronous blocking on async code = potential deadlock + +8. ENTITY LEAKS — Domain entities returned directly from API endpoints? + → Entities should map to response DTOs/records at the API boundary + +9. MISSING VALIDATION — User input reaching business logic unchecked? + → Every command/request DTO should have a corresponding validator + +10. RESOURCE LEAKS — Disposable objects not in using/await using blocks? + → HttpClient, DbContext, FileStream, etc. created without disposal +``` + +### Review with MCP Tools + +Leverage Roslyn MCP tools for efficient, targeted review: + +``` +REVIEW WORKFLOW WITH MCP: + +1. get_project_graph → Understand what changed in the solution structure +2. get_diagnostics → Catch compiler warnings (CS warnings often signal real issues) +3. detect_antipatterns → Automated anti-pattern scan +4. find_dead_code → Check if the change left any dead code behind +5. detect_circular_dependencies → Verify no new cycles introduced +6. get_test_coverage_map → Verify changed code has test coverage + +This MCP-first approach reviews the system-level impact in ~300 tokens +before you even read a single file. +``` + +## Anti-patterns + +### Reviewing Every Trivial Change + +``` +// BAD — spending 20 minutes reviewing a rename +PR: Rename `OrderSvc` → `OrderService` across 8 files +Reviewer spends 20 minutes verifying each rename is correct. 
+*This is what Find & Replace + tests are for* + +// GOOD — trust the tooling for mechanical changes +PR: Rename `OrderSvc` → `OrderService` across 8 files +Reviewer: "Tests pass? Build passes? Auto-approve." +*Spend that 20 minutes reviewing the authentication change instead* +``` + +### Skipping Reviews Because "It's Just a Small Change" + +``` +// BAD — one-line change to auth middleware, no review +"It's just adding a header, no need to review" +*That header now leaks internal server info to every response* + +// GOOD — blast radius determines review, not line count +One-line change to middleware → CRITICAL blast radius → thorough review +"This adds a header to every HTTP response. Is it safe? Does it leak info? +Does it affect caching? Does it break CORS?" +``` + +### Style Over Substance + +``` +// BAD — reviewer focuses on naming while missing the N+1 +"Line 15: rename 'x' to 'order' for clarity" +"Line 23: add a blank line between methods" +*Meanwhile, line 28 has an N+1 query that will hammer the database* + +// GOOD — substance first, style if time permits +"Line 28: This will produce an N+1 — add .Include(o => o.Items)" +"Line 42: Missing CancellationToken in the EF query" +*Only after critical issues are addressed: "Line 15: consider renaming 'x'"* +``` + +### Manual Review of Automatable Checks + +``` +// BAD — manually checking formatting in every PR +Reviewer spends 5 minutes checking import ordering, bracket placement, +and whitespace consistency. 
+*Formatters and analyzers do this in milliseconds* + +// GOOD — automate the trivial, review the meaningful +Pre-commit hook: dotnet format --verify-no-changes +CI step: dotnet format --verify-no-changes (catches anything the hook missed) +Reviewer: focuses on logic, security, performance, and design +``` + +## Decision Guide + +| Scenario | Review Depth | Focus Area | +|----------|-------------|------------| +| New endpoint following existing pattern | Medium (5-15 min) | Auth, validation, response mapping | +| Authentication/authorization change | Critical (30+ min) | Every code path, edge cases, token handling | +| Database migration | Critical (30+ min) | Data loss risk, rollback strategy, index impact | +| New module or subsystem | High (15-30 min) | Architecture, boundaries, integration points | +| Bug fix with clear root cause | Medium (5-15 min) | Root cause correctness, regression test | +| Rename/formatting/docs PR | Low (glance) | Tests pass, build passes, auto-approve | +| EF Core query changes | High (15-30 min) | N+1, tracking, cancellation, SQL generated | +| Middleware or filter changes | Critical (30+ min) | Blast radius — affects every request | +| Test additions | Low-Medium | Test quality, are they testing behavior not implementation | +| CI/CD pipeline changes | High (15-30 min) | Security (secrets), deployment safety, rollback | diff --git a/.opencode/skills/api-versioning/SKILL.md b/.opencode/skills/api-versioning/SKILL.md new file mode 100644 index 00000000..c4173be4 --- /dev/null +++ b/.opencode/skills/api-versioning/SKILL.md @@ -0,0 +1,158 @@ +--- +name: api-versioning +description: > + API versioning strategies for ASP.NET Core. Covers Asp.Versioning library, + URL segment, header, and query string strategies, version deprecation, and + OpenAPI integration. 
+ Load this skill when adding versioning to an API, evolving an API with breaking + changes, or when the user mentions "API version", "versioning", "v1/v2", + "Asp.Versioning", "deprecation", "breaking change", or "backward compatibility". +--- + +# API Versioning + +## Core Principles + +1. **Version from day one** — Adding versioning later is painful. Start with a version in the URL even if you only have v1. +2. **URL segment versioning is the default** — `/api/v1/orders` is the most discoverable and cache-friendly strategy. +3. **Never break existing versions** — Add a new version for breaking changes. Deprecate the old version with a timeline. +4. **Version the API, not individual endpoints** — All endpoints in a version group share the same version number. + +## Patterns + +### Setup with Asp.Versioning + +```csharp +// Program.cs +builder.Services.AddApiVersioning(options => +{ + options.DefaultApiVersion = new ApiVersion(1, 0); + options.AssumeDefaultVersionWhenUnspecified = true; + options.ReportApiVersions = true; + options.ApiVersionReader = new UrlSegmentApiVersionReader(); +}) +.AddApiExplorer(options => +{ + options.GroupNameFormat = "'v'VVV"; + options.SubstituteApiVersionInUrl = true; +}); +``` + +### URL Segment Versioning (Recommended) + +```csharp +var v1 = app.NewApiVersionSet() + .HasApiVersion(new ApiVersion(1, 0)) + .Build(); + +var v2 = app.NewApiVersionSet() + .HasApiVersion(new ApiVersion(2, 0)) + .Build(); + +app.MapGroup("/api/v{version:apiVersion}/orders") + .WithApiVersionSet(v1) + .WithTags("Orders") + .MapOrderEndpointsV1(); + +app.MapGroup("/api/v{version:apiVersion}/orders") + .WithApiVersionSet(v2) + .WithTags("Orders") + .MapOrderEndpointsV2(); +``` + +### Header Versioning (Alternative) + +```csharp +options.ApiVersionReader = new HeaderApiVersionReader("X-Api-Version"); + +// Client sends: X-Api-Version: 2.0 +``` + +### Deprecating a Version + +```csharp +var v1 = app.NewApiVersionSet() + .HasDeprecatedApiVersion(new 
ApiVersion(1, 0)) + .HasApiVersion(new ApiVersion(2, 0)) + .Build(); + +// Response headers will include: api-deprecated-versions: 1.0 +``` + +### Version-Specific Endpoint Groups + +```csharp +public static class OrderEndpointsV1 +{ + public static RouteGroupBuilder MapOrderEndpointsV1(this RouteGroupBuilder group) + { + group.MapGet("/{id:guid}", GetOrderV1); + group.MapPost("/", CreateOrderV1); + return group; + } + + private static async Task<Results<Ok<OrderV1Response>, NotFound>> GetOrderV1( + Guid id, ISender sender, CancellationToken ct) + { + // V1 response shape + var result = await sender.Send(new GetOrder.Query(id), ct); + return result.IsSuccess + ? TypedResults.Ok(result.Value.ToV1()) + : TypedResults.NotFound(); + } +} + +public static class OrderEndpointsV2 +{ + public static RouteGroupBuilder MapOrderEndpointsV2(this RouteGroupBuilder group) + { + group.MapGet("/{id:guid}", GetOrderV2); + group.MapPost("/", CreateOrderV2); + return group; + } + + private static async Task<Results<Ok<OrderV2Response>, NotFound>> GetOrderV2( + Guid id, ISender sender, CancellationToken ct) + { + // V2 response shape — includes new fields + var result = await sender.Send(new GetOrder.Query(id), ct); + return result.IsSuccess + ? TypedResults.Ok(result.Value.ToV2()) + : TypedResults.NotFound(); + } +} +``` + +## Anti-patterns + +### Don't Version Individual Endpoints + +```csharp +// BAD — inconsistent versioning within a group +app.MapGet("/api/v1/orders", ListOrdersV1); +app.MapGet("/api/v2/orders/{id}", GetOrderV2); // V2 only for this endpoint? 
+ +// GOOD — version the entire group +app.MapGroup("/api/v1/orders").MapOrderEndpointsV1(); +app.MapGroup("/api/v2/orders").MapOrderEndpointsV2(); +``` + +### Don't Use Query String Versioning as Default + +```csharp +// BAD for REST APIs — version hidden in query string, not cache-friendly +GET /api/orders?api-version=2.0 + +// GOOD — version in URL, discoverable and cacheable +GET /api/v2/orders +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| New public API | URL segment versioning from day one | +| Internal API between services | Header versioning (cleaner URLs) | +| Breaking response shape change | New version | +| Adding new optional fields | Same version (backwards compatible) | +| Deprecating a version | Mark deprecated, set sunset date, document migration path | diff --git a/.opencode/skills/architecture-advisor/SKILL.md b/.opencode/skills/architecture-advisor/SKILL.md new file mode 100644 index 00000000..14fcbf92 --- /dev/null +++ b/.opencode/skills/architecture-advisor/SKILL.md @@ -0,0 +1,249 @@ +--- +name: architecture-advisor +description: > + Architecture selection advisor for .NET applications. Asks structured questions + about domain complexity, team size, system lifetime, compliance, and integration + needs, then recommends the best-fit architecture: Vertical Slice, Clean Architecture, + DDD + Clean Architecture, or Modular Monolith. + Load this skill when the user asks "which architecture", "choose architecture", + "set up project", "new project", "architecture decision", "restructure", or + "how should I organize". Always load BEFORE any architecture-specific skill. +--- + +# Architecture Advisor + +## Core Principles + +1. **Ask before recommending** — Never prescribe an architecture without understanding the project. Run the questionnaire first to gather context about domain, team, lifetime, and constraints. +2. 
**Right-size the architecture** — The best architecture is the simplest one that handles the project's actual complexity. CRUD apps do not need DDD. Startups do not need Clean Architecture. Match complexity to real requirements, not aspirations. +3. **Architecture is not permanent** — Every architecture has an evolution path. Start simple and add structure when complexity demands it. Document the decision so the team knows when to evolve. +4. **Four supported architectures** — dotnet-claude-kit provides first-class patterns for Vertical Slice Architecture (VSA), Clean Architecture (CA), DDD + Clean Architecture, and Modular Monolith. Each has specific strengths and trade-offs. + +## The Architecture Questionnaire + +Before recommending an architecture, ask questions across these 6 categories. Not every question applies to every project — skip irrelevant ones. + +### Category 1: Domain Complexity + +| # | Question | Low Signal | High Signal | +|---|----------|-----------|-------------| +| 1 | How many distinct business entities does the system manage? | < 10 entities | 20+ entities with relationships | +| 2 | Do business rules involve multiple entities interacting? | Rules are per-entity CRUD | Complex invariants across entity groups | +| 3 | Are there business workflows with multiple steps? | Simple request → response | Sagas, approval chains, state machines | +| 4 | Do domain experts use specialized vocabulary? | Generic terms (create, update) | Ubiquitous language (underwrite, adjudicate) | + +### Category 2: Team & Organization + +| # | Question | Low Signal | High Signal | +|---|----------|-----------|-------------| +| 5 | How large is the development team? | 1-3 developers | 8+ developers, multiple teams | +| 6 | Do different teams own different parts of the system? | Single team owns everything | Teams aligned to business domains | +| 7 | What is the team's experience level with .NET? 
| Junior or mixed | Senior, experienced with patterns | + +### Category 3: System Lifetime & Scale + +| # | Question | Low Signal | High Signal | +|---|----------|-----------|-------------| +| 8 | Expected system lifetime? | < 2 years, MVP/prototype | 5+ years, long-lived product | +| 9 | How many concurrent users or requests per second? | < 100 RPS | 1000+ RPS, variable load | +| 10 | Will the system need to scale independently by feature area? | Uniform load | Hot spots need independent scaling | + +### Category 4: Regulatory & Compliance + +| # | Question | Low Signal | High Signal | +|---|----------|-----------|-------------| +| 11 | Are there audit trail or compliance requirements? | Basic logging sufficient | Full audit trail, SOX/HIPAA/PCI | +| 12 | Do different parts of the system have different security boundaries? | Single auth boundary | Multi-tenant, data isolation | + +### Category 5: Existing Codebase + +| # | Question | Low Signal | High Signal | +|---|----------|-----------|-------------| +| 13 | Is this greenfield or brownfield? | Greenfield, starting fresh | Brownfield, migrating from legacy | +| 14 | Are there existing architectural patterns the team follows? | No established patterns | Strong conventions in place | + +### Category 6: Integration Complexity + +| # | Question | Low Signal | High Signal | +|---|----------|-----------|-------------| +| 15 | How many external systems does this integrate with? | 0-2 simple APIs | 5+ systems with complex contracts | +| 16 | Are there event-driven or async communication needs? 
| Synchronous request/response | Event sourcing, pub/sub, eventual consistency | + +## Decision Matrix + +Map questionnaire answers to architecture recommendations: + +| Profile | Recommended Architecture | Why | +|---------|------------------------|-----| +| Low domain complexity, small team, short lifetime | **Vertical Slice Architecture** | Minimal ceremony, fast feature delivery, easy to understand | +| Low-medium domain complexity, any team size, API-focused | **Vertical Slice Architecture** | Feature cohesion, one file per operation, natural fit for minimal APIs | +| Medium domain complexity, medium team, long lifetime | **Clean Architecture** | Enforced boundaries via project references, testable domain, clear dependency direction | +| High domain complexity, specialized vocabulary, complex invariants | **DDD + Clean Architecture** | Aggregates protect invariants, value objects model domain concepts, domain events decouple side effects | +| Multiple bounded contexts, team-per-domain, independent deployment potential | **Modular Monolith** | Module isolation, independent data stores, evolution path to microservices | +| Brownfield with existing layered architecture | **Clean Architecture** | Familiar to teams coming from N-tier, preserves layer separation with better dependency direction | + +### When Signals Conflict + +If signals point to different architectures: + +1. **Default to simpler** — When in doubt, start with VSA and evolve +2. **Domain complexity wins** — High domain complexity overrides team size and lifetime signals +3. **Team familiarity matters** — A team experienced with CA will be more productive with CA than learning VSA, even if VSA is technically simpler +4. **Compliance drives structure** — Regulatory requirements often force stricter boundaries (CA or DDD) + +## Patterns + +### Vertical Slice Architecture (VSA) + +Organize by feature, not by layer. Each operation is a self-contained slice. 
+ +``` +src/MyApp.Api/ + Features/ + Orders/CreateOrder.cs # Request + Handler + Response + Endpoint + Orders/GetOrder.cs + Common/ + Behaviors/ValidationBehavior.cs + Persistence/AppDbContext.cs +``` + +**Best for:** CRUD-heavy apps, APIs, MVPs, small-medium teams, short-medium lifetime. +**Load skill:** `vertical-slice` + +### Clean Architecture (CA) + +Concentric layers with dependency inversion. Domain at the center, infrastructure at the edge. + +``` +src/ + MyApp.Domain/ # Entities, interfaces, domain logic + MyApp.Application/ # Use cases, DTOs, validation + MyApp.Infrastructure/ # EF Core, external services + MyApp.Api/ # Endpoints, middleware +``` + +**Best for:** Medium complexity, long-lived systems, teams familiar with layered patterns. +**Load skill:** `clean-architecture` + +### DDD + Clean Architecture + +Clean Architecture with tactical DDD patterns: aggregates, value objects, domain events. + +``` +src/ + MyApp.Domain/ # Aggregates, value objects, domain events, domain services + MyApp.Application/ # Use cases orchestrating aggregates + MyApp.Infrastructure/ # Persistence, external service adapters + MyApp.Api/ # Thin endpoints +``` + +**Best for:** Complex domains, specialized vocabulary, strict invariants, experienced teams. +**Load skill:** `ddd` + `clean-architecture` + +### Modular Monolith + +Independent modules in a single deployable unit, each with its own architecture internally. + +``` +src/ + MyApp.Host/ # Wires modules together + Modules/ + Orders/ # Own features, own DbContext, own architecture + Catalog/ # Can use VSA, CA, or DDD internally + MyApp.Shared/ # Integration event contracts only +``` + +**Best for:** Multiple bounded contexts, team-per-domain, future microservices extraction. +**Load template:** `modular-monolith` + +## Evolution Paths + +Architecture is not a one-time decision. Systems evolve. 
Here are the common migration paths: + +| From | To | Trigger | How | +|------|----|---------|-----| +| VSA | CA | Domain logic growing beyond handlers | Extract Domain + Application layers, keep features as use cases | +| VSA | Modular Monolith | Multiple bounded contexts emerging | Group features into modules, add module boundaries | +| CA | DDD + CA | Invariants becoming complex, primitive obsession | Introduce aggregates, value objects, domain events | +| Monolith | Modular Monolith | Teams stepping on each other, shared database coupling | Split into modules with own DbContexts and schemas | +| Modular Monolith | Microservices | Independent scaling needs, independent deployment | Extract modules into separate deployable services | + +## Anti-patterns + +### Picking Clean Architecture for a CRUD App + +``` +// BAD — 4 projects, 6+ files per feature for simple CRUD +src/MyApp.Domain/Entities/Product.cs +src/MyApp.Application/Products/CreateProduct/CreateProductCommand.cs +src/MyApp.Application/Products/CreateProduct/CreateProductHandler.cs +src/MyApp.Application/Products/CreateProduct/CreateProductValidator.cs +src/MyApp.Infrastructure/Persistence/ProductRepository.cs +src/MyApp.Api/Endpoints/ProductEndpoints.cs + +// GOOD — VSA: 1 file for a simple CRUD feature +src/MyApp.Api/Features/Products/CreateProduct.cs +``` + +### DDD Everywhere + +``` +// BAD — value objects and aggregates for a settings table +public class UserSettings : AggregateRoot // overkill +{ + public ThemeName Theme { get; private set; } // value object for "dark"/"light"? + public void ChangeTheme(ThemeName theme) { /* domain event? really? 
*/ } +} + +// GOOD — simple entity for simple data +public class UserSettings +{ + public Guid UserId { get; init; } + public string Theme { get; set; } = "light"; +} +``` + +### Premature Microservices + +``` +// BAD — splitting into 5 microservices on day one with 2 developers +OrderService (own repo, own DB, own CI/CD) +CatalogService (own repo, own DB, own CI/CD) +IdentityService (own repo, own DB, own CI/CD) +NotificationService (own repo, own DB, own CI/CD) +GatewayService (own repo, own CI/CD) + +// GOOD — start as a modular monolith, extract when you have evidence +src/ + Modules/Orders/ + Modules/Catalog/ + Modules/Identity/ + Modules/Notifications/ +``` + +### Skipping the Questionnaire + +``` +// BAD — "I always use Clean Architecture" +User: "Set up a new project for a todo app" +Agent: *immediately scaffolds 4-project CA solution* + +// GOOD — ask first, then recommend +User: "Set up a new project for a todo app" +Agent: "Let me ask a few questions about your project to recommend the best architecture..." 
+Agent: *runs questionnaire, recommends VSA for low-complexity app* +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| New project, unknown requirements | Run the questionnaire | +| Simple CRUD API, 1-3 developers | VSA | +| Medium complexity, long-lived, experienced team | Clean Architecture | +| Complex domain, specialized vocabulary | DDD + Clean Architecture | +| Multiple bounded contexts, multiple teams | Modular Monolith | +| Existing N-tier codebase, needs modernization | Clean Architecture (familiar migration) | +| MVP / startup, speed is priority | VSA | +| Regulatory / compliance-heavy | Clean Architecture or DDD (enforced boundaries) | +| Will need independent scaling later | Modular Monolith (extraction-ready) | diff --git a/.opencode/skills/aspire/SKILL.md b/.opencode/skills/aspire/SKILL.md new file mode 100644 index 00000000..a810bcbd --- /dev/null +++ b/.opencode/skills/aspire/SKILL.md @@ -0,0 +1,172 @@ +--- +name: aspire +description: > + .NET Aspire for cloud-native orchestration. Covers AppHost configuration, + service defaults, resource configuration, service discovery, and the Aspire + dashboard. + Load this skill when setting up local development orchestration, service + discovery, or Aspire-managed infrastructure, or when the user mentions + "Aspire", "AppHost", "service defaults", "service discovery", "orchestration", + "Aspire dashboard", "AddProject", "WithReference", or "cloud-native .NET". +--- + +# .NET Aspire + +## Core Principles + +1. **Aspire is for orchestration, not deployment** — Aspire manages your local development experience: starting services, databases, and message brokers together. Production deployment is a separate concern. +2. **Service defaults are your baseline** — The `ServiceDefaults` project configures OpenTelemetry, health checks, and resilience for all services in one place. +3. 
**Use Aspire integrations** — Aspire has built-in integrations for PostgreSQL, Redis, RabbitMQ, SQL Server, and more. They handle connection strings, health checks, and tracing automatically. +4. **The dashboard is your observability tool** — Use the Aspire dashboard for local development tracing, logging, and metrics instead of setting up Seq/Grafana locally. + +## Patterns + +### AppHost Configuration + +```csharp +// AppHost/Program.cs +var builder = DistributedApplication.CreateBuilder(args); + +// Infrastructure resources +var postgres = builder.AddPostgres("postgres") + .WithPgAdmin() + .AddDatabase("myappdb"); + +var redis = builder.AddRedis("redis") + .WithRedisInsight(); + +var rabbitmq = builder.AddRabbitMQ("messaging") + .WithManagementPlugin(); + +// Application projects +var api = builder.AddProject<Projects.MyApp_Api>("api") + .WithReference(postgres) + .WithReference(redis) + .WithReference(rabbitmq) + .WithExternalHttpEndpoints(); + +var worker = builder.AddProject<Projects.MyApp_Worker>("worker") + .WithReference(postgres) + .WithReference(rabbitmq); + +builder.Build().Run(); +``` + +### Service Defaults + +```csharp +// ServiceDefaults/Extensions.cs — Standard Aspire service defaults +// Configures OpenTelemetry (metrics + tracing), health checks, service discovery, and resilience +public static class Extensions +{ + public static IHostApplicationBuilder AddServiceDefaults(this IHostApplicationBuilder builder) + { + builder.ConfigureOpenTelemetry(); + builder.AddDefaultHealthChecks(); + builder.Services.AddServiceDiscovery(); + + builder.Services.ConfigureHttpClientDefaults(http => + { + http.AddStandardResilienceHandler(); + http.AddServiceDiscovery(); + }); + + return builder; + } + + // ConfigureOpenTelemetry: adds logging, metrics (ASP.NET, HttpClient, Runtime), + // tracing (ASP.NET, HttpClient, EF Core), and OTLP exporter if configured + // AddDefaultHealthChecks: adds a "self" liveness check tagged ["live"] +} +``` + +### Using Service Defaults in a Project + +```csharp +// 
MyApp.Api/Program.cs +var builder = WebApplication.CreateBuilder(args); +builder.AddServiceDefaults(); + +// Add Aspire integrations +builder.AddNpgsqlDbContext<AppDbContext>("myappdb"); +builder.AddRedisDistributedCache("redis"); + +var app = builder.Build(); +app.MapDefaultEndpoints(); // health check endpoints +app.Run(); +``` + +### Service-to-Service Communication + +```csharp +// AppHost — configure service references +var orderApi = builder.AddProject<Projects.OrderApi>("order-api"); +var paymentApi = builder.AddProject<Projects.PaymentApi>("payment-api") + .WithReference(orderApi); // paymentApi can discover orderApi + +// In PaymentApi — use service discovery +builder.Services.AddHttpClient<OrderApiClient>(client => +{ + client.BaseAddress = new Uri("https+http://order-api"); +}); +``` + +### Solution Structure with Aspire + +``` +MyApp.slnx +├── MyApp.AppHost/ # Aspire orchestrator +│ └── Program.cs +├── MyApp.ServiceDefaults/ # Shared service configuration +│ └── Extensions.cs +├── src/ +│ ├── MyApp.Api/ # Web API project +│ └── MyApp.Worker/ # Background worker +└── tests/ + └── MyApp.Api.Tests/ +``` + +## Anti-patterns + +### Don't Use Aspire for Production Deployment + +```csharp +// BAD — Aspire AppHost is not a production deployment tool +// Don't try to deploy the AppHost to Kubernetes + +// GOOD — Use Aspire for local dev, deploy with Docker/K8s/Azure separately +``` + +### Don't Hardcode Connection Strings with Aspire + +```csharp +// BAD — hardcoding connection strings defeats Aspire's purpose +builder.Services.AddDbContext<AppDbContext>(o => + o.UseNpgsql("Host=localhost;Database=myapp;...")); + +// GOOD — use Aspire integration (connection string injected automatically) +builder.AddNpgsqlDbContext<AppDbContext>("myappdb"); +``` + +### Don't Skip Service Defaults + +```csharp +// BAD — manually configuring each service +builder.Services.AddOpenTelemetry()... +builder.Services.AddHealthChecks()... 
+ +// GOOD — use shared service defaults +builder.AddServiceDefaults(); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Local dev with multiple services | Aspire AppHost | +| Single-project local dev | `dotnet run` is fine, Aspire optional | +| Shared service configuration | ServiceDefaults project | +| Database for local dev | Aspire `AddPostgres()` / `AddSqlServer()` | +| Service discovery | Aspire's built-in service discovery | +| Production deployment | Docker / Kubernetes / Azure Container Apps | +| Observability in local dev | Aspire dashboard (auto-configured) | diff --git a/.opencode/skills/authentication/SKILL.md b/.opencode/skills/authentication/SKILL.md new file mode 100644 index 00000000..2779f2db --- /dev/null +++ b/.opencode/skills/authentication/SKILL.md @@ -0,0 +1,224 @@ +--- +name: authentication +description: > + Authentication and authorization for ASP.NET Core. Covers JWT bearer tokens, + OpenID Connect, ASP.NET Identity, authorization policies, role and claim-based + authorization, and API key authentication. + Load this skill when implementing login, protecting endpoints, designing + authorization rules, or when the user mentions "auth", "JWT", "bearer token", + "OIDC", "OpenID Connect", "Identity", "claims", "roles", "authorize", + "RequireAuthorization", "API key", or "cookie auth". +--- + +# Authentication & Authorization + +## Core Principles + +1. **Use ASP.NET Identity for user management** — Don't build your own user store. Identity handles password hashing, lockout, two-factor, and email confirmation. +2. **JWT for APIs, cookies for web apps** — APIs use Bearer token authentication; Blazor/MVC apps use cookie authentication. +3. **Policy-based authorization over roles** — Policies are testable, composable, and more expressive than `[Authorize(Roles = "Admin")]`. +4. **Never store secrets in code** — Use user secrets in development, Azure Key Vault / environment variables in production. 
+
+## Patterns
+
+### JWT Bearer Authentication
+
+```csharp
+// Program.cs
+builder.Services.AddAuthentication(JwtBearerDefaults.AuthenticationScheme)
+    .AddJwtBearer(options =>
+    {
+        options.TokenValidationParameters = new TokenValidationParameters
+        {
+            ValidateIssuer = true,
+            ValidateAudience = true,
+            ValidateLifetime = true,
+            ValidateIssuerSigningKey = true,
+            ValidIssuer = builder.Configuration["Jwt:Issuer"],
+            ValidAudience = builder.Configuration["Jwt:Audience"],
+            IssuerSigningKey = new SymmetricSecurityKey(
+                Encoding.UTF8.GetBytes(builder.Configuration["Jwt:Key"]!)),
+            ClockSkew = TimeSpan.Zero
+        };
+    });
+
+builder.Services.AddAuthorization();
+```
+
+### Token Generation
+
+```csharp
+public class TokenService(IConfiguration config, TimeProvider clock)
+{
+    public string GenerateToken(User user, IEnumerable<string> roles)
+    {
+        var claims = new List<Claim>
+        {
+            new(ClaimTypes.NameIdentifier, user.Id),
+            new(ClaimTypes.Email, user.Email!),
+            new(ClaimTypes.Name, user.UserName!)
+        };
+
+        claims.AddRange(roles.Select(role => new Claim(ClaimTypes.Role, role)));
+
+        var key = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(config["Jwt:Key"]!));
+        var credentials = new SigningCredentials(key, SecurityAlgorithms.HmacSha256);
+
+        var token = new JwtSecurityToken(
+            issuer: config["Jwt:Issuer"],
+            audience: config["Jwt:Audience"],
+            claims: claims,
+            expires: clock.GetUtcNow().AddHours(1).DateTime,
+            signingCredentials: credentials);
+
+        return new JwtSecurityTokenHandler().WriteToken(token);
+    }
+}
+```
+
+### Policy-Based Authorization
+
+```csharp
+// Define policies
+builder.Services.AddAuthorizationBuilder()
+    .AddPolicy("AdminOnly", policy => policy.RequireRole("Admin"))
+    .AddPolicy("CanManageOrders", policy => policy
+        .RequireAuthenticatedUser()
+        .RequireClaim("permission", "orders:write"))
+    .AddPolicy("MinimumAge", policy => policy
+        .AddRequirements(new MinimumAgeRequirement(18)));
+
+// Custom requirement + handler
+public class MinimumAgeRequirement(int 
minimumAge) : IAuthorizationRequirement
+{
+    public int MinimumAge => minimumAge;
+}
+
+public class MinimumAgeHandler(TimeProvider clock) : AuthorizationHandler<MinimumAgeRequirement>
+{
+    protected override Task HandleRequirementAsync(
+        AuthorizationHandlerContext context,
+        MinimumAgeRequirement requirement)
+    {
+        var dateOfBirthClaim = context.User.FindFirst("date_of_birth");
+        if (dateOfBirthClaim is not null &&
+            DateOnly.TryParse(dateOfBirthClaim.Value, out var dob) &&
+            dob.AddYears(requirement.MinimumAge) <= DateOnly.FromDateTime(clock.GetUtcNow().DateTime))
+        {
+            context.Succeed(requirement);
+        }
+        return Task.CompletedTask;
+    }
+}
+```
+
+### Protecting Endpoints
+
+```csharp
+// Protect an entire group
+app.MapGroup("/api/admin")
+    .WithTags("Admin")
+    .RequireAuthorization("AdminOnly")
+    .MapAdminEndpoints();
+
+// Protect individual endpoints
+group.MapPost("/", CreateOrder)
+    .RequireAuthorization("CanManageOrders");
+
+// Allow anonymous on a protected group
+group.MapGet("/public-info", GetPublicInfo)
+    .AllowAnonymous();
+```
+
+### OpenID Connect (External Identity Provider)
+
+```csharp
+builder.Services.AddAuthentication(options =>
+{
+    options.DefaultScheme = CookieAuthenticationDefaults.AuthenticationScheme;
+    options.DefaultChallengeScheme = OpenIdConnectDefaults.AuthenticationScheme;
+})
+.AddCookie()
+.AddOpenIdConnect(options =>
+{
+    options.Authority = builder.Configuration["Oidc:Authority"];
+    options.ClientId = builder.Configuration["Oidc:ClientId"];
+    options.ClientSecret = builder.Configuration["Oidc:ClientSecret"];
+    options.ResponseType = "code";
+    options.SaveTokens = true;
+    options.Scope.Add("openid");
+    options.Scope.Add("profile");
+    options.Scope.Add("email");
+});
+```
+
+### Accessing Current User
+
+```csharp
+// In minimal API handlers — inject ClaimsPrincipal or HttpContext
+group.MapGet("/me", (ClaimsPrincipal user) =>
+{
+    var userId = user.FindFirstValue(ClaimTypes.NameIdentifier);
+    var email = user.FindFirstValue(ClaimTypes.Email);
+    
return TypedResults.Ok(new { userId, email }); +}).RequireAuthorization(); +``` + +## Anti-patterns + +### Don't Use Role Strings Everywhere + +```csharp +// BAD — magic strings, hard to refactor, not testable +[Authorize(Roles = "Admin,SuperAdmin,Manager")] +public class AdminController { } + +// GOOD — policy-based +builder.Services.AddAuthorizationBuilder() + .AddPolicy("AdminAccess", p => p.RequireRole("Admin", "SuperAdmin", "Manager")); + +group.MapGet("/", Handler).RequireAuthorization("AdminAccess"); +``` + +### Don't Store Secrets in appsettings.json + +```json +// BAD — committed to source control +{ + "Jwt": { + "Key": "super-secret-key-12345" + } +} +``` + +```bash +# GOOD — use user secrets in development +dotnet user-secrets set "Jwt:Key" "super-secret-key-12345" +``` + +### Don't Skip Token Validation + +```csharp +// BAD — disabling validation +options.TokenValidationParameters = new TokenValidationParameters +{ + ValidateIssuer = false, // DON'T + ValidateAudience = false, // DON'T + ValidateLifetime = false, // DEFINITELY DON'T +}; + +// GOOD — validate everything (see JWT Bearer Authentication pattern above for full setup) +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| REST API | JWT Bearer authentication | +| Blazor Server / MVC | Cookie authentication | +| External identity provider | OpenID Connect | +| User registration / login | ASP.NET Identity | +| Permission checking | Policy-based authorization | +| Multi-tenant API | Claims-based with tenant claim | +| API-to-API communication | Client credentials (OAuth 2.0) | +| Simple API keys | Custom `AuthenticationHandler` | diff --git a/.opencode/skills/autonomous-loops/SKILL.md b/.opencode/skills/autonomous-loops/SKILL.md new file mode 100644 index 00000000..d123eb1d --- /dev/null +++ b/.opencode/skills/autonomous-loops/SKILL.md @@ -0,0 +1,393 @@ +--- +name: autonomous-loops +description: > + Autonomous iteration loops for .NET development: build-fix, 
test-fix, refactor, + and scaffold loops. Each loop has bounded iterations, progress detection, and + fail-safe guards that prevent infinite retries and wasted tokens. Load this skill + when Claude needs to fix build errors, fix failing tests, perform multi-step + refactoring, scaffold a new feature, or when the user says "fix the build", + "make the tests pass", "refactor this", "scaffold", "generate and verify", + "keep going until it works", "autonomous", or "loop". +--- + +# Autonomous Loops + +## Core Principles + +1. **Bounded iteration, always** — Every loop has a maximum iteration count. Default is 5, hard cap is 10. No loop runs forever. If 5 iterations cannot solve a build error, the problem needs human judgment, not a 6th attempt at the same approach. + +2. **Progress tracking or exit** — Each iteration must make measurable progress: fewer errors, fewer failing tests, fewer warnings. If an iteration produces the same error count as the previous one, the loop exits with a STUCK status. Retrying without progress is token waste. + +3. **Fail-safe guards are non-negotiable** — Loops exit on: max iterations reached, no progress detected, critical error encountered, more errors introduced than fixed, or user interruption. These guards exist to prevent the most common failure mode: Claude stubbornly retrying the same broken approach 20 times. + +4. **Transparency at every iteration** — Report what changed and why after each iteration. The user should be able to follow the loop's reasoning without reading every file. "Iteration 3: fixed CS0246 by adding `using System.Text.Json`, 2 errors remain" is transparent. Silently modifying files is not. + +5. **Atomicity per iteration** — Each iteration's changes should leave the codebase in a valid state (or at least no worse than before). Never make partial changes that depend on a future iteration succeeding. If iteration 3 fails, the code should still be in the state that iteration 2 left it in. 
+ +## Patterns + +### Build-Fix Loop + +The most common loop. Fix compilation errors iteratively until `dotnet build` succeeds. + +``` +BUILD-FIX LOOP: + max_iterations = 5 + previous_errors = [] + + for iteration in 1..max_iterations: + result = dotnet build [project/solution] + + if result.exit_code == 0: + report "BUILD PASS after {iteration} iteration(s)" + return PASS + + errors = parse_errors(result.output) + + if errors == previous_errors: + report "STUCK — same {len(errors)} error(s) after fix attempt" + report "Errors: {errors}" + return STUCK + + if len(errors) > len(previous_errors) and iteration > 1: + report "REGRESSING — {len(errors)} errors, up from {len(previous_errors)}" + report "Last fix introduced new errors. Reverting iteration {iteration}." + revert_last_changes() + return REGRESSION + + report "Iteration {iteration}: {len(errors)} error(s) found" + for error in errors: + category = categorize(error) + fix = determine_fix(error, category) + apply(fix) + report " Fixed {error.code}: {error.message} → {fix.description}" + + previous_errors = errors + + report "MAX ITERATIONS reached with {len(errors)} error(s) remaining" + return FAIL +``` + +**Error Categories and Fix Strategies:** + +``` +CATEGORY EXAMPLE CODE FIX STRATEGY +Missing using CS0246 Add the correct using directive +Missing reference CS0246 Add NuGet package or project reference +Type mismatch CS0029 Check expected type, cast or convert +API change CS0117 Check new API signature, update call +Nullable warning CS8600-CS8604 Add null check, use ?. or ?? operator +Ambiguous reference CS0104 Add full namespace qualifier +Missing member CS1061 Check spelling, verify type has the member +Obsolete API CS0618 Replace with recommended alternative +Missing implementation CS0535 Implement missing interface members +Syntax error CS1002-CS1003 Fix syntax based on error context +``` + +### Test-Fix Loop + +Fix failing tests iteratively. 
Critically, this loop must determine whether the bug is in the test or in the production code. + +``` +TEST-FIX LOOP: + max_iterations = 5 + previous_failures = [] + + for iteration in 1..max_iterations: + result = dotnet test [project/solution] --no-build + + if result.all_passed: + report "TESTS PASS after {iteration} iteration(s)" + return PASS + + failures = parse_failures(result.output) + + if failures == previous_failures: + report "STUCK — same {len(failures)} failure(s) after fix attempt" + return STUCK + + report "Iteration {iteration}: {len(failures)} failure(s)" + for failure in failures: + # CRITICAL: diagnose before fixing + diagnosis = diagnose(failure) + report " {failure.test_name}: {failure.message}" + report " Diagnosis: {diagnosis.root_cause}" + report " Fix target: {diagnosis.fix_in}" # "test" or "production" + + if diagnosis.fix_in == "test": + fix = fix_test(failure, diagnosis) + else: + fix = fix_production_code(failure, diagnosis) + + apply(fix) + report " Applied: {fix.description}" + + previous_failures = failures + + report "MAX ITERATIONS with {len(failures)} failure(s) remaining" + return FAIL +``` + +**Diagnosis Protocol:** + +``` +DIAGNOSING A TEST FAILURE: +1. Read the test code — understand the assertion and setup +2. Read the production code — understand the actual behavior +3. Determine root cause: + a. Test expects wrong value → fix the test + b. Production code has a bug → fix the production code + c. Test setup is incomplete → fix the test setup + d. API contract changed → update test to match new contract +4. NEVER fix a test by weakening the assertion without understanding why + BAD: Assert.Equal(expected, actual) → Assert.NotNull(actual) + GOOD: Assert.Equal(expected, actual) → fix production code to return expected +``` + +### Refactor Loop + +Multi-step refactoring with verification at each step. 
+ +``` +REFACTOR LOOP: + targets = identify_refactoring_targets() # via MCP tools + max_iterations = min(len(targets), 10) + + for iteration, target in enumerate(targets, 1): + if iteration > max_iterations: + report "MAX TARGETS reached, {len(targets) - iteration} remaining" + return PARTIAL + + report "Refactoring {iteration}/{len(targets)}: {target.description}" + + # 1. Apply the refactoring + apply_refactoring(target) + + # 2. Verify it builds + build_result = build_fix_loop(max_iterations=3) # nested, smaller budget + if build_result != PASS: + report "Build failed after refactoring {target}. Reverting." + revert_changes() + return FAIL + + # 3. Verify tests pass + test_result = test_fix_loop(max_iterations=3) # nested, smaller budget + if test_result != PASS: + report "Tests failed after refactoring {target}. Reverting." + revert_changes() + return FAIL + + # 4. Check diagnostics for new warnings + diagnostics = get_diagnostics() + if diagnostics.new_warnings > 0: + report "New warnings introduced. Fixing..." + fix_warnings(diagnostics.new_warnings) + + report "Refactoring {iteration} complete. Build: PASS, Tests: PASS" + + return PASS +``` + +### Scaffold Loop + +Generate a new feature end-to-end and verify everything compiles and tests pass. + +``` +SCAFFOLD LOOP: + 1. GENERATE source files + → Create feature file (endpoint + handler + validator) + → Create EF configuration if needed + → Create DTOs/contracts + + 2. BUILD VERIFICATION + → Run build-fix loop (max 5 iterations) + → If FAIL: report and stop — generated code has fundamental issues + + 3. GENERATE test files + → Create unit tests for handler logic + → Create integration tests for endpoint + → Match project's test conventions (via instinct-system) + + 4. TEST VERIFICATION + → Run test-fix loop (max 5 iterations) + → If FAIL: report which tests fail and why + + 5. 
QUALITY CHECK + → Run get_diagnostics — zero new warnings + → Run detect_antipatterns — zero new anti-patterns + → Verify naming matches project conventions + + FINAL REPORT: + "Scaffold complete: + - Source files: [list with paths] + - Test files: [list with paths] + - Build: PASS + - Tests: [N/N] passing + - Warnings: 0 new + - Anti-patterns: 0 new" +``` + +### Progress Detection + +How to measure whether a loop iteration made progress: + +``` +PROGRESS METRICS: + Build-Fix: error_count[N] < error_count[N-1] + Test-Fix: failure_count[N] < failure_count[N-1] + Refactor: target_count[N] < target_count[N-1] + Scaffold: phase advances (generate → build → test → verify) + +STUCK DETECTION: + Same errors/failures after a fix attempt → STUCK + Error count oscillates (3 → 2 → 3 → 2) → STUCK (after 2 oscillations) + Fix introduces errors in previously passing code → REGRESSION + +NO-PROGRESS RESPONSE: + 1. Report the stuck state clearly + 2. List the errors/failures that could not be fixed + 3. Suggest what a human should investigate + 4. Do NOT retry the same approach +``` + +### Emergency Exit Conditions + +Conditions that cause immediate loop termination: + +``` +EMERGENCY EXITS: + 1. MORE ERRORS THAN BEFORE — an iteration introduced more errors than it fixed + → Revert the iteration's changes + → Report: "Fix attempt introduced {N} new errors. Reverted." + + 2. CRITICAL ERROR — error indicates a fundamental problem (wrong SDK, missing + project file, corrupted solution) + → Stop immediately + → Report: "Critical error detected: {description}. Human intervention needed." + + 3. CASCADING FAILURES — fixing one error causes 3+ new errors repeatedly + → Stop after 2 cascades + → Report: "Cascading failure pattern detected. The fix approach is wrong." + + 4. TEST INFRASTRUCTURE FAILURE — test runner itself fails (not test assertions) + → Stop immediately + → Report: "Test infrastructure error: {description}. Check test setup." + + 5. 
USER INTERRUPTION — user sends any message during the loop + → Complete current iteration + → Report progress so far + → Ask how to proceed +``` + +### Loop Nesting and Reporting + +When loops call other loops (e.g., refactor loop calls build-fix loop): + +``` +NESTING RULES: + - Nested loops get a SMALLER budget (parent max 5 → nested max 3) + - Maximum nesting depth: 2 (Refactor → Build-Fix → no further) + - Nested loop failure = parent loop iteration failure (revert the target) + - Total iteration budget across all nesting: 15 + +ITERATION REPORT FORMAT: + [Loop Type] Iteration {N}/{MAX}: {error/failure count} + → {file}: {what changed and why} + → Result: {new count} | Status: {CONTINUE/PASS/STUCK/FAIL} +``` + +## Anti-patterns + +### Unbounded Loops + +``` +# BAD — no iteration limit +"Keep fixing build errors until it compiles" +*Claude tries 47 iterations, burns through context window, + keeps retrying the same broken approach* + +# GOOD — explicit bounds with progress checks +build_fix_loop(max_iterations=5) +*After 5 iterations or zero progress, stops and reports* +``` + +### Retrying the Same Fix + +``` +# BAD — applying the same fix that failed +Iteration 1: Add `using System.Linq;` → CS0246 persists +Iteration 2: Add `using System.Linq;` → CS0246 persists +Iteration 3: Add `using System.Linq;` → CS0246 persists + +# GOOD — detect no progress, try a different approach or exit +Iteration 1: Add `using System.Linq;` → CS0246 persists +Iteration 2: STUCK — same error after fix. + "CS0246 persists after adding System.Linq. The type may be in a + different namespace or require a NuGet package. Checking..." + → Search for the type using find_symbol +``` + +### Fixing by Deletion + +``` +# BAD — making code compile by removing functionality +Error: CS0246 'OrderValidator' not found +Fix: Delete all validation code +*Builds successfully! 
...but the feature is broken* + +# GOOD — fix the root cause +Error: CS0246 'OrderValidator' not found +Fix: Add missing reference to Validation project, or create the missing class +*Builds successfully with all functionality intact* +``` + +### Silent Loops + +``` +# BAD — loop runs silently, user sees nothing for 2 minutes +*...silence...* +"Done! Fixed 7 build errors." +*User has no idea what changed or why* + +# GOOD — transparent reporting per iteration +"Iteration 1/5: 4 errors found + Fixed CS0246 in OrderHandler.cs → added using FluentValidation + Fixed CS0029 in OrderResponse.cs → changed return type to match + 2 errors remain. + Iteration 2/5: 2 errors found + Fixed CS1061 in OrderEndpoint.cs → updated method name to CreateAsync + Fixed CS8600 in OrderHandler.cs → added null check + 0 errors remain. + BUILD PASS after 2 iterations." +``` + +### Over-Aggressive Test Fixing + +``` +# BAD — weakening assertions to make tests pass +Assert.Equal(200, response.StatusCode) // fails with 404 +→ Changed to: Assert.NotNull(response) // passes but hides the bug + +# GOOD — diagnose the failure, fix the right code +Assert.Equal(200, response.StatusCode) // fails with 404 +→ Diagnosis: endpoint routing is wrong, missing MapGet registration +→ Fix: add endpoint registration in OrderModule.cs +→ Test passes with correct 200 status +``` + +## Decision Guide + +| Scenario | Loop Type | Max Iterations | Notes | +|----------|-----------|---------------|-------| +| Build fails after code changes | Build-Fix | 5 | Categorize errors, fix systematically | +| Tests fail after code changes | Test-Fix | 5 | Diagnose test vs production bug first | +| Tests fail after build-fix loop | Test-Fix | 3 | Smaller budget — build-fix may have introduced issues | +| Multi-file refactoring | Refactor | 10 (or target count) | Verify build+tests after each target | +| Generating a new feature | Scaffold | 1 (phases, not iterations) | Build-fix and test-fix nested inside | +| Same error 
persists after fix | Exit with STUCK | N/A | Report error, suggest human investigation | +| Fix introduces more errors | Emergency exit | N/A | Revert changes, report regression | +| Nested loop needed | Use smaller budget | Parent - 2 | Max nesting depth: 2 | +| User says "keep going" | Extend by 3 iterations | Current + 3 | Never exceed hard cap of 10 | +| User says "stop" | Exit immediately | N/A | Report progress, preserve current state | +| Error is in test infrastructure | Exit immediately | N/A | Test runner issues need human attention | +| 3+ cascading failures | Exit immediately | N/A | The approach is fundamentally wrong | diff --git a/.opencode/skills/caching/SKILL.md b/.opencode/skills/caching/SKILL.md new file mode 100644 index 00000000..2780db21 --- /dev/null +++ b/.opencode/skills/caching/SKILL.md @@ -0,0 +1,183 @@ +--- +name: caching +description: > + Caching strategies for .NET 10 applications. Covers HybridCache (the default), + output caching, response caching, and distributed cache patterns. + Load this skill when implementing caching, optimizing read performance, reducing + database load, or when the user mentions "cache", "HybridCache", "Redis", + "output cache", "response cache", "distributed cache", "IMemoryCache", + "cache invalidation", "stampede protection", or "cache-aside". +--- + +# Caching + +## Core Principles + +1. **HybridCache is the default** — .NET 9+ introduced `HybridCache` as the unified caching abstraction. It combines in-memory (L1) and distributed (L2) caching with stampede protection. See ADR-004. +2. **Cache reads, not writes** — Cache GET operations. Invalidate on mutations. Never cache POST/PUT/DELETE responses. +3. **Output caching for entire responses** — When the full HTTP response can be cached (public APIs, static data), use output caching middleware. +4. **Set explicit TTLs** — Every cached item needs an expiration. No unbounded caches. 
+
+## Patterns
+
+### HybridCache (Recommended Default)
+
+```csharp
+// Program.cs
+builder.Services.AddHybridCache(options =>
+{
+    options.DefaultEntryOptions = new HybridCacheEntryOptions
+    {
+        Expiration = TimeSpan.FromMinutes(5),
+        LocalCacheExpiration = TimeSpan.FromMinutes(2)
+    };
+});
+
+// Optional: Add Redis as the L2 distributed cache
+builder.Services.AddStackExchangeRedisCache(options =>
+{
+    options.Configuration = builder.Configuration.GetConnectionString("Redis");
+});
+```
+
+```csharp
+// Usage in a handler
+public class GetProduct
+{
+    public record Query(Guid Id);
+    public record Response(Guid Id, string Name, decimal Price);
+
+    internal class Handler(AppDbContext db, HybridCache cache)
+    {
+        public async Task<Response?> Handle(Query query, CancellationToken ct)
+        {
+            return await cache.GetOrCreateAsync(
+                $"products:{query.Id}",
+                async token => await db.Products
+                    .Where(p => p.Id == query.Id)
+                    .Select(p => new Response(p.Id, p.Name, p.Price))
+                    .FirstOrDefaultAsync(token),
+                new HybridCacheEntryOptions
+                {
+                    Expiration = TimeSpan.FromMinutes(10)
+                },
+                cancellationToken: ct);
+        }
+    }
+}
+```
+
+### Cache Invalidation
+
+```csharp
+// Invalidate on mutation
+public class UpdateProduct
+{
+    internal class Handler(AppDbContext db, HybridCache cache)
+    {
+        public async Task<Result> Handle(Command command, CancellationToken ct)
+        {
+            var product = await db.Products.FindAsync([command.Id], ct);
+            if (product is null) return Result.Failure("Product not found");
+
+            product.Update(command.Name, command.Price);
+            await db.SaveChangesAsync(ct);
+
+            // Invalidate the cached entry
+            await cache.RemoveAsync($"products:{command.Id}", ct);
+
+            return Result.Success();
+        }
+    }
+}
+```
+
+### Output Caching (Full Response Caching)
+
+```csharp
+// Program.cs
+builder.Services.AddOutputCache(options =>
+{
+    options.AddBasePolicy(b => b.NoCache()); // Don't cache by default
+
+    options.AddPolicy("ProductList", b => b
+        .Expire(TimeSpan.FromMinutes(5))
+        .Tag("products"));
+
+    
options.AddPolicy("ProductById", b => b + .Expire(TimeSpan.FromMinutes(10)) + .SetVaryByRouteValue("id") + .Tag("products")); +}); + +app.UseOutputCache(); + +// Apply to endpoints +group.MapGet("/", ListProducts).CacheOutput("ProductList"); +group.MapGet("/{id:guid}", GetProduct).CacheOutput("ProductById"); + +// Invalidate by tag on mutations +group.MapPut("/{id:guid}", async (Guid id, UpdateProductRequest request, + IOutputCacheStore store, CancellationToken ct) => +{ + // ... update logic ... + await store.EvictByTagAsync("products", ct); + return TypedResults.NoContent(); +}); +``` + +### Cache-Aside Pattern (Legacy) + +> **Prefer HybridCache** for all new code. Manual `IDistributedCache` cache-aside lacks stampede +> protection, requires manual serialization, and has no L1/L2 layering. Use only when +> integrating with existing code that already uses `IDistributedCache` directly. + +## Anti-patterns + +### Don't Cache Without Expiration + +```csharp +// BAD — cache lives forever, stale data guaranteed +await cache.SetStringAsync(key, value); + +// GOOD — always set TTL +await cache.SetStringAsync(key, value, new DistributedCacheEntryOptions +{ + AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(10) +}); +``` + +### Don't Cache Mutable User-Specific Data + +```csharp +// BAD — caching user's cart with a global key +await cache.GetOrCreateAsync("shopping-cart", ...); + +// GOOD — include user ID in key +await cache.GetOrCreateAsync($"shopping-cart:{userId}", ...); +``` + +### Don't Build Your Own Stampede Protection + +```csharp +// BAD — manual lock to prevent cache stampede +private static readonly SemaphoreSlim Lock = new(1, 1); +await Lock.WaitAsync(); +try { /* check cache, populate if missing */ } +finally { Lock.Release(); } + +// GOOD — HybridCache has built-in stampede protection +await hybridCache.GetOrCreateAsync(key, factory); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| General data caching | 
HybridCache (`GetOrCreateAsync`) | +| Full HTTP response | Output caching with `.CacheOutput()` | +| Frequently read, rarely written | HybridCache with longer TTL | +| User-specific data | HybridCache with user-scoped key | +| Cache invalidation on write | `cache.RemoveAsync()` or output cache tags | +| Distributed deployment | HybridCache + Redis L2 backend | +| Single-server deployment | HybridCache with in-memory only | diff --git a/.opencode/skills/ci-cd/SKILL.md b/.opencode/skills/ci-cd/SKILL.md new file mode 100644 index 00000000..47ee0ea0 --- /dev/null +++ b/.opencode/skills/ci-cd/SKILL.md @@ -0,0 +1,216 @@ +--- +name: ci-cd +description: > + CI/CD pipelines for .NET applications. Covers GitHub Actions and Azure DevOps + YAML pipelines with build, test, publish, and deploy stages. + Load this skill when setting up continuous integration, automated testing, + deployment workflows, or when the user mentions "CI/CD", "pipeline", + "GitHub Actions", "Azure DevOps", "workflow", "deploy", "build pipeline", + "publish", "NuGet push", "release", or "continuous integration". +--- + +# CI/CD + +## Core Principles + +1. **Pipeline as code** — YAML pipelines committed to the repo. No click-ops in the UI. +2. **Fast feedback** — Build and test on every push. Cache NuGet packages. Fail fast. +3. **Build once, deploy many** — Build the artifact once, promote it through environments (dev → staging → production). +4. **Never skip tests** — Tests gate the pipeline. No deployment without passing tests. 
+ +## Patterns + +### GitHub Actions — Build + Test + +```yaml +# .github/workflows/ci.yml +name: CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +env: + DOTNET_VERSION: '10.0.x' + DOTNET_NOLOGO: true + DOTNET_CLI_TELEMETRY_OPTOUT: true + +jobs: + build-and-test: + runs-on: ubuntu-latest + + services: + postgres: + image: postgres:17 + env: + POSTGRES_DB: testdb + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - uses: actions/checkout@v4 + + - name: Setup .NET + uses: actions/setup-dotnet@v4 + with: + dotnet-version: ${{ env.DOTNET_VERSION }} + + - name: Restore + run: dotnet restore + + - name: Build + run: dotnet build --no-restore --configuration Release + + - name: Format check + run: dotnet format --verify-no-changes --no-restore + + - name: Test + run: dotnet test --no-build --configuration Release --logger trx --results-directory TestResults + env: + ConnectionStrings__Default: "Host=localhost;Database=testdb;Username=postgres;Password=postgres" + + - name: Publish test results + uses: actions/upload-artifact@v4 + if: always() + with: + name: test-results + path: TestResults/*.trx +``` + +### GitHub Actions — Build + Publish Docker Image + +```yaml +# .github/workflows/publish.yml +name: Publish + +on: + push: + tags: ['v*'] + +jobs: + publish: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - uses: actions/checkout@v4 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract version from tag + id: version + run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . 
+ push: true + tags: | + ghcr.io/${{ github.repository }}:${{ steps.version.outputs.VERSION }} + ghcr.io/${{ github.repository }}:latest +``` + +### Azure DevOps — Build + Test + +Same restore → build → format → test flow as GitHub Actions. Key differences: + +```yaml +# azure-pipelines.yml +trigger: + branches: + include: [main] + paths: + exclude: ['*.md', docs/] + +pool: + vmImage: 'ubuntu-latest' # vs runs-on: ubuntu-latest + +variables: + dotnetVersion: '10.0.x' + +# Key task differences from GitHub Actions: +# Setup .NET: task: UseDotNet@2 (inputs: version: $(dotnetVersion)) +# Test results: task: PublishTestResults@2 (testResultsFormat: VSTest) +# Steps use `script:` + `displayName:` instead of `- name:` + `run:` +# Services (e.g., Postgres) require a separate Docker task or pipeline service connection +``` + +### NuGet Package Publishing + +```yaml +# Part of GitHub Actions workflow +- name: Pack + run: dotnet pack src/MyLibrary -c Release -o ./nupkg --no-build + +- name: Push to NuGet + run: dotnet nuget push ./nupkg/*.nupkg --api-key ${{ secrets.NUGET_API_KEY }} --source https://api.nuget.org/v3/index.json +``` + +## Anti-patterns + +### Don't Build Different Artifacts per Environment + +```yaml +# BAD — building separately for each environment +- script: dotnet publish -c Debug # for dev +- script: dotnet publish -c Release # for prod + +# GOOD — build once, deploy everywhere +- script: dotnet publish -c Release -o ./publish +# Then deploy the same ./publish artifact to dev, staging, prod +``` + +### Don't Skip Format Checks in CI + +```yaml +# BAD — no format enforcement +steps: + - run: dotnet build + - run: dotnet test + +# GOOD — format check catches style issues early +steps: + - run: dotnet build + - run: dotnet format --verify-no-changes + - run: dotnet test +``` + +### Don't Hardcode Secrets in Pipelines + +```yaml +# BAD — secret in pipeline YAML +env: + DB_PASSWORD: "my-secret-password" + +# GOOD — use pipeline secrets +env: + DB_PASSWORD: ${{ 
secrets.DB_PASSWORD }} +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Open source project | GitHub Actions | +| Enterprise with Azure | Azure DevOps Pipelines | +| Docker deployment | Multi-stage build in CI, push to container registry | +| NuGet library | Build → Test → Pack → Push on tag | +| Database migrations | Run in CI test stage, script for production | +| Environment promotion | Same artifact, different configuration | diff --git a/.opencode/skills/clean-architecture/SKILL.md b/.opencode/skills/clean-architecture/SKILL.md new file mode 100644 index 00000000..42d898ef --- /dev/null +++ b/.opencode/skills/clean-architecture/SKILL.md @@ -0,0 +1,377 @@ +--- +name: clean-architecture +description: > + Clean Architecture for .NET applications. Covers the 4-project layout (Domain, + Application, Infrastructure, Api), dependency inversion, use case handlers, + domain entities with behavior, and infrastructure as a plugin. + Load this skill when building a project with Clean Architecture, discussing + layered architecture, dependency inversion, use cases, or when the + architecture-advisor recommends Clean Architecture. +--- + +# Clean Architecture + +## Core Principles + +1. **Dependency inversion is the foundation** — All dependencies point inward. Domain has zero project references. Application references only Domain. Infrastructure references Application and Domain. Api references all but depends on abstractions. The compiler enforces this via project references. +2. **Domain owns the rules** — Business logic lives in the Domain layer as entity methods, domain services, or specifications. The Domain layer has no knowledge of databases, HTTP, or any framework — only pure C# and .NET primitives. +3. **Use cases are the unit of work** — Each use case (command or query) is a single class in the Application layer. It orchestrates domain objects, persists through abstractions, and returns a result. 
No "service" classes with 20 methods. +4. **Infrastructure is a plugin** — EF Core, external APIs, email senders, file storage — all live in Infrastructure and implement interfaces defined in Application or Domain. Swap implementations without touching business logic. +5. **The API layer is thin** — Endpoints map HTTP to use cases and use cases to HTTP responses. No business logic in endpoints. + +## Patterns + +### Project Layout + +``` +src/ + MyApp.Domain/ + Entities/ + Order.cs # Entity with behavior + OrderItem.cs + Enums/ + OrderStatus.cs + Exceptions/ + DomainException.cs # Base domain exception + Interfaces/ + IOrderRepository.cs # Only if query needs go beyond DbSet + Common/ + Entity.cs # Base entity with Id + Result.cs # Result pattern type + + MyApp.Application/ + Common/ + Behaviors/ + ValidationBehavior.cs # Mediator pipeline behavior + Interfaces/ + IAppDbContext.cs # DbContext abstraction (preferred over repository) + Orders/ + Commands/ + CreateOrder/ + CreateOrderCommand.cs + CreateOrderHandler.cs + CreateOrderValidator.cs + Queries/ + GetOrder/ + GetOrderQuery.cs + GetOrderHandler.cs + OrderDto.cs + + MyApp.Infrastructure/ + Persistence/ + AppDbContext.cs # Implements IAppDbContext + Configurations/ + OrderConfiguration.cs + Migrations/ + Services/ + EmailSender.cs # Implements IEmailSender from Application + DependencyInjection.cs # AddInfrastructure extension + + MyApp.Api/ + Endpoints/ + OrderEndpoints.cs # Thin, maps HTTP ↔ use cases + Program.cs +``` + +### DbContext Abstraction (Preferred Over Repository) + +Define a minimal interface in Application; implement in Infrastructure: + +```csharp +// Application/Common/Interfaces/IAppDbContext.cs +public interface IAppDbContext +{ + DbSet Orders { get; } + DbSet Products { get; } + Task SaveChangesAsync(CancellationToken ct = default); +} + +// Infrastructure/Persistence/AppDbContext.cs +public class AppDbContext(DbContextOptions options) + : DbContext(options), IAppDbContext +{ + public DbSet 
Orders => Set<Order>(); + public DbSet<Product> Products => Set<Product>(); + + protected override void OnModelCreating(ModelBuilder modelBuilder) + { + modelBuilder.ApplyConfigurationsFromAssembly(typeof(AppDbContext).Assembly); + } +} +``` + +Why IAppDbContext over IRepository? EF Core's DbSet already IS a repository. Adding another abstraction on top adds indirection without value in most cases. + +### Use Case Handler (Command) + +```csharp +// Application/Orders/Commands/CreateOrder/CreateOrderCommand.cs +public record CreateOrderCommand( + string CustomerId, + List<OrderItemDto> Items) : IRequest<Result<Guid>>; + +public record OrderItemDto(string ProductId, int Quantity, decimal UnitPrice); + +// Application/Orders/Commands/CreateOrder/CreateOrderHandler.cs — uses Mediator (source-generated, MIT) +internal sealed class CreateOrderHandler( + IAppDbContext db, + TimeProvider clock) : IRequestHandler<CreateOrderCommand, Result<Guid>> +{ + public async ValueTask<Result<Guid>> Handle(CreateOrderCommand request, CancellationToken ct) + { + var order = Order.Create( + request.CustomerId, + request.Items.Select(i => new OrderItem(i.ProductId, i.Quantity, i.UnitPrice)), + clock.GetUtcNow()); + + db.Orders.Add(order); + await db.SaveChangesAsync(ct); + + return Result.Success(order.Id); + } +} + +// Application/Orders/Commands/CreateOrder/CreateOrderValidator.cs +public class CreateOrderValidator : AbstractValidator<CreateOrderCommand> +{ + public CreateOrderValidator() + { + RuleFor(x => x.CustomerId).NotEmpty(); + RuleFor(x => x.Items).NotEmpty(); + RuleForEach(x => x.Items).ChildRules(item => + { + item.RuleFor(x => x.ProductId).NotEmpty(); + item.RuleFor(x => x.Quantity).GreaterThan(0); + item.RuleFor(x => x.UnitPrice).GreaterThan(0); + }); + } +} +``` + +### Use Case Handler (Query) + +```csharp +// Application/Orders/Queries/GetOrder/GetOrderQuery.cs +public record GetOrderQuery(Guid OrderId) : IRequest<Result<OrderDto>>; + +public record OrderDto(Guid Id, string CustomerId, decimal Total, string Status, DateTimeOffset CreatedAt); + +// Application/Orders/Queries/GetOrder/GetOrderHandler.cs 
+internal sealed class GetOrderHandler(IAppDbContext db) : IRequestHandler<GetOrderQuery, Result<OrderDto>> +{ + public async ValueTask<Result<OrderDto>> Handle(GetOrderQuery request, CancellationToken ct) + { + var order = await db.Orders + .Where(o => o.Id == request.OrderId) + .Select(o => new OrderDto(o.Id, o.CustomerId, o.Total, o.Status.ToString(), o.CreatedAt)) + .FirstOrDefaultAsync(ct); + + return order is not null + ? Result.Success(order) + : Result.Failure("Order not found"); + } +} +``` + +### Domain Entity with Behavior + +```csharp +// Domain/Entities/Order.cs +public class Order : Entity +{ + private readonly List<OrderItem> _items = []; + + private Order() { } // EF Core + + public string CustomerId { get; private set; } = null!; + public OrderStatus Status { get; private set; } + public decimal Total { get; private set; } + public DateTimeOffset CreatedAt { get; private set; } + public IReadOnlyList<OrderItem> Items => _items.AsReadOnly(); + + public static Order Create(string customerId, IEnumerable<OrderItem> items, DateTimeOffset now) + { + var order = new Order + { + Id = Guid.CreateVersion7(), + CustomerId = customerId, + Status = OrderStatus.Pending, + CreatedAt = now + }; + + foreach (var item in items) + order.AddItem(item); + + return order; + } + + public void AddItem(OrderItem item) + { + _items.Add(item); + Total = _items.Sum(i => i.Quantity * i.UnitPrice); + } + + public Result Cancel() + { + if (Status is not OrderStatus.Pending) + return Result.Failure("Only pending orders can be cancelled"); + + Status = OrderStatus.Cancelled; + return Result.Success(); + } +} +``` + +### Thin Endpoint Wiring (IEndpointGroup Auto-Discovery) + +Every endpoint group implements `IEndpointGroup` and is auto-discovered via `app.MapEndpoints()`. Program.cs never changes when adding new endpoints. See the **minimal-api** skill for the full `IEndpointGroup` interface and `EndpointExtensions` setup. 
+ +```csharp +// Api/Endpoints/OrderEndpoints.cs +public sealed class OrderEndpoints : IEndpointGroup +{ + public void Map(IEndpointRouteBuilder app) + { + var group = app.MapGroup("/api/orders").WithTags("Orders"); + + group.MapPost("/", CreateOrder) + .WithName("CreateOrder"); + + group.MapGet("/{id:guid}", GetOrder) + .WithName("GetOrder"); + + group.MapGet("/", ListOrders) + .WithName("ListOrders"); + } + + private static async Task CreateOrder( + CreateOrderCommand command, ISender sender, CancellationToken ct) + { + var result = await sender.Send(command, ct); + return result.IsSuccess + ? TypedResults.Created($"/api/orders/{result.Value}", result.Value) + : result.ToProblemDetails(); + } + + private static async Task GetOrder( + Guid id, ISender sender, CancellationToken ct) + { + var result = await sender.Send(new GetOrderQuery(id), ct); + return result.IsSuccess + ? TypedResults.Ok(result.Value) + : TypedResults.NotFound(); + } + + private static async Task ListOrders( + [AsParameters] ListOrdersQuery query, ISender sender, CancellationToken ct) + { + var result = await sender.Send(query, ct); + return TypedResults.Ok(result); + } +} +``` + +### Infrastructure DI Registration + +```csharp +// Infrastructure/DependencyInjection.cs +public static class DependencyInjection +{ + public static IServiceCollection AddInfrastructure( + this IServiceCollection services, + IConfiguration config) + { + services.AddDbContext(options => + options.UseNpgsql(config.GetConnectionString("DefaultConnection"))); + + services.AddScoped(sp => sp.GetRequiredService()); + + return services; + } +} +``` + +## Anti-patterns + +### Anemic Domain Model + +```csharp +// BAD — entity is just a data bag, all logic in handler +public class Order +{ + public Guid Id { get; set; } + public string CustomerId { get; set; } = null!; + public decimal Total { get; set; } + public List Items { get; set; } = []; +} + +// Handler sets everything directly +order.Total = order.Items.Sum(i => 
i.Quantity * i.UnitPrice); +order.Status = OrderStatus.Pending; + +// GOOD — entity encapsulates its own rules (see Domain Entity pattern above) +var order = Order.Create(customerId, items, clock.GetUtcNow()); +``` + +### DbContext in Domain Layer + +```csharp +// BAD — Domain references EF Core +// Domain/Services/OrderService.cs +public class OrderService(AppDbContext db) { } // Domain depends on Infrastructure! + +// GOOD — Domain defines interfaces, Infrastructure implements +// Domain/Interfaces/IOrderRepository.cs (only if you need query abstraction beyond DbSet) +// Application/Common/Interfaces/IAppDbContext.cs (preferred) +``` + +### Fat Endpoints + +```csharp +// BAD — business logic in the endpoint +app.MapPost("/orders", async (CreateOrderRequest req, AppDbContext db) => +{ + var order = new Order { CustomerId = req.CustomerId }; + foreach (var item in req.Items) + { + order.Items.Add(new OrderItem { ProductId = item.ProductId, Quantity = item.Quantity }); + } + order.Total = order.Items.Sum(i => i.Quantity * i.UnitPrice); + db.Orders.Add(order); + await db.SaveChangesAsync(); + return TypedResults.Created($"/orders/{order.Id}", order); +}); + +// GOOD — endpoint delegates to a use case +app.MapPost("/orders", async (CreateOrderCommand command, ISender sender, CancellationToken ct) => +{ + var result = await sender.Send(command, ct); + return result.IsSuccess + ? 
TypedResults.Created($"/orders/{result.Value}", result.Value) + : result.ToProblemDetails(); +}); +``` + +### Repository for Every Entity + +```csharp +// BAD — repository per entity duplicates DbSet functionality +public interface IOrderRepository { Task GetByIdAsync(Guid id); } +public interface IProductRepository { Task GetByIdAsync(Guid id); } +public interface ICustomerRepository { Task GetByIdAsync(Guid id); } + +// GOOD — use IAppDbContext with DbSet directly +// Only create a repository interface when you have complex query logic +// that you want to test in isolation or reuse across multiple use cases +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| When to use CA over VSA | Medium+ domain complexity, long-lived system, team familiar with layers | +| When to add a Domain layer | Business rules involve invariants across entity groups | +| IAppDbContext vs repositories | Prefer IAppDbContext; add repository only for complex reusable queries | +| Mediator vs raw handlers in CA | Mediator for pipeline behaviors (validation, logging); raw handlers for simplicity | +| When to add Domain events | When side effects (notifications, audit) should be decoupled from the main flow | +| Evolving from VSA to CA | When handlers start needing shared domain logic that does not belong in Common/ | diff --git a/.opencode/skills/code-review-workflow/SKILL.md b/.opencode/skills/code-review-workflow/SKILL.md new file mode 100644 index 00000000..283523b6 --- /dev/null +++ b/.opencode/skills/code-review-workflow/SKILL.md @@ -0,0 +1,233 @@ +--- +name: code-review-workflow +description: > + Structured code review workflow for .NET projects using Roslyn MCP tools. + Multi-dimensional review covering correctness, security, performance, + architecture compliance, and test coverage. + Load when: "review PR", "review code", "code review", "PR review", + "review changes", "review my code", "check code quality". 
+--- + +# Code Review Workflow + +## Core Principles + +1. **MCP-first analysis** — Use Roslyn MCP tools before reading source files. `detect_antipatterns` catches more than manual scanning, `get_diagnostics` finds what the compiler knows, and `find_references` reveals blast radius. Only read files for context that tools can't provide. +2. **Structured output** — Every review follows the same format: Summary → Critical → Warnings → Suggestions → Architecture Compliance → Test Coverage → What's Good. Consistent structure makes reviews actionable and scannable. +3. **Severity-based findings** — Categorize every finding as Critical (must fix before merge), Warning (should fix, creates tech debt), or Suggestion (nice to have). Never mix severities — a cosmetic issue next to a security bug buries the important finding. +4. **Actionable suggestions** — Every finding includes: what's wrong, why it matters, and how to fix it. "This is bad" is not a review comment. "This creates N+1 queries because X. Fix by adding `.Include()` or using a projection" is. +5. **Acknowledge good work** — Always include a "What's Good" section. Positive reinforcement of good patterns is as important as flagging bad ones. + +## Patterns + +### Full PR Review Flow + +Use for non-trivial PRs (3+ files changed, new features, refactors). Execute steps in order: + +**Step 1: Understand the change scope** +Get changed files from git diff or user input. Categorize: +- New files (features, tests, configs) +- Modified files (which layers? domain, application, infrastructure, API?) +- Deleted files (was anything depending on them?) + +**Step 2: Automated analysis** +Run MCP tools on changed files: +``` +→ detect_antipatterns (file: each changed .cs file) + Catch: async void, sync-over-async, DateTime.Now, new HttpClient(), broad catch, etc. 
+ +→ get_diagnostics (scope: file, path: each changed file) + Catch: new compiler warnings, nullability issues, unused variables + +→ get_public_api (typeName: each modified type) + Check: API surface changes — new public members, removed members, signature changes +``` + +**Step 3: Blast radius assessment** +For each changed public API: +``` +→ find_references (symbolName: changedMethod) + Count callers. High count = high risk. Flag breaking changes. +``` + +**Step 4: Architecture compliance** +``` +→ get_project_graph + Verify: dependency direction is correct (Domain → nothing, Infra → Domain, Api → Application) + Flag: circular references, wrong-direction dependencies +``` + +**Step 5: Test coverage check** +``` +→ get_test_coverage_map (projectFilter: changed project) + Check: do test files exist for every changed type? + Flag: new types without tests, modified logic without test updates +``` + +**Step 6: Manual review** +Read changed files for things tools can't catch: +- Business logic correctness +- Naming clarity and consistency +- Error handling completeness +- Concurrency safety +- Security: input validation, authorization checks, data exposure + +**Step 7: Produce review** + +```markdown +## Review Summary +[1-2 sentence overall assessment: scope, risk level, recommendation] + +## Critical (must fix) +- **[File:Line] [Title]** — [What's wrong]. [Why it matters]. [How to fix]. +- ... + +## Warnings (should fix) +- **[File:Line] [Title]** — [What's wrong]. [Impact if not fixed]. [Suggested fix]. +- ... + +## Suggestions (nice to have) +- **[File:Line] [Title]** — [Current approach]. [Better alternative]. [Why]. +- ... + +## Architecture Compliance +[Dependency direction check results. Layer violation findings. Module boundary enforcement.] + +## Test Coverage +[Which changed types have tests. Which are missing. Specific test scenarios to add.] + +## What's Good +- [Positive finding 1 — reinforce good patterns] +- [Positive finding 2] +- ... 
+``` + +### Quick Review + +Use for small changes (1-2 files, bug fixes, config changes). Lightweight — skip blast radius and architecture checks. + +**Steps:** +1. Run `detect_antipatterns` on changed files +2. Run `get_diagnostics` on changed files +3. Read the changed code for correctness +4. Produce abbreviated review (Summary + Issues + What's Good) + +```markdown +## Quick Review +[1 sentence assessment] + +### Issues +- [Finding with severity tag: 🔴 Critical / 🟡 Warning / 🔵 Suggestion] + +### What's Good +- [Positive note] +``` + +### Architecture Compliance Check + +Standalone check for architecture-level concerns. Use when reviewing project structure changes, new project additions, or module boundary modifications. + +**Steps:** +1. Run `get_project_graph` — visualize the full dependency tree +2. Verify dependency rules per architecture: + +| Architecture | Rule | Violation Example | +|-------------|------|-------------------| +| VSA | Features don't reference each other | Feature A imports from Feature B | +| Clean Architecture | Domain has zero project references | Domain references Infrastructure | +| DDD | Aggregates don't reference other aggregates | Order aggregate imports Product aggregate | +| Modular Monolith | Modules communicate only via integration events | Module A directly references Module B's DbContext | + +3. Run `find_references` on module/layer boundary types to verify encapsulation: +``` +→ find_references(symbolName: "OrdersDbContext") + Should only be referenced within the Orders module. + External references = module boundary violation. +``` + +4. Run `detect_circular_dependencies` to find cycles: +``` +→ detect_circular_dependencies(scope: projects) + Flag any project-level cycles. + +→ detect_circular_dependencies(scope: types, projectFilter: "MyApp.Application") + Flag type-level cycles within the application layer. 
+``` + +## Anti-patterns + +### Reviewing Without MCP Tools + +``` +# BAD — Reading every file manually, missing patterns across the codebase +"Let me read OrderService.cs... looks fine to me." +# Missed: 3 DateTime.Now usages, 1 async void, 2 compiler warnings +``` + +``` +# GOOD — MCP-first, then targeted file reads +→ detect_antipatterns: Found 3 DateTime.Now (AP004), 1 async void (AP001) +→ get_diagnostics: 2 CS8600 warnings in OrderService.cs +"I found 6 issues via static analysis. Let me read the files for business logic review..." +``` + +### Vague Feedback + +``` +# BAD +"The code could be better." +"This doesn't look right." +"Consider refactoring this." +``` + +``` +# GOOD +"OrderService.cs:47 — `DateTime.Now` should be `TimeProvider.GetUtcNow()`. +DateTime.Now is untestable and uses local timezone. Inject TimeProvider +via primary constructor and call GetUtcNow()." +``` + +### Missing Security Checks + +``` +# BAD — Only checking code style and patterns +"Code looks clean, approved!" +# Missed: SQL injection in raw query, missing authorization attribute, exposed PII in logs +``` + +``` +# GOOD — Security is a review dimension +"## Critical +- **OrderController.cs:23** Missing `[Authorize]` — endpoint exposes order data without auth +- **SearchService.cs:45** SQL injection — user input concatenated into raw SQL. Use parameterized query. +## Suggestions +- **LoggingMiddleware.cs:12** PII exposure — email logged at Information level. Mask or use Debug level." +``` + +### Blocking on Style, Ignoring Substance + +``` +# BAD — 10 comments about naming, 0 about the race condition +"Rename `svc` to `service`. Use `var` instead of explicit type. Add XML docs." 
+``` + +``` +# GOOD — Prioritize by impact +"## Critical +- Race condition in OrderService.ProcessAsync — concurrent calls can double-charge +## Suggestions +- Consider renaming `svc` to `service` for clarity" +``` + +## Decision Guide + +| Scenario | Review Type | MCP Tools | +|----------|------------|-----------| +| Feature PR (3+ files) | Full PR Review | All tools | +| Bug fix (1-2 files) | Quick Review | detect_antipatterns, get_diagnostics | +| Config/infra changes | Quick Review + Manual | get_project_graph | +| New project/module added | Architecture Compliance | get_project_graph, detect_circular_dependencies | +| Refactor PR | Full PR Review + Architecture | All tools + find_references (blast radius) | +| Security-sensitive change | Full PR Review → escalate to security-auditor | detect_antipatterns + manual security review | +| Test-only changes | Quick Review | get_diagnostics only | +| Performance-critical path | Full PR Review → escalate to performance-analyst | get_diagnostics + manual review | diff --git a/.opencode/skills/configuration/SKILL.md b/.opencode/skills/configuration/SKILL.md new file mode 100644 index 00000000..f7ed4de3 --- /dev/null +++ b/.opencode/skills/configuration/SKILL.md @@ -0,0 +1,197 @@ +--- +name: configuration +description: > + Configuration patterns for .NET 10 applications. Covers the Options pattern, + IOptionsSnapshot vs IOptions, secrets management, and environment-based + configuration. + Load this skill when setting up application configuration, managing secrets, + binding configuration sections, or when the user mentions "configuration", + "appsettings", "Options pattern", "IOptions", "IOptionsSnapshot", "secrets", + "user secrets", "environment variables", "connection string", or "config binding". +--- + +# Configuration + +## Core Principles + +1. **Options pattern always** — Never read `IConfiguration` directly in services. Bind configuration sections to strongly-typed classes with validation. +2. 
**Validate on startup** — Use `ValidateDataAnnotations()` and `ValidateOnStart()` to catch misconfiguration before the first request. +3. **Secrets never in source** — Use user secrets in development, Azure Key Vault or environment variables in production. Never commit secrets to git. +4. **Configuration layering** — `appsettings.json` → `appsettings.{Environment}.json` → environment variables → user secrets. Later sources override earlier ones. + +## Patterns + +### Options Pattern + +```csharp +// Options class with validation attributes +public class DatabaseOptions +{ + public const string SectionName = "Database"; + + [Required] + public required string ConnectionString { get; init; } + + [Range(1, 100)] + public int MaxRetryCount { get; init; } = 3; + + [Range(1, 60)] + public int CommandTimeoutSeconds { get; init; } = 30; +} + +// Registration with validation +builder.Services.AddOptions<DatabaseOptions>() + .BindConfiguration(DatabaseOptions.SectionName) + .ValidateDataAnnotations() + .ValidateOnStart(); // Fails at startup if configuration is invalid +``` + +```json +// appsettings.json +{ + "Database": { + "ConnectionString": "", + "MaxRetryCount": 3, + "CommandTimeoutSeconds": 30 + } +} +``` + +### Injecting Options + +```csharp +// IOptions<T> — singleton, read once at startup, doesn't change +public class OrderService(IOptions<DatabaseOptions> options) +{ + private readonly DatabaseOptions _db = options.Value; +} + +// IOptionsSnapshot<T> — scoped, re-reads per request (for reloadable config) +public class OrderService(IOptionsSnapshot<DatabaseOptions> options) +{ + private readonly DatabaseOptions _db = options.Value; +} + +// IOptionsMonitor<T> — singleton, actively watches for changes +public class BackgroundWorker(IOptionsMonitor<DatabaseOptions> options) +{ + public void DoWork() + { + var current = options.CurrentValue; // Always latest + } +} +``` + +### Custom Validation (Complex Rules) + +```csharp +builder.Services.AddOptions<JwtOptions>() + .BindConfiguration("Jwt") + .Validate(options => + { + if 
(string.IsNullOrEmpty(options.Key) || options.Key.Length < 32) + return false; + if (options.ExpirationMinutes <= 0) + return false; + return true; + }, "JWT key must be at least 32 characters and expiration must be positive") + .ValidateOnStart(); +``` + +### Azure Key Vault (Production) + +```csharp +// Program.cs — add Key Vault as a configuration source +if (builder.Environment.IsProduction()) +{ + var keyVaultUri = new Uri(builder.Configuration["KeyVault:Uri"]!); + builder.Configuration.AddAzureKeyVault(keyVaultUri, new DefaultAzureCredential()); +} +``` + +### Configuration for Multiple Environments + +```csharp +// Named options — different config per named instance +builder.Services.AddOptions("internal") + .BindConfiguration("Smtp:Internal"); +builder.Services.AddOptions("customer") + .BindConfiguration("Smtp:Customer"); + +// Usage +public class EmailService(IOptionsSnapshot options) +{ + public async Task SendInternalEmail(string to, string body) + { + var smtp = options.Get("internal"); + // ... 
+ } +} +``` + +## Anti-patterns + +### Don't Read IConfiguration Directly + +```csharp +// BAD — stringly-typed, no validation, hard to test +public class OrderService(IConfiguration config) +{ + public void Process() + { + var timeout = int.Parse(config["Database:CommandTimeout"]!); + } +} + +// GOOD — strongly-typed options +public class OrderService(IOptions<DatabaseOptions> options) +{ + public void Process() + { + var timeout = options.Value.CommandTimeoutSeconds; + } +} +``` + +### Don't Put Secrets in appsettings.json + +```json +// BAD — committed to source control +{ + "Jwt": { "Key": "super-secret-key" }, + "Database": { "ConnectionString": "Server=prod;Password=secret" } +} + +// GOOD — appsettings.json has defaults/structure only +{ + "Jwt": { "Key": "", "Issuer": "myapp", "Audience": "myapp" }, + "Database": { "ConnectionString": "" } +} +// Secrets provided via user-secrets (dev) or env vars / Key Vault (prod) +``` + +### Don't Skip Startup Validation + +```csharp +// BAD — misconfiguration discovered at runtime +builder.Services.Configure<JwtOptions>(builder.Configuration.GetSection("Jwt")); + +// GOOD — fail fast at startup +builder.Services.AddOptions<JwtOptions>() + .BindConfiguration("Jwt") + .ValidateDataAnnotations() + .ValidateOnStart(); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Binding config to class | Options pattern with `BindConfiguration` | +| Simple, immutable config | `IOptions<T>` | +| Config that changes per request | `IOptionsSnapshot<T>` | +| Background service watching config | `IOptionsMonitor<T>` | +| Development secrets | `dotnet user-secrets` | +| Production secrets | Azure Key Vault or environment variables | +| Validating config | `ValidateDataAnnotations()` + `ValidateOnStart()` | +| Multiple configs of same type | Named options with `IOptionsSnapshot<T>.Get(name)` | diff --git a/.opencode/skills/container-publish/SKILL.md b/.opencode/skills/container-publish/SKILL.md new file mode 100644 index 00000000..b6bd926a --- /dev/null 
+++ b/.opencode/skills/container-publish/SKILL.md @@ -0,0 +1,235 @@ +--- +name: container-publish +description: > + Dockerfile-less containerization using the .NET 10 SDK container publishing + feature. Covers MSBuild properties, chiseled images, multi-arch builds, and + registry publishing — all without writing a Dockerfile. + Load this skill when the user wants to containerize without a Dockerfile, or + mentions "dotnet publish container", "PublishContainer", "ContainerRepository", + "ContainerFamily", "chiseled", "distroless", "container publish", "SDK + container", "no Dockerfile", or "containerize without Docker". +--- + +# Container Publishing (No Dockerfile) + +## Core Principles + +1. **No Dockerfile needed** — The .NET 10 SDK builds OCI-compliant container images directly from `dotnet publish /t:PublishContainer`. No Dockerfile to write or maintain. +2. **Chiseled images for production** — Use `noble-chiseled` base images: no shell, no package manager, 7 Linux components vs 100+. Smallest attack surface. +3. **Non-root by default** — .NET 10 container images run as the `app` user automatically. Never override to root in production. +4. **Configuration in the .csproj** — All container settings are MSBuild properties, versioned with your project. No separate files to drift. + +## Patterns + +### Minimal Container Publish + +No project file changes needed. Just publish: + +```bash +dotnet publish /t:PublishContainer --os linux --arch x64 +``` + +This creates a container image in your local Docker daemon using the default `aspnet:10.0` base image. 
+ +### Production-Ready .csproj Configuration + +```xml + + + + net10.0 + mycompany/myapp-api + noble-chiseled + + + + + + + + + + +``` + +### Publishing to a Registry + +Authenticate with `docker login` first, then specify the registry: + +```bash +# GitHub Container Registry +docker login ghcr.io +dotnet publish /t:PublishContainer --os linux --arch x64 \ + -p ContainerRegistry=ghcr.io \ + -p ContainerImageTag=1.0.0 + +# Azure Container Registry +az acr login --name myregistry +dotnet publish /t:PublishContainer --os linux --arch x64 \ + -p ContainerRegistry=myregistry.azurecr.io + +# Docker Hub (requires username prefix in repository) +dotnet publish /t:PublishContainer --os linux --arch x64 \ + -p ContainerRegistry=docker.io \ + -p ContainerRepository=myuser/myapp +``` + +### Multi-Architecture Images + +Build images for multiple platforms with a single publish: + +```xml + + linux-x64;linux-arm64 + linux-x64;linux-arm64 + +``` + +```bash +dotnet publish /t:PublishContainer +``` + +This produces an OCI Image Index — registries serve the correct architecture automatically. + +### Multiple Tags + +```bash +# Bash — note the quoting for semicolons +dotnet publish /t:PublishContainer --os linux --arch x64 \ + -p ContainerImageTags='"1.0.0;latest"' +``` + +Or in the project file: + +```xml +1.0.0;latest +``` + +### Save as Tarball (No Docker Required) + +No container runtime needed on the build machine. 
Useful for CI scanning: + +```bash +dotnet publish /t:PublishContainer --os linux --arch x64 \ + -p ContainerArchiveOutputPath=./images/myapp.tar.gz + +# Scan with Trivy before pushing +trivy image --input ./images/myapp.tar.gz +``` + +### Chiseled Image Variants + +| ContainerFamily | Use Case | Shell | Size | +|----------------|----------|-------|------| +| *(default)* | General purpose (Debian) | Yes | ~220 MB | +| `noble-chiseled` | Production (no shell) | No | ~110 MB | +| `noble-chiseled-extra` | Production with localization (ICU) | No | ~120 MB | +| `alpine` | Small size, has shell | Yes | ~112 MB | + +```xml + +noble-chiseled + + +noble-chiseled-extra +``` + +For Native AOT, the SDK auto-selects `chiseled-aot`: + +```xml +true + +``` + +### CI/CD with GitHub Actions + +```yaml +jobs: + publish: + runs-on: ubuntu-latest + permissions: + packages: write + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-dotnet@v4 + with: + dotnet-version: '10.0.x' + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - run: | + dotnet publish src/MyApp.Api/MyApp.Api.csproj \ + /t:PublishContainer --os linux --arch x64 \ + -p ContainerRegistry=ghcr.io \ + -p ContainerRepository=${{ github.repository_owner }}/myapp \ + -p ContainerImageTag=${{ github.sha }} +``` + +## Anti-patterns + +### Don't Use the Deprecated Property Names + +```xml + +myapp + + +myapp +``` + +### Don't Use PublishProfile=DefaultContainer + +```bash +# BAD — old approach, inconsistent across project types +dotnet publish -p:PublishProfile=DefaultContainer + +# GOOD — use the MSBuild target directly +dotnet publish /t:PublishContainer +``` + +### Don't Forget to Target Linux + +```bash +# BAD on Windows — may produce a Windows container +dotnet publish /t:PublishContainer + +# GOOD — explicitly target Linux +dotnet publish /t:PublishContainer --os linux --arch x64 +``` + +### Don't Skip Authentication Before Push + 
+
+```bash
+# BAD — fails with CONTAINER1013 error
+dotnet publish /t:PublishContainer -p ContainerRegistry=ghcr.io
+
+# GOOD — authenticate first
+docker login ghcr.io
+dotnet publish /t:PublishContainer -p ContainerRegistry=ghcr.io
+```
+
+### Don't Use SDK Publishing When You Need OS Packages
+
+```xml
+<!-- BAD — SDK publishing cannot run apt/apk to install OS packages -->
+
+<!-- GOOD — build a custom base image with a Dockerfile once, then layer on it -->
+<ContainerBaseImage>myregistry/custom-base:1.0</ContainerBaseImage>
+```
+
+## Decision Guide
+
+| Scenario | Recommendation |
+|----------|---------------|
+| Standard ASP.NET Core API | SDK container publishing with `noble-chiseled` |
+| Worker service / console app | SDK container publishing (native .NET 10 support) |
+| Needs native OS packages | Dockerfile (or custom base image + SDK publishing) |
+| Azure Functions | Dockerfile (not supported by SDK publishing) |
+| CI without Docker daemon | Tarball output with `ContainerArchiveOutputPath` |
+| Multi-arch deployment (x64 + arm64) | `ContainerRuntimeIdentifiers` property |
+| Production image size | `noble-chiseled` (~110 MB) or Native AOT (~10 MB) |
+| Local development | `dotnet publish /t:PublishContainer --os linux --arch x64` |
+| Registry push | `ContainerRegistry` + `docker login` |
diff --git a/.opencode/skills/context-discipline/SKILL.md b/.opencode/skills/context-discipline/SKILL.md
new file mode 100644
index 00000000..16f5df3f
--- /dev/null
+++ b/.opencode/skills/context-discipline/SKILL.md
@@ -0,0 +1,217 @@
+---
+name: context-discipline
+description: >
+  Token budget management for Claude Code sessions. Teaches how to minimize
+  context consumption using MCP-first navigation, lazy loading, subagent
+  isolation, and strategic file reading. Keeps Claude effective throughout
+  long sessions by treating the 200k token window as a budget, not a dumping
+  ground. Load this skill when context is running low, sessions feel sluggish,
+  Claude starts forgetting earlier context, or when planning how to explore
+  a large codebase efficiently. 
Keywords: "context", "tokens", "budget", + "running out of context", "too many files", "large codebase", "memory". +--- + +# Context Discipline + +## Core Principles + +1. **MCP tools first, file reads second** — A Roslyn MCP query costs 30-150 tokens. Reading a file costs 500-2000+ tokens. For navigation and understanding, always try MCP tools before opening files. Only read files when you need to modify them or MCP tools don't provide enough detail. + +2. **Lazy load everything** — Don't read files "just in case." Don't load all skills upfront. Don't explore directories you aren't about to modify. Load information at the moment you need it, not before. + +3. **Subagents are context isolation chambers** — Every subagent gets its own context window. Offload exploration, research, and analysis to subagents. They process information and return a summary — your main context stays clean. + +4. **Summarize and discard** — After exploring a subsystem, summarize what you learned in a few lines. The summary is what stays in context, not the raw file contents. Think of it as compressing information. + +5. **Know your budget** — A 200k token window sounds large but fills fast. A typical .cs file is 500-2000 tokens. Loading 50 files can consume half your budget. Plan your reads like you plan your sprints — deliberately. + +## Patterns + +### MCP-First Navigation + +Always prefer MCP tools for understanding code structure: + +``` +TASK: Understand how OrderService works + +EXPENSIVE APPROACH (file reads): +1. Read src/Orders/OrderService.cs → ~1200 tokens +2. Read src/Orders/IOrderService.cs → ~300 tokens +3. Read src/Orders/OrderRepository.cs → ~800 tokens +4. Read src/Orders/Models/Order.cs → ~600 tokens +Total: ~2900 tokens consumed + +TOKEN-EFFICIENT APPROACH (MCP-first): +1. find_symbol "OrderService" → ~50 tokens (file path + line) +2. get_public_api "OrderService" → ~120 tokens (method signatures) +3. find_references "OrderService" → ~80 tokens (who uses it) +4. 
get_type_hierarchy "OrderService" → ~60 tokens (inheritance chain) +Total: ~310 tokens consumed — 9x cheaper + +Only THEN read the specific method you need to modify: ~200 tokens +Grand total: ~510 tokens vs 2900 — 5.7x savings +``` + +### Subagent Offloading Decision Matrix + +Decide when to offload to a subagent vs. handle in main context: + +``` +USE A SUBAGENT WHEN: +- Exploring an unfamiliar part of the codebase (> 3 files to read) +- Researching a question that requires reading docs or multiple files +- Running analysis that produces verbose output (test results, diagnostics) +- Comparing approaches that require loading multiple examples +- Any task where the journey is verbose but the answer is concise + +STAY IN MAIN CONTEXT WHEN: +- Modifying a file you've already read +- Quick lookups (1-2 MCP queries) +- Tasks where you need to see prior conversation context +- Writing code that builds on discussion with the user +``` + +Example subagent delegation: + +``` +TASK: "Find all places where we handle authentication" + +MAIN CONTEXT APPROACH (expensive): +- Read 8 files, consume ~8000 tokens +- All that content stays in context forever + +SUBAGENT APPROACH (efficient): +- Spawn subagent: "Find all authentication handling in this codebase. + Use find_symbol and find_references for auth-related types. + Return: file paths, line numbers, and a 1-line summary per location." 
+- Subagent returns ~200 tokens of summarized findings +- Main context stays clean +``` + +### Context Budget Planning + +Before a complex task, estimate token spend per phase: + +``` +UNDERSTAND (~5k): MCP queries (~300) + subagent exploration (~500) + reserve +PLAN (~2k): Discussion + plan documentation +IMPLEMENT (~15k): Read files to modify (~3k) + write code (~5k) + iteration (~7k) +VERIFY (~3k): Build + test + diagnostics + format check +REMAINING: ~175k for conversation — comfortable +``` + +### File Reading Prioritization + +When you must read files, prioritize by impact: + +``` +PRIORITY 1 — Files you will modify +Read fully. You need exact content to make correct edits. + +PRIORITY 2 — Files with interfaces/contracts you must satisfy +Read the interface/base class. Skip implementation details. + +PRIORITY 3 — Files for reference patterns +Use get_public_api first. Only read if the API surface isn't enough. + +PRIORITY 4 — Files for general context +Use subagent to summarize. Don't read in main context. 
+ +NEVER READ: +- Entire directories "to understand the project" — use get_project_graph +- Test files for context (unless modifying tests) — use get_test_coverage_map +- Generated files (.designer.cs, migrations) — use get_diagnostics for issues +- Package/config files unless specifically needed +``` + +### Context Pruning & Large Codebase Strategy + +``` +WARNING SIGNS (take action if any apply): +- Read 10+ files, 50+ exchanges, forgetting earlier details, re-reading files + +RECOVERY: Summarize in 5-10 lines → subagents for remaining exploration → +MCP-only for new lookups → reference prior line numbers → suggest new session if needed + +LARGE CODEBASES (50+ projects): +get_project_graph → identify 2-3 relevant projects → find_symbol for key types → +get_public_api for interfaces → read ONLY files you'll modify → subagents for cross-cutting + +NEVER: Read every file to "understand" a project, load all skills upfront, +open a file to find one function (use find_symbol) +``` + +## Anti-patterns + +### Reading Entire Files for One Function + +``` +// BAD — read 1500 tokens to find one 10-line method +Read: src/Orders/OrderService.cs (full file, 80 lines) +*Only needed the ProcessOrder method on line 42* + +// GOOD — targeted approach +MCP: find_symbol "ProcessOrder" → "src/Orders/OrderService.cs:42" +Read: src/Orders/OrderService.cs lines 42-55 → ~200 tokens +``` + +### Loading All Skills Upfront + +``` +// BAD — dump 15 skills into context at session start +"Load: modern-csharp, ef-core, minimal-api, testing, docker, + authentication, logging, caching, messaging, resilience..." 
+*15 skills × ~300 tokens each = ~4500 tokens before any work starts* + +// GOOD — load skills as topics arise +Session start: modern-csharp (always relevant) +User asks about EF: load ef-core +User asks about tests: load testing +*Only pay for what you use* +``` + +### Not Using Subagents for Exploration + +``` +// BAD — explore in main context, polluting the window +Read 12 files across 4 projects to understand auth flow +*~15,000 tokens consumed, all staying in context* + +// GOOD — subagent explores, returns summary +Subagent: "Trace the authentication flow from login to token validation. + Return: the flow as numbered steps with file:line references." +*~300 tokens in main context* +``` + +### Loading Everything Because the Window Is Large + +``` +// BAD — "200k tokens is huge, let's load everything" +Read all 30 files in the Orders module +Read all 15 test files +Read the entire docker-compose.yml +Read all migration files +*80k tokens consumed before writing a single line of code* + +// GOOD — minimum viable context +MCP: get_project_graph (solution shape) +MCP: find_symbol (locate target types) +Read: 2-3 files you'll actually modify +Subagent: summarize anything else you need +*~3k tokens consumed, 197k remaining for actual work* +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Need to find where a type is defined | `find_symbol` — never grep | +| Need to understand a type's API | `get_public_api` — don't read the file | +| Need to modify a file | Read it fully — you need exact content | +| Need to understand project structure | `get_project_graph` — don't browse directories | +| Need to explore unfamiliar code | Spawn a subagent — keep main context clean | +| Read more than 10 files in a session | Pause — switch to MCP + subagents | +| Context feels heavy or sluggish | Summarize what you know, use subagents going forward | +| Large codebase (50+ projects) | MCP-first, subagent-heavy, read only files you modify | 
+| User asks about a new topic mid-session | Load the relevant skill on demand, not in advance | +| Need to compare two approaches | Subagent per approach, compare summaries | diff --git a/.opencode/skills/convention-learner/SKILL.md b/.opencode/skills/convention-learner/SKILL.md new file mode 100644 index 00000000..f26caab6 --- /dev/null +++ b/.opencode/skills/convention-learner/SKILL.md @@ -0,0 +1,242 @@ +--- +name: convention-learner +description: > + Detects and enforces project-specific coding conventions by analyzing + existing codebase patterns. Learns naming conventions, folder structure, + test organization, and coding style from the existing code. + Load when: "conventions", "coding standards", "project patterns", + "enforce style", "detect patterns", "learn conventions", "code consistency". +--- + +# Convention Learner + +## Core Principles + +1. **Observe before enforcing** — Never impose conventions without first analyzing the existing codebase. A project with 200 `internal sealed class` handlers should not get a new `public class` handler. Detect first, then match. +2. **Project conventions override generic rules** — If the project uses `*Service` instead of `*Handler`, follow the project's convention even if the kit default is different. Explicit `.editorconfig` and `Directory.Build.props` rules always win. +3. **Use MCP tools for analysis** — `get_public_api` reveals naming patterns, `get_project_graph` shows structure conventions, `detect_antipatterns` tracks quality trends. Tools provide objective data; file reads provide confirmation. +4. **Document findings** — After detecting conventions, suggest adding them to the project's CLAUDE.md. Undocumented conventions are lost when the original developers leave. +5. **Consistency over perfection** — A project with consistent `snake_case` database columns is better than a project with half `snake_case` and half `PascalCase`. 
Match the existing pattern, even if another convention is theoretically superior. + +## Patterns + +### Convention Detection Flow + +Systematic analysis to understand a project's coding conventions. Run this when joining an existing project or before generating new code. + +**Step 1: Project Structure Analysis** +``` +→ get_project_graph + Detect: + - Project naming: PascalCase? Dots? (MyApp.Domain vs Domain) + - Layer organization: by layer (Domain/Application/Infrastructure) or by feature? + - Test project naming: *.Tests, *.UnitTests, *.IntegrationTests? + - Shared project: Common/, Shared/, BuildingBlocks/? +``` + +**Step 2: Type Naming Patterns** +``` +→ get_public_api (on 3-5 key types across different layers) + Detect: + - Class modifiers: sealed? internal? internal sealed? + - Interface prefix: I* (standard) or no prefix? + - Suffix conventions: Handler, Service, Repository, Validator, Endpoint? + - Record usage: for DTOs? for value objects? for commands/queries? + - Primary constructor usage: consistently? selectively? +``` + +**Step 3: Folder Structure Patterns** +Scan the file system for structural conventions: +- Feature folders: `Features/{FeatureName}/` with all files together? +- Layer folders: `Controllers/`, `Services/`, `Repositories/` separate? +- Shared patterns: `Common/`, `Extensions/`, `Middleware/`? +- Configuration location: root? `Config/` folder? `Infrastructure/`? + +**Step 4: Configuration Detection** +Check for explicit convention enforcers: + +``` +→ Look for Directory.Build.props + - TreatWarningsAsErrors? + - Nullable enabled globally? + - ImplicitUsings? + - AnalysisLevel? + +→ Look for .editorconfig + - Naming rules: camelCase fields? _prefixed privates? + - Code style: var preferences, expression bodies, using placement + +→ Look for global.json + - SDK version pinned? + - Roll-forward policy? 
+``` + +**Step 5: Build Convention Summary** +Compile findings into a structured summary: + +```markdown +## Detected Conventions + +### Naming +- Classes: `internal sealed class` (95% of handlers/services) +- Suffixes: Handlers end in `Handler`, validators in `Validator` +- Records: Used for DTOs and commands/queries + +### Structure +- Architecture: Vertical Slice Architecture +- Features: `Features/{Name}/` with command, handler, validator, endpoint in one file + +### Code Style +- Primary constructors: Used consistently for DI injection +- Nullable: Enabled globally, no suppressions (`!`) used +- File-scoped namespaces: 100% consistent +``` + +Add categories as needed: EF Core (configurations, naming, migrations), Testing (framework, naming, fixtures), etc. + +### Convention Enforcement + +Apply detected conventions when generating new code or reviewing existing code. + +**When Generating Code:** +Match every detected pattern: + +```csharp +// If existing handlers are: internal sealed class + primary constructor +// Generate matching: +internal sealed class CreateProductHandler(AppDbContext db, TimeProvider clock) +{ + // Not: public class CreateProductHandler + // Not: internal class CreateProductHandler (missing sealed) +} +``` + +```csharp +// If existing DTOs are records with init properties +// Generate matching: +public record ProductResponse(Guid Id, string Name, decimal Price); +// Not: public class ProductResponse { public Guid Id { get; set; } } +``` + +**When Reviewing Code:** +Flag deviations from detected conventions: + +``` +⚠️ Convention violation: CreateOrderHandler is `public class` but project convention + is `internal sealed class` (detected in 12/12 existing handlers). 
+ Change to: internal sealed class CreateOrderHandler +``` + +**Suggesting Enforcement Rules:** +After detecting conventions, suggest `.editorconfig` rules to enforce them automatically: + +```ini +# Key .editorconfig rules to suggest based on detected conventions +dotnet_diagnostic.CA1852.severity = warning # Seal internal types +csharp_style_namespace_declarations = file_scoped:warning +csharp_style_prefer_primary_constructors = true:suggestion +# Add dotnet_naming_rule entries for private field prefix (_camelCase) if detected +``` + +### Anti-pattern Tracking + +Use `detect_antipatterns` to track recurring quality issues across sessions. + +**Periodic Check:** +``` +→ detect_antipatterns (scope: solution) + Track over time: + - Are the same patterns recurring? (DateTime.Now keeps appearing) + - Are new patterns emerging? (new HttpClient() in a new module) + - Is the count trending up or down? +``` + +**Prioritization:** +``` +| Anti-pattern | Count | Trend | Priority | +|-------------|-------|-------|----------| +| DateTime.Now | 12 | ↑ +3 | High — add to CLAUDE.md conventions | +| async void | 1 | → same | Medium — one-off fix | +| new HttpClient | 0 | ↓ -2 | Low — already fixing | +``` + +When patterns recur, add explicit rules to CLAUDE.md: +```markdown +## Conventions +- **NEVER use DateTime.Now** — Use TimeProvider.GetUtcNow() (12 violations found, fixing) +``` + +## Anti-patterns + +### Enforcing Without Detecting + +``` +# BAD — Imposing kit defaults on a project with its own conventions +"All handlers should be internal sealed class" +# But this project uses public class with interfaces for testing +``` + +``` +# GOOD — Detect first, then follow what exists +→ get_public_api reveals: 8/8 handlers are `public class` implementing `IHandler` +"This project uses public handlers with interfaces. Matching that convention." 
+``` + +### Overriding Explicit Project Rules + +``` +# BAD — Ignoring .editorconfig because kit says otherwise +# .editorconfig says: csharp_style_expression_bodied_methods = false +# But generating expression-bodied methods anyway +``` + +``` +# GOOD — .editorconfig and Directory.Build.props always win +"Your .editorconfig disables expression-bodied methods. +I'll use block-bodied methods to match your project settings." +``` + +### Applying Generic Conventions to Unconventional Projects + +``` +# BAD — Forcing Clean Architecture naming on a VSA project +"You need a Services/ folder and a Repositories/ folder" +# But this project uses feature folders with everything co-located +``` + +``` +# GOOD — Match the project's organizational convention +"This project uses feature folders. I'll add the new feature +at Features/Shipping/ with all related files together." +``` + +### Documenting Conventions Without Evidence + +``` +# BAD — "Conventions" based on reading one file +"Convention: Use var everywhere" (based on seeing var in one method) +``` + +``` +# GOOD — Document only patterns confirmed across multiple files +→ get_public_api on 5 types: 100% use explicit types for non-obvious cases +"Convention: Use explicit types for non-obvious cases (e.g., method returns), +var for obvious cases (e.g., new MyClass()). Confirmed across 5 files." 
+``` + +## Decision Guide + +| Scenario | Action | Tool | +|----------|--------|------| +| Joining existing project | Run full convention detection flow | get_project_graph, get_public_api | +| Generating new code | Check detected conventions first | Previous detection results | +| Reviewing code | Flag convention deviations | get_public_api + comparison | +| Convention conflict (kit vs project) | **Project wins** | — | +| Convention conflict (team disagreement) | Document both, suggest .editorconfig | — | +| No conventions detected | Use kit defaults, document them | architecture-advisor skill | +| Recurring anti-pattern | Add to CLAUDE.md conventions | detect_antipatterns | +| New team member onboarding | Run detection, generate convention doc | Full detection flow | +| .editorconfig exists | Trust it, don't override | Read .editorconfig | +| No .editorconfig | Suggest creating one based on detected patterns | Detection + generation | +| Pattern seen once | Create instinct at 0.3 confidence via `instinct-system` skill | instinct-system | +| Pattern confirmed 3+ times | Instinct auto-promotes to 0.7, suggest adding to CLAUDE.md | instinct-system | diff --git a/.opencode/skills/ddd/SKILL.md b/.opencode/skills/ddd/SKILL.md new file mode 100644 index 00000000..fb21fa4b --- /dev/null +++ b/.opencode/skills/ddd/SKILL.md @@ -0,0 +1,329 @@ +--- +name: ddd +description: > + Domain-Driven Design tactical patterns for .NET applications. Covers aggregates, + aggregate roots, value objects, domain events, domain services, strongly-typed IDs, + and repository patterns for aggregate persistence. + Load this skill when implementing DDD, working with aggregates, value objects, + domain events, bounded contexts, or when the architecture-advisor recommends + DDD + Clean Architecture. Pair with the clean-architecture skill. +--- + +# Domain-Driven Design (DDD) + +## Core Principles + +1. 
**Aggregates define consistency boundaries** — An aggregate is a cluster of entities and value objects treated as a single unit for data changes. All invariants within an aggregate are enforced in a single transaction. Cross-aggregate consistency is eventual. +2. **Value objects over primitives** — Replace primitive obsession with value objects. `Money`, `EmailAddress`, `OrderNumber` are not strings — they carry validation, equality, and behavior. Use C# records for immutable value objects. +3. **Domain events decouple side effects** — When something meaningful happens in the domain (OrderPlaced, PaymentReceived), raise a domain event. Side effects (send email, update read model, notify another aggregate) subscribe to these events. The aggregate stays focused on its own rules. +4. **Aggregate root is the sole entry point** — External code accesses an aggregate only through its root entity. Child entities are never loaded or modified independently. The root enforces all invariants for the entire aggregate. +5. **Repositories persist aggregates, not entities** — One repository per aggregate root. The repository loads and saves the entire aggregate as a unit. No repository for child entities. The Infrastructure implementation uses `DbContext` internally — this is a DDD tactical pattern for aggregate boundaries, not a generic CRUD wrapper. 
+
+## Patterns
+
+### Aggregate Root
+
+The aggregate root owns all access to its children and enforces invariants:
+
+```csharp
+// Domain/Orders/Order.cs
+public sealed class Order : AggregateRoot
+{
+    private readonly List<OrderLine> _lines = [];
+
+    private Order() { } // EF Core
+
+    public OrderNumber Number { get; private set; } = null!;
+    public CustomerId CustomerId { get; private set; }
+    public Money Total { get; private set; } = Money.Zero("USD");
+    public OrderStatus Status { get; private set; }
+    public DateTimeOffset PlacedAt { get; private set; }
+    public IReadOnlyList<OrderLine> Lines => _lines.AsReadOnly();
+
+    public static Order Place(CustomerId customerId, OrderNumber number, DateTimeOffset now)
+    {
+        var order = new Order
+        {
+            Id = Guid.CreateVersion7(),
+            CustomerId = customerId,
+            Number = number,
+            Status = OrderStatus.Placed,
+            PlacedAt = now
+        };
+
+        order.RaiseDomainEvent(new OrderPlaced(order.Id, customerId, now));
+        return order;
+    }
+
+    public Result AddLine(ProductId productId, int quantity, Money unitPrice)
+    {
+        if (Status is not OrderStatus.Placed)
+            return Result.Failure("Cannot modify a confirmed or cancelled order");
+
+        if (quantity <= 0)
+            return Result.Failure("Quantity must be positive");
+
+        var existing = _lines.FirstOrDefault(l => l.ProductId == productId);
+        if (existing is not null)
+        {
+            existing.IncreaseQuantity(quantity);
+        }
+        else
+        {
+            _lines.Add(new OrderLine(productId, quantity, unitPrice));
+        }
+
+        RecalculateTotal();
+        return Result.Success();
+    }
+
+    public Result Confirm()
+    {
+        if (Status is not OrderStatus.Placed)
+            return Result.Failure("Only placed orders can be confirmed");
+
+        if (_lines.Count == 0)
+            return Result.Failure("Cannot confirm an order with no lines");
+
+        Status = OrderStatus.Confirmed;
+        RaiseDomainEvent(new OrderConfirmed(Id));
+        return Result.Success();
+    }
+
+    private void RecalculateTotal()
+    {
+        Total = _lines.Aggregate(Money.Zero(Total.Currency), (sum, line) => sum + line.Subtotal);
+    }
+}
+```
+
+### 
Value Objects as Records
+
+Use C# records for immutable value objects with structural equality:
+
+```csharp
+// Domain/Common/Money.cs
+public sealed record Money
+{
+    public decimal Amount { get; }
+    public string Currency { get; }
+
+    public Money(decimal amount, string currency)
+    {
+        ArgumentOutOfRangeException.ThrowIfNegative(amount);
+        ArgumentException.ThrowIfNullOrWhiteSpace(currency);
+
+        Amount = amount;
+        Currency = currency.ToUpperInvariant();
+    }
+
+    public static Money Zero(string currency) => new(0, currency);
+
+    public static Money operator +(Money left, Money right)
+    {
+        if (left.Currency != right.Currency)
+            throw new InvalidOperationException($"Cannot add {left.Currency} and {right.Currency}");
+        return new Money(left.Amount + right.Amount, left.Currency);
+    }
+}
+
+// Other value objects (EmailAddress, OrderNumber, etc.) follow the same pattern:
+// sealed record, constructor validation, no public setters
+```
+
+### Strongly-Typed IDs with EF Core Converters
+
+Prevent mixing up GUIDs from different entities:
+
+```csharp
+// Domain/Common/StronglyTypedId.cs
+public readonly record struct CustomerId(Guid Value)
+{
+    public static CustomerId New() => new(Guid.CreateVersion7());
+    public override string ToString() => Value.ToString();
+}
+
+public readonly record struct ProductId(Guid Value)
+{
+    public static ProductId New() => new(Guid.CreateVersion7());
+}
+
+public readonly record struct OrderNumber(string Value)
+{
+    public override string ToString() => Value;
+}
+
+// Infrastructure/Persistence/Configurations/OrderConfiguration.cs
+public class OrderConfiguration : IEntityTypeConfiguration<Order>
+{
+    public void Configure(EntityTypeBuilder<Order> builder)
+    {
+        builder.HasKey(o => o.Id);
+
+        builder.Property(o => o.CustomerId)
+            .HasConversion(id => id.Value, value => new CustomerId(value));
+
+        builder.Property(o => o.Number)
+            .HasConversion(n => n.Value, value => new OrderNumber(value))
+            .HasMaxLength(50);
+
+        builder.ComplexProperty(o => 
o.Total, money =>
+        {
+            money.Property(m => m.Amount).HasColumnName("Total").HasPrecision(18, 2);
+            money.Property(m => m.Currency).HasColumnName("Currency").HasMaxLength(3);
+        });
+
+        builder.HasMany(o => o.Lines).WithOne().HasForeignKey("OrderId");
+        builder.Navigation(o => o.Lines).AutoInclude();
+    }
+}
+```
+
+### Domain Event Dispatching
+
+Raise events in the aggregate, dispatch in SaveChangesAsync:
+
+```csharp
+// Domain/Common/AggregateRoot.cs
+public abstract class AggregateRoot : Entity
+{
+    private readonly List<IDomainEvent> _domainEvents = [];
+
+    public IReadOnlyList<IDomainEvent> DomainEvents => _domainEvents.AsReadOnly();
+
+    protected void RaiseDomainEvent(IDomainEvent domainEvent) => _domainEvents.Add(domainEvent);
+
+    public void ClearDomainEvents() => _domainEvents.Clear();
+}
+
+public interface IDomainEvent : INotification
+{
+    DateTimeOffset OccurredAt { get; }
+}
+
+// Domain/Orders/Events/OrderPlaced.cs
+public sealed record OrderPlaced(Guid OrderId, CustomerId CustomerId, DateTimeOffset PlacedAt) : IDomainEvent
+{
+    public DateTimeOffset OccurredAt => PlacedAt;
+}
+
+// Infrastructure/Persistence/AppDbContext.cs
+public override async Task<int> SaveChangesAsync(CancellationToken ct = default)
+{
+    var aggregates = ChangeTracker.Entries<AggregateRoot>()
+        .Where(e => e.Entity.DomainEvents.Count > 0)
+        .Select(e => e.Entity)
+        .ToList();
+
+    var events = aggregates.SelectMany(a => a.DomainEvents).ToList();
+
+    var result = await base.SaveChangesAsync(ct);
+
+    foreach (var @event in events)
+        await _publisher.Publish(@event, ct);
+
+    foreach (var aggregate in aggregates)
+        aggregate.ClearDomainEvents();
+
+    return result;
+}
+```
+
+### Domain Services
+
+For logic that does not belong to a single aggregate:
+
+```csharp
+// Domain/Orders/Services/PricingService.cs
+// Coordinates logic across aggregates — takes domain interfaces, returns value objects
+public sealed class PricingService(IDiscountPolicy discountPolicy)
+{
+    public Money CalculatePrice(ProductId productId, int quantity, 
Money unitPrice, CustomerId customerId)
+    {
+        var subtotal = new Money(unitPrice.Amount * quantity, unitPrice.Currency);
+        var discount = discountPolicy.GetDiscount(customerId, productId, quantity);
+        return new Money(subtotal.Amount * (1 - discount), subtotal.Currency);
+    }
+}
+```
+
+## Anti-patterns
+
+### Oversized Aggregates
+
+```csharp
+// BAD — Customer aggregate owns everything the customer touches
+public class Customer : AggregateRoot
+{
+    public List<Order> Orders { get; } = []; // should be separate aggregate
+    public List<Payment> Payments { get; } = []; // should be separate aggregate
+    public List<Address> Addresses { get; } = []; // might be OK as child
+    public ShoppingCart Cart { get; set; } // should be separate aggregate
+}
+
+// GOOD — small, focused aggregates linked by ID
+public class Customer : AggregateRoot
+{
+    public CustomerName Name { get; private set; }
+    public EmailAddress Email { get; private set; }
+    // Orders, Payments, Cart are separate aggregates referencing CustomerId
+}
+```
+
+### Domain Events for Intra-Aggregate Logic
+
+```csharp
+// BAD — using events for logic within the same aggregate
+order.RaiseDomainEvent(new OrderLineAdded(line));
+// Then a handler recalculates the total... but you're in the same aggregate!
+
+// GOOD — just call the method directly within the aggregate
+_lines.Add(line);
+RecalculateTotal(); // private method, no event needed
+```
+
+### Value Objects with Identity
+
+```csharp
+// BAD — value object with an Id (it's an entity then!)
+public record Address
+{
+    public Guid Id { get; init; } // value objects don't have identity
+    public string Street { get; init; }
+}
+
+// GOOD — value objects are defined by their attributes, not an Id
+public record Address(string Street, string City, string PostalCode, string Country);
+```
+
+### Anemic Aggregates
+
+```csharp
+// BAD — aggregate is just a data bag, service does all the work
+public class Order : AggregateRoot
+{
+    public OrderStatus Status { get; set; } // public setter!
+    public List<OrderLine> Lines { get; set; } = [];
+}
+
+// Service directly manipulates order state
+order.Status = OrderStatus.Confirmed; // no invariant check!
+order.Lines.Add(newLine); // no validation!
+ +// GOOD — aggregate encapsulates rules (see Aggregate Root pattern above) +order.Confirm(); // validates status, raises event +order.AddLine(productId, quantity, unitPrice); // validates, recalculates +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| When to use DDD | Complex domain with business rules that go beyond CRUD | +| When to use value objects | Any concept with validation rules or equality based on attributes, not identity | +| Aggregate size | Keep small — typically 1 root entity + 0-3 child entities. Load the whole aggregate every time | +| Domain events vs integration events | Domain events: within bounded context, same transaction. Integration events: cross-context, via message bus | +| Strongly-typed IDs | Always for aggregate root IDs that cross boundaries. Optional for child entity IDs | +| When NOT to use DDD | Simple CRUD, settings, audit logs, read models — use plain entities | +| Repository vs DbContext | Repository per aggregate root for complex aggregates; IAppDbContext for simpler queries | +| Domain services | Only when logic requires multiple aggregates or external data the aggregate should not know about | diff --git a/.opencode/skills/de-sloppify/SKILL.md b/.opencode/skills/de-sloppify/SKILL.md new file mode 100644 index 00000000..5a4fcbb5 --- /dev/null +++ b/.opencode/skills/de-sloppify/SKILL.md @@ -0,0 +1,277 @@ +--- +name: de-sloppify +description: > + Systematic code cleanup pipeline for .NET projects. Runs 7 ordered steps: + formatting, unused usings, analyzer warnings, dead code removal, TODO resolution, + sealed class audit, and CancellationToken propagation. Each step is verified + independently with tests between phases. Load this skill when: "clean up", + "de-sloppify", "tidy up", "remove dead code", "code cleanup", "housekeeping", + "tech debt", "fix warnings", "seal classes", "add CancellationToken", + "unused usings", "format code". 
+--- + +# De-Sloppify + +## Core Principles + +1. **Systematic over random** — Follow the cleanup pipeline in order. Formatting first (because it touches every file), dead code last (because earlier steps might reveal it). Random cleanup misses things and creates merge conflicts. + +2. **Verify after each step** — Run `dotnet build` and `dotnet test` after every step. A cleanup that breaks something is worse than the mess it was fixing. Never batch multiple cleanup types into one untested change. + +3. **Safe removals only** — Before removing any code flagged as "dead," verify it isn't used via reflection, DI conventions, or serialization. `find_references` shows compile-time usage; grep for string-based references that Roslyn cannot track. + +4. **One concern at a time** — Don't mix formatting fixes with logic changes. Don't combine dead code removal with new feature work. Separate concerns make code review possible and reverts safe. + +5. **Commit between phases** — Each cleanup step gets its own commit. If Step 4 (dead code removal) breaks something, revert just that commit without losing the formatting and analyzer fixes from Steps 1-3. + +## Patterns + +### 7-Step Cleanup Pipeline + +Execute in order. Verify (build + test) after each step. Commit each step separately. + +**Step 1: Format All Code** +```bash +dotnet format +``` +Why first: Formatting touches many files. Getting it out of the way prevents merge conflicts with subsequent steps. After this step, every file has consistent style. + +Verify: `dotnet format --verify-no-changes` should report no changes. +Commit: `chore: apply dotnet format` + +**Step 2: Remove Unused Usings** +```bash +dotnet format analyzers --diagnostics IDE0005 +``` +Why second: Unused usings are noise. Removing them makes subsequent analysis cleaner and reduces false positives in code review. + +Verify: `dotnet build` should show no new errors. Run `dotnet test` if usings removal is extensive. 
+Commit: `chore: remove unused using statements` + +**Step 3: Fix Analyzer Warnings** +``` +MCP: get_diagnostics(scope: "solution", severityFilter: "warning") +``` +Triage warnings by category: +- **Nullability warnings (CS8600-CS8604)** — Add null checks or use the null-forgiving operator with a comment explaining why null is impossible. +- **Unused variables (CS0219)** — Remove them. +- **Obsolete API usage (CS0618)** — Migrate to the recommended replacement. +- **IDE suggestions (IDE0xxx)** — Apply if they improve readability. + +Fix in priority order: compiler warnings first, then analyzer suggestions. +Verify: `dotnet build` with zero new warnings. `dotnet test` passes. +Commit: `chore: fix analyzer warnings` + +**Step 4: Remove Dead Code** +``` +MCP: find_dead_code(scope: "solution", kind: "all") +``` +For each result, perform a safety check before removing: + +``` +SAFETY CHECK BEFORE REMOVAL: +1. find_references(symbolName: "DeadType") — confirm zero compile-time references +2. Grep for string-based usage: + - "nameof(DeadType)" — sometimes used in attributes or logging + - Reflection: Type.GetType("DeadType"), Activator.CreateInstance + - DI registration: services.AddScoped(typeof(IHandler<>), typeof(DeadType)) + - Serialization: [JsonDerivedType(typeof(DeadType))] +3. Check if it's a public API consumed by external packages +4. Check if it's referenced in configuration files (appsettings.json, etc.) + +ONLY remove if all checks come back clean. +``` + +Verify: `dotnet build` and `dotnet test` pass. +Commit: `chore: remove dead code` + +**Step 5: Resolve TODOs** +```bash +# Find all TODOs in the codebase +grep -rn "TODO\|HACK\|FIXME\|XXX" --include="*.cs" +``` +For each TODO, decide: +- **Fix it now** — If it's small and self-contained, resolve it in this cleanup pass. 
+- **Create an issue** — If it requires significant work, create a GitHub issue and update the TODO with the issue number: `// TODO(#142): Implement retry logic` +- **Remove it** — If it's stale (the work was already done or is no longer relevant), delete the comment. + +Verify: `dotnet build` and `dotnet test` pass. +Commit: `chore: resolve TODO comments` + +**Step 6: Seal Non-Inherited Classes** +``` +MCP: find_dead_code(scope: "solution", kind: "type") — for a list of types +MCP: get_type_hierarchy(typeName: "EachClass") — check for derived types +``` +Add `sealed` to every class that: +- Has no derived types (confirmed via `get_type_hierarchy`) +- Is not a base class by design (no `virtual` or `abstract` members) +- Is not used as an open generic in DI registration +- Is not a test fixture base class + +Why seal: Sealed classes enable compiler optimizations (devirtualization), communicate design intent ("this class is not meant to be extended"), and prevent accidental inheritance. + +```csharp +// BEFORE +public class OrderValidator : AbstractValidator +{ + public OrderValidator() + { + RuleFor(x => x.CustomerId).NotEmpty(); + } +} + +// AFTER — sealed, because nothing inherits from it +public sealed class OrderValidator : AbstractValidator +{ + public OrderValidator() + { + RuleFor(x => x.CustomerId).NotEmpty(); + } +} +``` + +Skip sealing: +- Classes with `virtual` members designed for override +- Classes used as `IClassFixture` in tests (xUnit requires non-sealed) +- Base classes in a hierarchy (`Entity`, `AggregateRoot`, etc.) + +Verify: `dotnet build` and `dotnet test` pass. +Commit: `chore: seal non-inherited classes` + +**Step 7: Propagate CancellationToken** +``` +MCP: detect_antipatterns(severity: "warning") — filter for "missing CancellationToken" +``` +Trace the async call chain from entry points (endpoints, handlers) through services to data access (DbContext, HttpClient). Ensure `CancellationToken` flows through every layer. 
+ +```csharp +// BEFORE — CancellationToken stops at the endpoint +app.MapGet("/orders/{id}", async (Guid id, AppDbContext db) => +{ + var order = await db.Orders.FindAsync(id); + return order is not null ? Results.Ok(order) : Results.NotFound(); +}); + +// AFTER — CancellationToken propagated to EF Core +app.MapGet("/orders/{id}", async (Guid id, AppDbContext db, CancellationToken ct) => +{ + var order = await db.Orders.FindAsync([id], ct); + return order is not null ? Results.Ok(order) : Results.NotFound(); +}); +``` + +Common propagation points: +- Minimal API endpoints: add `CancellationToken ct` parameter (auto-bound by ASP.NET Core) +- Mediator/MediatR handlers: already provided in `Handle(TRequest, CancellationToken)` +- EF Core: `SaveChangesAsync(ct)`, `ToListAsync(ct)`, `FindAsync([key], ct)` +- HttpClient: `GetAsync(url, ct)`, `PostAsync(url, content, ct)` + +Verify: `dotnet build` and `dotnet test` pass. +Commit: `chore: propagate CancellationToken through async chains` + +### Cleanup Summary Report + +After completing all steps, produce a summary: + +```markdown +## De-Sloppify Report + +| Step | Changes | Files Affected | +|------|---------|----------------| +| 1. Format | Applied consistent formatting | 23 files | +| 2. Usings | Removed 47 unused usings | 18 files | +| 3. Analyzers | Fixed 12 warnings (8 nullability, 3 unused vars, 1 obsolete) | 9 files | +| 4. Dead Code | Removed 3 unused types, 5 unused methods | 6 files | +| 5. TODOs | Fixed 2, created issues for 3, removed 1 stale | 5 files | +| 6. Sealed | Sealed 14 classes | 14 files | +| 7. 
CancellationToken | Added propagation to 8 async chains | 11 files | + +**Total: 7 commits, 86 files improved** +``` + +## Anti-patterns + +### Mixing Cleanup with Feature Work + +``` +# BAD — one commit with formatting + new feature + dead code removal +git commit -m "Add order validation and clean up code" +# Impossible to review, impossible to revert the cleanup without losing the feature + +# GOOD — separate commits, separate concerns +git commit -m "chore: apply dotnet format" +git commit -m "chore: remove dead code" +git commit -m "feat: add order validation" +``` + +### Removing Code Without Checking Reflection + +``` +# BAD — find_dead_code says it's unused, so delete it +MCP: find_dead_code → "PaymentProcessor has 0 references" +*Deletes PaymentProcessor* +# Runtime crash: DI container can't resolve IPaymentProcessor +# It was registered via: services.AddScoped(typeof(IPaymentProcessor), typeof(PaymentProcessor)) + +# GOOD — safety check before removal +MCP: find_dead_code → "PaymentProcessor has 0 references" +MCP: find_references(symbolName: "PaymentProcessor") → 0 compile-time refs +Grep: "PaymentProcessor" in *.cs, *.json → Found in DI registration +*Keep PaymentProcessor — it's used via DI convention* +``` + +### Sealing Classes That Tests Mock + +``` +# BAD — sealing a class that tests inherit from +public sealed class OrderService { ... } +// Test project: class MockOrderService : OrderService { ... 
} — COMPILE ERROR

# GOOD — check for test doubles before sealing
MCP: get_type_hierarchy(typeName: "OrderService") → no derived types in production
Grep: "OrderService" in test projects → no inheritance, only usage via interface
*Safe to seal — tests use IOrderService, not OrderService directly*
```

### Batch-Committing All Cleanup

```
# BAD — one giant commit with all 7 steps
git add -A && git commit -m "chore: cleanup everything"
# If sealing a class broke a test, you have to revert ALL cleanup to fix it

# GOOD — commit per step with verification between
Step 1 → verify → commit
Step 2 → verify → commit
...
# If Step 6 (sealed) breaks something, revert only that commit
```

### Running Cleanup Without Tests

```
# BAD — "it's just formatting and dead code, what could go wrong?"
dotnet format → commit (no test run)
Remove dead code → commit (no test run)
# Dead code was actually used by a test helper via reflection — tests broken

# GOOD — verify after every step
dotnet format → dotnet test → commit
Remove dead code → dotnet test → commit
```

## Decision Guide

| Scenario | Steps to Run | Notes |
|----------|-------------|-------|
| Full cleanup pass | All 7 | Dedicate a session to cleanup only |
| Quick tidy before PR | 1, 2, 6 | Format, usings, seal classes |
| After large feature merge | 1, 2, 3, 4 | Clean up accumulated mess |
| Quarterly maintenance | All 7 | Schedule regular cleanup sprints |
| New team member onboarding | 1, 2, 3 | Get the codebase to a clean baseline |
| Before performance work | 4, 6 | Remove dead code, seal classes for devirtualization |
| After dependency upgrade | 2, 3 | Fix new analyzer warnings from updated packages |
| Pre-release hardening | All 7 | Full cleanup before a release |
| CI warning threshold exceeded | 3 | Focus on analyzer warnings only |
| Tech debt sprint | 4, 5 | Dead code and TODO resolution | diff --git a/.opencode/skills/dependency-injection/SKILL.md 
b/.opencode/skills/dependency-injection/SKILL.md new file mode 100644 index 00000000..ab12db98 --- /dev/null +++ b/.opencode/skills/dependency-injection/SKILL.md @@ -0,0 +1,179 @@ +--- +name: dependency-injection +description: > + Dependency injection patterns for .NET 10. Covers service lifetimes, keyed + services, the decorator pattern, factory pattern, and common DI pitfalls. + Load this skill when registering services, resolving lifetime issues, designing + service composition, or when the user mentions "DI", "dependency injection", + "service registration", "AddScoped", "AddTransient", "AddSingleton", "keyed + services", "decorator", "Scrutor", "IServiceCollection", or "captive dependency". +--- + +# Dependency Injection + +## Core Principles + +1. **Constructor injection is the default** — Inject dependencies through the constructor (primary constructors make this clean). No service locator, no property injection. +2. **Match lifetimes carefully** — A singleton must never depend on a scoped or transient service. This is the most common DI bug. +3. **Register interfaces, resolve interfaces** — Register `services.AddScoped()`, not the concrete type. +4. **Keyed services for strategy pattern** — .NET 8+ keyed services replace manual factory patterns for selecting between implementations. + +## Patterns + +### Keyed Services (.NET 8+) + +Use keyed services to register and resolve multiple implementations of the same interface. + +```csharp +// Registration +builder.Services.AddKeyedScoped("email"); +builder.Services.AddKeyedScoped("sms"); +builder.Services.AddKeyedScoped("push"); + +// Resolution via attribute +public class OrderHandler([FromKeyedServices("email")] INotificationService notifier) +{ + public async Task Handle(CreateOrder.Command command, CancellationToken ct) + { + // ... 
create order + await notifier.SendAsync(notification, ct); + } +} + +// Resolution via IServiceProvider +public class NotificationRouter(IServiceProvider provider) +{ + public INotificationService GetService(string channel) + { + return provider.GetRequiredKeyedService(channel); + } +} +``` + +### Decorator Pattern + +```csharp +// Base service +public interface IOrderService +{ + Task> CreateAsync(CreateOrderRequest request, CancellationToken ct); +} + +public class OrderService(AppDbContext db, TimeProvider clock) : IOrderService +{ + public async Task> CreateAsync(CreateOrderRequest request, CancellationToken ct) + { + var order = Order.Create(request, clock.GetUtcNow()); + db.Orders.Add(order); + await db.SaveChangesAsync(ct); + return Result.Success(order); + } +} + +// Decorator — adds logging +public class LoggingOrderService(IOrderService inner, ILogger logger) : IOrderService +{ + public async Task> CreateAsync(CreateOrderRequest request, CancellationToken ct) + { + logger.LogInformation("Creating order for customer {CustomerId}", request.CustomerId); + var result = await inner.CreateAsync(request, ct); + if (result.IsSuccess) + logger.LogInformation("Order {OrderId} created", result.Value.Id); + return result; + } +} + +// Registration with Scrutor +builder.Services.AddScoped(); +builder.Services.Decorate(); +``` + +### Registration by Convention (Scrutor) + +```csharp +// Auto-register all services matching a convention +builder.Services.Scan(scan => scan + .FromAssemblyOf() + .AddClasses(classes => classes.AssignableTo()) + .AsImplementedInterfaces() + .WithTransientLifetime() + .AddClasses(classes => classes.AssignableTo()) + .AsImplementedInterfaces() + .WithScopedLifetime()); +``` + +### Factory Pattern + +When you need runtime logic to select an implementation. 
+ +```csharp +builder.Services.AddScoped(sp => +{ + var config = sp.GetRequiredService>().Value; + return config.Provider switch + { + "stripe" => ActivatorUtilities.CreateInstance(sp), + "paypal" => ActivatorUtilities.CreateInstance(sp), + _ => throw new InvalidOperationException($"Unknown payment provider: {config.Provider}") + }; +}); +``` + +### Options Registration + +```csharp +// Bind configuration section to a strongly-typed options class +builder.Services.AddOptions() + .BindConfiguration("Jwt") + .ValidateDataAnnotations() + .ValidateOnStart(); + +// Inject as IOptions +public class TokenService(IOptions options) +{ + private readonly JwtOptions _jwt = options.Value; +} +``` + +## Anti-patterns + +### Don't Capture Scoped Services in Singletons + +```csharp +// BAD — DbContext is scoped, captured by singleton = memory leak + stale data +builder.Services.AddSingleton(); // depends on AppDbContext + +// GOOD — use IServiceScopeFactory in singleton +public class OrderCache(IServiceScopeFactory scopeFactory) +{ + public async Task GetAsync(Guid id) + { + await using var scope = scopeFactory.CreateAsyncScope(); + var db = scope.ServiceProvider.GetRequiredService(); + return await db.Orders.FindAsync(id); + } +} +``` + +### Don't Register Everything as Singleton + +```csharp +// BAD — making a service singleton when it holds mutable state +builder.Services.AddSingleton(); // has DbContext dependency + +// GOOD — match the lifetime to the service's needs +builder.Services.AddScoped(); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Stateless service | Scoped (default) or Transient | +| Configuration / cache | Singleton | +| DbContext | Scoped (registered by `AddDbContext`) | +| Multiple implementations | Keyed services (strategy pattern) | +| Cross-cutting behavior | Decorator pattern | +| Convention-based registration | Scrutor | +| Runtime implementation selection | Factory delegate | +| Strongly-typed config | 
`AddOptions().BindConfiguration()` | diff --git a/.opencode/skills/docker/SKILL.md b/.opencode/skills/docker/SKILL.md new file mode 100644 index 00000000..5b2ba662 --- /dev/null +++ b/.opencode/skills/docker/SKILL.md @@ -0,0 +1,180 @@ +--- +name: docker +description: > + Docker containerization for .NET 10 applications. Covers multi-stage builds, + .NET container images, non-root user configuration, health checks, and + .dockerignore. + Load this skill when containerizing an application, optimizing image size, + setting up Docker Compose for local development, or when the user mentions + "Docker", "Dockerfile", "container", "docker-compose", "image", "multi-stage", + "non-root", ".dockerignore", "container health check", or "dotnet publish container". +--- + +# Docker + +## Core Principles + +1. **Multi-stage builds always** — Separate build and runtime stages. Build in the SDK image, run in the ASP.NET runtime image. +2. **Non-root by default** — .NET container images support `USER app` by default since .NET 8. Never run as root in production. +3. **Layer caching matters** — Copy `.csproj` files and restore before copying source code. This caches NuGet dependencies across builds. +4. **Health checks in the container** — Use `HEALTHCHECK` in Dockerfile or configure in Docker Compose / orchestrator. + +## Patterns + +### Multi-Stage Dockerfile for Web API + +```dockerfile +# Stage 1: Build +FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build +WORKDIR /src + +# Copy project files and restore (cached layer) +COPY ["src/MyApp.Api/MyApp.Api.csproj", "src/MyApp.Api/"] +COPY ["src/MyApp.Domain/MyApp.Domain.csproj", "src/MyApp.Domain/"] +COPY ["Directory.Build.props", "."] +COPY ["Directory.Packages.props", "."] +RUN dotnet restore "src/MyApp.Api/MyApp.Api.csproj" + +# Copy everything and build +COPY . . 
+RUN dotnet publish "src/MyApp.Api/MyApp.Api.csproj" \
+    -c Release \
+    -o /app/publish \
+    --no-restore
+
+# Stage 2: Runtime
+FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS runtime
+WORKDIR /app
+
+# Non-root user (default in .NET 8+ images)
+USER app
+
+COPY --from=build /app/publish .
+
+EXPOSE 8080
+# Probe the health endpoint; requires curl in the runtime image
+# (e.g. RUN apt-get update && apt-get install -y curl before USER app)
+HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
+    CMD curl --fail http://localhost:8080/health/live || exit 1
+
+ENTRYPOINT ["dotnet", "MyApp.Api.dll"]
+```
+
+### .dockerignore
+
+```
+**/.git
+**/.vs
+**/bin
+**/obj
+**/node_modules
+**/Dockerfile*
+**/docker-compose*
+**/tests
+```
+
+### Docker Compose for Local Development
+
+Key .NET-specific concerns — pass connection strings via environment, use `depends_on` with health checks:
+
+```yaml
+services:
+  api:
+    build:
+      context: .
+      dockerfile: src/MyApp.Api/Dockerfile
+    ports:
+      - "5000:8080"
+    environment:
+      - ASPNETCORE_ENVIRONMENT=Development
+      - ConnectionStrings__Default=Host=postgres;Database=myapp;Username=postgres;Password=postgres
+      - ConnectionStrings__Redis=redis:6379
+    depends_on:
+      postgres:
+        condition: service_healthy
+    # Add postgres/redis services with healthcheck — standard boilerplate
+```
+
+### Optimized Build with .slnx
+
+For solutions with multiple projects, restore only the necessary projects.
+
+```dockerfile
+FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build
+WORKDIR /src
+
+# Copy solution and all project files
+COPY *.slnx .
+COPY Directory.Build.props .
+COPY Directory.Packages.props .
+COPY src/**/*.csproj ./src/
+
+# Restore project structure
+RUN for file in src/**/*.csproj; do \
+    mkdir -p $(dirname $file) && mv $file $(dirname $file)/; \
+    done
+RUN dotnet restore
+
+COPY . . 
+RUN dotnet publish src/MyApp.Api -c Release -o /app/publish --no-restore +``` + +### Health Check Endpoint + +```csharp +// In Program.cs — lightweight health endpoint for Docker +app.MapGet("/health/live", () => Results.Ok("healthy")) + .ExcludeFromDescription(); +``` + +## Anti-patterns + +### Don't Use SDK Image for Runtime + +```dockerfile +# BAD — SDK image is 900MB+, includes compilers +FROM mcr.microsoft.com/dotnet/sdk:10.0 +COPY . . +RUN dotnet run + +# GOOD — separate build and runtime, runtime image is ~200MB +FROM mcr.microsoft.com/dotnet/aspnet:10.0 +``` + +### Don't Copy Everything Before Restore + +```dockerfile +# BAD — any source change invalidates the NuGet cache +COPY . . +RUN dotnet restore + +# GOOD — copy only project files first, then restore +COPY ["src/MyApp.Api/MyApp.Api.csproj", "src/MyApp.Api/"] +RUN dotnet restore "src/MyApp.Api/MyApp.Api.csproj" +COPY . . +``` + +### Don't Run as Root + +```dockerfile +# BAD — running as root (security risk) +FROM mcr.microsoft.com/dotnet/aspnet:10.0 +COPY --from=build /app . +ENTRYPOINT ["dotnet", "MyApp.Api.dll"] + +# GOOD — use the built-in non-root user +FROM mcr.microsoft.com/dotnet/aspnet:10.0 +USER app +COPY --from=build /app . 
+ENTRYPOINT ["dotnet", "MyApp.Api.dll"] +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Web API container | Multi-stage build with aspnet runtime image | +| Worker service | Multi-stage build with dotnet/runtime image | +| Local development | Docker Compose with service dependencies | +| CI builds | Multi-stage build (self-contained) | +| Image size optimization | Use Alpine variant + trimming for small images | +| Health monitoring | HEALTHCHECK instruction + /health endpoint | +| Secrets | Environment variables or mounted secrets, never in image | diff --git a/.opencode/skills/ef-core/SKILL.md b/.opencode/skills/ef-core/SKILL.md new file mode 100644 index 00000000..962914f1 --- /dev/null +++ b/.opencode/skills/ef-core/SKILL.md @@ -0,0 +1,309 @@ +--- +name: ef-core +description: > + Entity Framework Core patterns for .NET 10. Covers DbContext configuration, + migrations workflow, interceptors, compiled queries, ExecuteUpdateAsync, + ExecuteDeleteAsync, value converters, and query optimization. + Load this skill when working with databases, writing queries, managing schema + changes, or when the user mentions "EF Core", "Entity Framework", "DbContext", + "migration", "LINQ query", "database", "SQL", "N+1", "Include", "split query", + "value converter", "interceptor", or "compiled query". +--- + +# EF Core (.NET 10) + +## Core Principles + +1. **EF Core is the default ORM** — Use it unless you have a specific reason not to (extreme perf, legacy DB without FK constraints). See ADR-003. +2. **DbContext is a unit of work** — Don't wrap it in another UoW abstraction. EF Core already implements Unit of Work and Repository patterns internally. +3. **Queries should be projections** — Use `.Select()` to project into DTOs instead of loading full entities. This avoids over-fetching and N+1 issues. +4. **Migrations are code** — Treat them like any other source code. Review them, test them, never auto-apply in production. 
+ +## Patterns + +### DbContext Configuration + +Use `IEntityTypeConfiguration` to keep entity configs separate and discoverable. + +```csharp +// Persistence/AppDbContext.cs +public class AppDbContext(DbContextOptions options) : DbContext(options) +{ + public DbSet Orders => Set(); + public DbSet Products => Set(); + + protected override void OnModelCreating(ModelBuilder modelBuilder) + { + modelBuilder.ApplyConfigurationsFromAssembly(typeof(AppDbContext).Assembly); + } +} + +// Persistence/Configurations/OrderConfiguration.cs +public class OrderConfiguration : IEntityTypeConfiguration +{ + public void Configure(EntityTypeBuilder builder) + { + builder.HasKey(o => o.Id); + + builder.Property(o => o.Total) + .HasPrecision(18, 2); + + builder.HasMany(o => o.Items) + .WithOne() + .HasForeignKey(i => i.OrderId) + .OnDelete(DeleteBehavior.Cascade); + + builder.HasIndex(o => o.CustomerId); + builder.HasIndex(o => o.CreatedAt); + } +} +``` + +### Registration + +```csharp +// Program.cs +builder.Services.AddDbContext(options => + options.UseNpgsql(builder.Configuration.GetConnectionString("Default"))); +``` + +### Query Projections (Avoid Over-Fetching) + +```csharp +// GOOD — project to DTO, only loads needed columns +public async Task GetOrderAsync(Guid id, CancellationToken ct) +{ + return await db.Orders + .Where(o => o.Id == id) + .Select(o => new OrderResponse( + o.Id, + o.Total, + o.CreatedAt, + o.Items.Select(i => new OrderItemResponse(i.ProductName, i.Quantity, i.Price)).ToList())) + .FirstOrDefaultAsync(ct); +} +``` + +### Pagination + +```csharp +public async Task> ListOrdersAsync(int page, int pageSize, CancellationToken ct) +{ + var query = db.Orders + .OrderByDescending(o => o.CreatedAt) + .Select(o => new OrderSummary(o.Id, o.CustomerName, o.Total, o.Status)); + + var totalCount = await query.CountAsync(ct); + var items = await query + .Skip((page - 1) * pageSize) + .Take(pageSize) + .ToListAsync(ct); + + return new PagedList(items, totalCount, page, 
pageSize); +} +``` + +### ExecuteUpdateAsync / ExecuteDeleteAsync + +Bulk operations that bypass change tracking for better performance. + +```csharp +// Update without loading entities +await db.Orders + .Where(o => o.Status == OrderStatus.Pending && o.CreatedAt < cutoff) + .ExecuteUpdateAsync(s => s + .SetProperty(o => o.Status, OrderStatus.Expired) + .SetProperty(o => o.UpdatedAt, clock.GetUtcNow()), + ct); + +// Delete without loading entities +await db.Orders + .Where(o => o.Status == OrderStatus.Cancelled && o.CreatedAt < archiveCutoff) + .ExecuteDeleteAsync(ct); +``` + +### Interceptors + +Use interceptors for cross-cutting concerns like audit trails and soft deletes. + +```csharp +public class AuditInterceptor(TimeProvider clock) : SaveChangesInterceptor +{ + public override ValueTask> SavingChangesAsync( + DbContextEventData eventData, + InterceptionResult result, + CancellationToken ct = default) + { + var context = eventData.Context; + if (context is null) return ValueTask.FromResult(result); + + var now = clock.GetUtcNow(); + + foreach (var entry in context.ChangeTracker.Entries()) + { + switch (entry.State) + { + case EntityState.Added: + entry.Entity.CreatedAt = now; + entry.Entity.UpdatedAt = now; + break; + case EntityState.Modified: + entry.Entity.UpdatedAt = now; + break; + } + } + + return ValueTask.FromResult(result); + } +} + +// Registration +builder.Services.AddDbContext((sp, options) => + options + .UseNpgsql(connectionString) + .AddInterceptors(sp.GetRequiredService())); +``` + +### Compiled Queries + +Use for hot-path queries that execute frequently with the same shape. 
+ +```csharp +public class OrderQueries +{ + public static readonly Func> GetById = + EF.CompileAsyncQuery((AppDbContext db, Guid id, CancellationToken ct) => + db.Orders + .Include(o => o.Items) + .FirstOrDefault(o => o.Id == id)); +} + +// Usage +var order = await OrderQueries.GetById(db, orderId, ct); +``` + +### Value Converters + +```csharp +// Store enum as string +builder.Property(o => o.Status) + .HasConversion() + .HasMaxLength(50); + +// Strongly-typed IDs +public readonly record struct OrderId(Guid Value); + +builder.Property(o => o.Id) + .HasConversion(id => id.Value, value => new OrderId(value)); +``` + +### Migrations Workflow + +```bash +# Create a migration +dotnet ef migrations add AddOrderIndex --project src/MyApp.Infrastructure --startup-project src/MyApp.Api + +# Review the generated migration — ALWAYS review before applying +# Check for data loss, index strategy, constraint names + +# Apply to development database +dotnet ef database update --project src/MyApp.Infrastructure --startup-project src/MyApp.Api + +# Generate SQL script for production +dotnet ef migrations script --idempotent --output migrations.sql +``` + +### Global Query Filters + +```csharp +// Soft delete filter +builder.HasQueryFilter(o => !o.IsDeleted); + +// Multi-tenant filter +builder.HasQueryFilter(o => o.TenantId == _tenantProvider.TenantId); + +// Bypass when needed +var allOrders = await db.Orders.IgnoreQueryFilters().ToListAsync(ct); +``` + +## Anti-patterns + +### Don't Wrap DbContext in a Repository + +```csharp +// BAD — unnecessary abstraction that limits EF Core's power +public interface IOrderRepository +{ + Task GetByIdAsync(Guid id); + Task AddAsync(Order order); + Task SaveChangesAsync(); +} + +// GOOD — use DbContext directly in handlers +public class Handler(AppDbContext db) +{ + public async Task Handle(GetOrder.Query query, CancellationToken ct) + { + return await db.Orders.FindAsync([query.Id], ct); + } +} +``` + +### Don't Use Lazy Loading + +```csharp 
+// BAD — lazy loading causes N+1 queries and hides data access +builder.Services.AddDbContext(options => + options.UseLazyLoadingProxies()); // DON'T + +// GOOD — explicit loading with Include or projection +var orders = await db.Orders + .Include(o => o.Items) + .Where(o => o.CustomerId == customerId) + .ToListAsync(ct); +``` + +### Don't Use .ToListAsync() Then Filter in Memory + +```csharp +// BAD — loads ALL orders, filters in C# +var orders = await db.Orders.ToListAsync(ct); +var pending = orders.Where(o => o.Status == OrderStatus.Pending); + +// GOOD — filter in the database +var pending = await db.Orders + .Where(o => o.Status == OrderStatus.Pending) + .ToListAsync(ct); +``` + +### Don't Forget to Await Async Methods + +```csharp +// BAD — missing await, returns before save completes +public void Handle(CreateOrder.Command command) +{ + db.Orders.Add(order); + db.SaveChangesAsync(); // Fire-and-forget BUG +} + +// GOOD +public async Task Handle(CreateOrder.Command command, CancellationToken ct) +{ + db.Orders.Add(order); + await db.SaveChangesAsync(ct); +} +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Standard CRUD | DbContext with projections | +| Bulk updates (100+ rows) | `ExecuteUpdateAsync` / `ExecuteDeleteAsync` | +| Hot-path read query | Compiled query | +| Complex reporting query | Raw SQL with `FromSqlInterpolated` or Dapper | +| Audit trails | `SaveChangesInterceptor` | +| Multi-tenancy | Global query filter | +| Soft deletes | Global query filter + interceptor | +| Strongly-typed IDs | Value converter | +| Production migration | Idempotent SQL script, never auto-migrate | diff --git a/.opencode/skills/error-handling/SKILL.md b/.opencode/skills/error-handling/SKILL.md new file mode 100644 index 00000000..2cef27ed --- /dev/null +++ b/.opencode/skills/error-handling/SKILL.md @@ -0,0 +1,254 @@ +--- +name: error-handling +description: > + Error handling strategy for .NET 10 applications. 
Covers the Result pattern, + ProblemDetails (RFC 9457), global exception handling, FluentValidation, and + structured error responses. + Load this skill when implementing error handling, validation, or designing + API error contracts, or when the user mentions "error handling", "Result pattern", + "ProblemDetails", "exception", "validation", "FluentValidation", "error response", + "global exception handler", or "RFC 9457". +--- + +# Error Handling + +## Core Principles + +1. **Use the Result pattern for expected failures** — Don't throw exceptions for things like "order not found" or "validation failed". These are expected outcomes, not exceptional conditions. See ADR-002. +2. **Reserve exceptions for unexpected failures** — Database connection lost, null reference bugs, network timeouts — these are truly exceptional and should propagate to the global handler. +3. **Every API error returns ProblemDetails** — RFC 9457 is the standard. Every error response has `type`, `title`, `status`, `detail`, and optionally `errors`. +4. **Validate at the boundary** — Validate incoming requests at the API layer, not deep inside business logic. + +## Patterns + +### Result Pattern + +A simple, generic result type that carries either a value or errors. + +```csharp +public class Result +{ + public bool IsSuccess { get; } + public bool IsFailure => !IsSuccess; + public List Errors { get; } + + protected Result(bool isSuccess, List? errors = null) + { + IsSuccess = isSuccess; + Errors = errors ?? 
[]; + } + + public static Result Success() => new(true); + public static Result Failure(params string[] errors) => new(false, [..errors]); + public static Result Success(T value) => new(value); + public static Result Failure(params string[] errors) => new(errors); +} + +public class Result : Result +{ + public T Value { get; } + + internal Result(T value) : base(true) => Value = value; + internal Result(IEnumerable errors) : base(false, [..errors]) => Value = default!; +} +``` + +### Result to ProblemDetails Mapping + +```csharp +public static class ResultExtensions +{ + public static IResult ToProblemDetails(this Result result, int statusCode = 400) + { + return TypedResults.Problem( + title: "One or more errors occurred", + statusCode: statusCode, + extensions: new Dictionary + { + ["errors"] = result.Errors + }); + } +} + +// Usage in endpoint +group.MapPost("/", async (CreateOrder.Command command, ISender sender, CancellationToken ct) => +{ + var result = await sender.Send(command, ct); + return result.IsSuccess + ? TypedResults.Created($"/api/orders/{result.Value.Id}", result.Value) + : result.ToProblemDetails(); +}); +``` + +### Global Exception Handler + +Catches unexpected exceptions and converts them to ProblemDetails. For the modern `IExceptionHandler` approach (preferred), see `knowledge/common-infrastructure.md`. 
The inline lambda below works for simple cases: + +```csharp +// Program.cs +app.UseExceptionHandler(errorApp => +{ + errorApp.Run(async context => + { + var exception = context.Features.Get()?.Error; + var logger = context.RequestServices.GetRequiredService>(); + + logger.LogError(exception, "Unhandled exception for {Method} {Path}", + context.Request.Method, context.Request.Path); + + var problem = new ProblemDetails + { + Title = "An unexpected error occurred", + Status = StatusCodes.Status500InternalServerError, + Type = "https://tools.ietf.org/html/rfc9110#section-15.6.1" + }; + + // Don't leak details in production + if (context.RequestServices.GetRequiredService().IsDevelopment()) + { + problem.Detail = exception?.Message; + } + + context.Response.StatusCode = problem.Status.Value; + await context.Response.WriteAsJsonAsync(problem); + }); +}); +``` + +### FluentValidation with Endpoint Filters + +```csharp +// Validator +public class CreateOrderValidator : AbstractValidator +{ + public CreateOrderValidator() + { + RuleFor(x => x.CustomerId) + .NotEmpty().WithMessage("Customer ID is required"); + + RuleFor(x => x.Items) + .NotEmpty().WithMessage("At least one item is required"); + + RuleForEach(x => x.Items).ChildRules(item => + { + item.RuleFor(x => x.ProductId).NotEmpty(); + item.RuleFor(x => x.Quantity).GreaterThan(0); + }); + } +} + +// Generic validation filter +public class ValidationFilter : IEndpointFilter +{ + public async ValueTask InvokeAsync( + EndpointFilterInvocationContext context, + EndpointFilterDelegate next) + { + var validator = context.HttpContext.RequestServices.GetService>(); + if (validator is null) + return await next(context); + + var request = context.Arguments.OfType().FirstOrDefault(); + if (request is null) + return await next(context); + + var result = await validator.ValidateAsync(request); + if (!result.IsValid) + { + return TypedResults.ValidationProblem(result.ToDictionary()); + } + + return await next(context); + } +} + +// 
Registration +group.MapPost("/", CreateOrder) + .AddEndpointFilter>(); +``` + +### Typed Error Results + +For richer error handling, use typed error enums or error objects. + +```csharp +public abstract record Error(string Code, string Message); +public record NotFoundError(string Entity, object Id) + : Error("not_found", $"{Entity} with ID {Id} was not found"); +public record ValidationError(string Field, string Message) + : Error("validation", Message); +public record ConflictError(string Message) + : Error("conflict", Message); + +// Map to HTTP status codes +public static IResult ToHttpResult(this Error error) => error switch +{ + NotFoundError => TypedResults.Problem(title: error.Message, statusCode: 404), + ValidationError => TypedResults.Problem(title: error.Message, statusCode: 400), + ConflictError => TypedResults.Problem(title: error.Message, statusCode: 409), + _ => TypedResults.Problem(title: error.Message, statusCode: 500) +}; +``` + +## Anti-patterns + +### Don't Throw Exceptions for Flow Control + +```csharp +// BAD — exceptions for expected outcomes +public Order GetOrder(Guid id) +{ + var order = db.Orders.Find(id) + ?? throw new NotFoundException($"Order {id} not found"); + return order; +} + +// GOOD — Result pattern +public Result GetOrder(Guid id) +{ + var order = db.Orders.Find(id); + return order is not null + ? 
Result.Success(order) + : Result.Failure($"Order {id} not found"); +} +``` + +### Don't Return Raw Error Strings from APIs + +```csharp +// BAD — inconsistent error format +return Results.BadRequest("Something went wrong"); +return Results.BadRequest(new { error = "Invalid input" }); + +// GOOD — always ProblemDetails +return TypedResults.Problem(title: "Invalid input", statusCode: 400); +return TypedResults.ValidationProblem(validationResult.ToDictionary()); +``` + +### Don't Catch and Swallow Exceptions + +```csharp +// BAD — silently swallowing +try { await ProcessOrder(order); } +catch (Exception) { /* ignore */ } + +// GOOD — log and handle appropriately +try { await ProcessOrder(order); } +catch (PaymentException ex) +{ + logger.LogWarning(ex, "Payment failed for order {OrderId}", order.Id); + return Result.Failure("Payment processing failed"); +} +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Expected business failure | Result pattern | +| Input validation | FluentValidation with endpoint filter | +| Unexpected crash | Global exception handler → ProblemDetails | +| API error format | RFC 9457 ProblemDetails — always | +| Validation in handler | Return Result.Failure, don't throw | +| External service failure | Catch specific exception, return Result.Failure | +| Logging errors | Structured logging with correlation ID | diff --git a/.opencode/skills/health-check/SKILL.md b/.opencode/skills/health-check/SKILL.md new file mode 100644 index 00000000..38d2589d --- /dev/null +++ b/.opencode/skills/health-check/SKILL.md @@ -0,0 +1,320 @@ +--- +name: health-check +description: > + Multi-dimensional health assessment for .NET projects with letter grades (A-F) + using Roslyn MCP tools. Evaluates 8 dimensions: build health, code quality, + architecture, test coverage, dead code, API surface, security posture, and + documentation. Produces a structured report card with actionable recommendations. 
+ Load this skill when: "health check", "how healthy is this", "project health", + "code quality report", "grade this project", "assess codebase", "quality audit", + "technical assessment", "codebase review", "report card". +--- + +# Health Check + +## Core Principles + +1. **Data-driven assessment** — Use MCP tools for every dimension. `get_diagnostics` for build health, `detect_antipatterns` for code quality, `detect_circular_dependencies` for architecture, `get_test_coverage_map` for testing, `find_dead_code` for dead code. Gut feeling is not a grade. + +2. **Letter grades with justification** — Every dimension gets A (90+), B (80+), C (70+), D (60+), or F (<60). Every grade includes the specific data points that produced it. "B in Code Quality" means nothing. "B in Code Quality: 3 anti-patterns in 2,400 lines (1.25 per 1K)" is actionable. + +3. **Actionable recommendations** — Every grade below A comes with specific, prioritized fix suggestions. "Improve test coverage" is not actionable. "Add test classes for OrderService, PaymentProcessor, and ShippingCalculator (3 production types without tests)" is. + +4. **Comparative baselines** — Grade against .NET best practices, not perfection. Zero warnings is aspirational. Fewer than 1 warning per 1K lines of code is excellent. Context matters. + +5. **Non-judgmental tone** — Health checks are diagnostic, not punitive. A project with a C grade has a clear improvement path. Frame findings as opportunities, not failures. + +## Patterns + +### 8-Dimension Health Assessment + +Run all dimensions. Each uses specific MCP tools and produces a letter grade. 
+
+**Dimension 1: Build Health**
+```
+Tool: dotnet build --no-restore
+Metric: Error count, warning count
+```
+
+| Grade | Criteria |
+|-------|----------|
+| A | 0 errors, 0 warnings |
+| B | 0 errors, 1-5 warnings |
+| C | 0 errors, 6-15 warnings |
+| D | 0 errors, 16-30 warnings |
+| F | Any errors, or 31+ warnings |
+
+**Dimension 2: Code Quality**
+```
+Tool: MCP detect_antipatterns(projectFilter: each project)
+Metric: Anti-pattern count per 1K lines of code
+```
+
+| Grade | Criteria |
+|-------|----------|
+| A | 0 anti-patterns |
+| B | < 0.5 per 1K lines |
+| C | 0.5 - 1.5 per 1K lines |
+| D | 1.5 - 3.0 per 1K lines |
+| F | > 3.0 per 1K lines |
+
+Common anti-patterns detected: async void, sync-over-async, `new HttpClient()`, `DateTime.Now`,
+broad catch blocks, string interpolation in logging, missing CancellationToken.
+
+**Dimension 3: Architecture**
+```
+Tool: MCP get_project_graph — check dependency direction
+Tool: MCP detect_circular_dependencies(scope: "projects") — find cycles
+Tool: MCP detect_circular_dependencies(scope: "types", projectFilter: each) — type-level cycles
+```
+
+| Grade | Criteria |
+|-------|----------|
+| A | Correct dependency direction, 0 circular deps (project or type level) |
+| B | Correct dependency direction, 1-2 type-level cycles (no project cycles) |
+| C | 1-2 minor direction issues, or 3-5 type-level cycles |
+| D | Project-level circular dependency, or significant layer violations |
+| F | Multiple project-level cycles, no discernible architecture |
+
+**Dimension 4: Test Coverage**
+```
+Tool: MCP get_test_coverage_map(projectFilter: each production project)
+Metric: Percentage of production types with corresponding test classes
+```
+
+| Grade | Criteria |
+|-------|----------|
+| A | 90%+ types have test classes |
+| B | 75-89% types have test classes |
+| C | 50-74% types have test classes |
+| D | 25-49% types have test classes |
+| F | < 25% types have test classes |
+
+Note: This is structural coverage
(test class exists), not runtime line coverage.
+A test class existing does not guarantee thorough testing, but its absence guarantees none.
+
+**Dimension 5: Dead Code**
+```
+Tool: MCP find_dead_code(scope: "solution", kind: "all", maxResults: 50)
+Metric: Count of unused types, methods, and properties
+```
+
+| Grade | Criteria |
+|-------|----------|
+| A | 0-2 dead symbols |
+| B | 3-8 dead symbols |
+| C | 9-15 dead symbols |
+| D | 16-25 dead symbols |
+| F | 26+ dead symbols |
+
+Note: Some false positives are expected (reflection, DI conventions). Verify before penalizing.
+
+**Dimension 6: API Surface**
+```
+Tool: MCP get_public_api(typeName: each public type) — review public API design
+Tool: MCP find_references(symbolName: public members) — check for overexposed APIs
+```
+
+| Grade | Criteria |
+|-------|----------|
+| A | Minimal public surface, proper return types, consistent naming |
+| B | Mostly clean, 1-2 overexposed types |
+| C | Several types expose internal details, inconsistent return types |
+| D | Public APIs leak implementation, mixed return type patterns |
+| F | No API design consideration, everything is public |
+
+Check for:
+- Services that should be `internal` but are `public`
+- Methods returning `Task<T>` instead of `Task<Result<T>>` for operations that can fail
+- Inconsistent return types across similar endpoints (some `TypedResults`, some `IResult`)
+- Public setters on types that should be immutable
+
+**Dimension 7: Security Posture**
+```
+Tool: dotnet list package --vulnerable --include-transitive
+Tool: MCP detect_antipatterns — filter for security-related patterns
+Scan: Hardcoded secrets, connection strings in code, missing auth attributes
+```
+
+| Grade | Criteria |
+|-------|----------|
+| A | 0 vulnerable packages, no hardcoded secrets, auth on all endpoints |
+| B | 0 critical/high vulns, 1-2 low/medium vulns, clean auth |
+| C | 1-2 medium vulns, or minor auth gaps |
+| D | High-severity vuln, or missing auth on sensitive
endpoints | +| F | Critical vuln, hardcoded secrets, or systemic auth gaps | + +**Dimension 8: Documentation** +``` +Scan: XML docs on public API types and methods +Check: README exists, is current, covers setup and architecture +``` + +| Grade | Criteria | +|-------|----------| +| A | 90%+ public APIs have XML docs, README is comprehensive | +| B | 70-89% XML doc coverage, README covers basics | +| C | 50-69% XML doc coverage, README exists but is sparse | +| D | < 50% XML doc coverage, minimal README | +| F | No XML docs, no README or severely outdated | + +### Report Card Format + +```markdown +## Project Health Report + +**Project:** MyApp | **Date:** 2026-03-04 | **Assessed by:** Claude (MCP-assisted) + +### Grades + +| Dimension | Grade | Score | Key Finding | +|-----------|-------|-------|-------------| +| Build Health | A | 95 | 0 errors, 2 pre-existing warnings | +| Code Quality | B | 82 | 3 anti-patterns in 4.2K lines | +| Architecture | A | 92 | Clean dependency direction, 0 circular deps | +| Test Coverage | C | 68 | 34/50 production types have test classes | +| Dead Code | B | 85 | 5 unused methods identified | +| API Surface | B | 80 | 2 overexposed service types | +| Security | A | 94 | 0 vulnerable packages, auth coverage complete | +| Documentation | D | 55 | 12/30 public APIs have XML docs | + +### Overall GPA: 3.0 (B-) + +### Priority Recommendations + +1. **Test Coverage (C -> B):** Add test classes for these 16 untested types: + - `OrderService`, `PaymentProcessor`, `ShippingCalculator` (critical path) + - `EmailNotifier`, `InventoryChecker`, ... (supporting services) + Estimated effort: 2-3 days + +2. **Documentation (D -> C):** Add XML docs to public API types: + - Start with the 8 controller/endpoint classes (user-facing APIs) + - Then cover the 10 public service interfaces + Estimated effort: 1 day + +3. 
**Code Quality (B -> A):** Fix 3 anti-patterns: + - `OrderService.cs:47` — Replace `DateTime.Now` with `TimeProvider.GetUtcNow()` + - `PaymentClient.cs:23` — Replace `new HttpClient()` with `IHttpClientFactory` + - `NotificationHandler.cs:12` — Replace `async void` with `async Task` + Estimated effort: 1 hour +``` + +### GPA Calculation + +Convert letter grades to points: A=4.0, B=3.0, C=2.0, D=1.0, F=0.0 +GPA = average of all 8 dimension scores. + +| GPA Range | Overall Assessment | +|-----------|--------------------| +| 3.5 - 4.0 | Excellent — production-ready, well-maintained | +| 3.0 - 3.4 | Good — solid foundation, minor improvements needed | +| 2.5 - 2.9 | Fair — functional but accumulating tech debt | +| 2.0 - 2.4 | Needs Work — significant improvements required | +| < 2.0 | Critical — major structural issues to address | + +### Quick Health Check + +For a rapid assessment, run dimensions 1-4 only: + +``` +QUICK HEALTH (4 dimensions): +1. Build Health — dotnet build +2. Code Quality — detect_antipatterns +3. Architecture — get_project_graph + detect_circular_dependencies +4. Test Coverage — get_test_coverage_map + +Use when: +- Mid-sprint checkpoint +- Quick status before a demo +- Onboarding to an unfamiliar codebase +- After a major merge +``` + +### Trend Tracking + +If a previous health report exists, compare grades: + +```markdown +### Trend + +| Dimension | Previous | Current | Change | +|-----------|----------|---------|--------| +| Build Health | B | A | Improved — fixed 4 warnings | +| Code Quality | C | B | Improved — resolved 7 anti-patterns | +| Test Coverage | C | C | No change — still 68% | +| Dead Code | B | B | No change | +``` + +Track trends to show progress over time. Improving grades validate cleanup efforts. + +## Anti-patterns + +### Grading Without MCP Tools + +``` +# BAD — gut-feeling assessment +"The code looks pretty clean, I'd give it a B overall." +# No data. No specific findings. No actionable recommendations. 
+ +# GOOD — MCP-driven assessment with data +MCP: detect_antipatterns → 3 findings +MCP: get_diagnostics → 2 warnings +MCP: get_test_coverage_map → 68% coverage +"Code Quality: B (3 anti-patterns in 4.2K lines = 0.71 per 1K). + Specific anti-patterns: DateTime.Now in OrderService.cs:47, ..." +``` + +### Only Checking Build Health + +``` +# BAD — build passes, ship it +"dotnet build succeeded with 0 errors. The project is healthy!" +# Misses: 12 anti-patterns, 3 circular dependencies, 30% test coverage, 2 CVEs + +# GOOD — all 8 dimensions for a complete picture +Build passes, but Architecture is D (circular deps), Test Coverage is F (15%), +and Security is D (high-severity CVE). Overall GPA: 2.1 — Needs Work. +``` + +### Inflated Grades + +``` +# BAD — grading on a curve to make the project look good +15 warnings → "That's pretty good for a project this size" → Grade: B +# Absolute standards exist for a reason + +# GOOD — consistent grading against defined thresholds +15 warnings → Grade C (6-15 warnings bracket) +"15 warnings puts this in the C range. Here are the 5 highest-priority + warnings to fix to reach B (under 6 warnings)." +``` + +### Recommendations Without Specifics + +``` +# BAD — vague improvement suggestions +"Improve test coverage." +"Fix code quality issues." +"Address security concerns." + +# GOOD — specific, prioritized, estimated +"Add test classes for OrderService, PaymentProcessor, ShippingCalculator. + These are on the critical path and have 0 test coverage. + Start with OrderService — it has the most complex logic. + Estimated effort: 4 hours for all three." 
+``` + +## Decision Guide + +| Scenario | Assessment Type | Dimensions | +|----------|----------------|------------| +| New project onboarding | Full Health Check | All 8 | +| Mid-sprint checkpoint | Quick Health | 1-4 | +| Pre-release quality gate | Full Health Check | All 8 | +| After major refactor | Targeted | 1 (Build), 3 (Architecture), 4 (Tests) | +| Post-dependency update | Targeted | 1 (Build), 7 (Security) | +| Tech debt prioritization | Full Health Check | All 8, focus on lowest grades | +| Monthly maintenance review | Full Health Check | All 8 with trend comparison | +| Before hiring/onboarding | Full Health Check | All 8 — sets baseline for new team member | +| After cleanup sprint | Targeted | Re-grade dimensions that were cleaned up | +| Executive summary needed | Full Health Check | All 8 with GPA summary | diff --git a/.opencode/skills/httpclient-factory/SKILL.md b/.opencode/skills/httpclient-factory/SKILL.md new file mode 100644 index 00000000..10b919c0 --- /dev/null +++ b/.opencode/skills/httpclient-factory/SKILL.md @@ -0,0 +1,258 @@ +--- +name: httpclient-factory +description: > + IHttpClientFactory and typed HTTP clients for .NET 10 applications. Covers + named/typed/keyed clients, DelegatingHandlers, resilience with + Microsoft.Extensions.Http.Resilience, and testing patterns. + Load this skill when configuring HTTP clients, adding retry/circuit breaker + policies, or when the user mentions "HttpClient", "IHttpClientFactory", + "AddHttpClient", "typed client", "named client", "DelegatingHandler", + "resilience", "retry", "circuit breaker", "hedging", "Polly", + "AddStandardResilienceHandler", "socket exhaustion", or "Refit". +--- + +# HttpClient Factory + +## Core Principles + +1. **Never `new HttpClient()` per request** — Raw `HttpClient` creation causes socket exhaustion under load and ignores DNS changes. Use `IHttpClientFactory` to manage handler lifetimes. +2. 
**Keyed clients over typed clients** — Keyed DI (`.AddAsKeyed()`) is the recommended pattern in .NET 10. Typed clients captured in singletons silently break handler rotation. +3. **Resilience is not optional** — Every external HTTP call needs retry, circuit breaker, and timeout. `AddStandardResilienceHandler()` provides sensible defaults in one line. +4. **DelegatingHandlers for cross-cutting concerns** — Auth tokens, correlation IDs, and logging belong in the handler pipeline, not scattered across service methods. + +## Patterns + +### Named Client with Resilience + +```csharp +builder.Services.AddHttpClient("github", client => +{ + client.BaseAddress = new Uri("https://api.github.com/"); + client.DefaultRequestHeaders.UserAgent.ParseAdd("MyApp/1.0"); + client.DefaultRequestHeaders.Accept.Add( + new MediaTypeWithQualityHeaderValue("application/json")); +}) +.AddStandardResilienceHandler(); + +// Usage via factory +public sealed class GitHubService(IHttpClientFactory factory) +{ + public async Task GetRepoAsync(string owner, string name, CancellationToken ct) + { + var client = factory.CreateClient("github"); + return await client.GetFromJsonAsync($"repos/{owner}/{name}", ct); + } +} +``` + +### Keyed Client (Recommended in .NET 10) + +Combines named client configurability with direct injection. No string lookups. + +```csharp +builder.Services.AddHttpClient("payments", client => +{ + client.BaseAddress = new Uri("https://api.payments.example.com/"); +}) +.AddStandardResilienceHandler() +.AddAsKeyed(); // Register as keyed scoped service + +// Inject directly — no IHttpClientFactory needed +app.MapPost("/charge", async ( + [FromKeyedServices("payments")] HttpClient httpClient, + ChargeRequest request, + CancellationToken ct) => +{ + var response = await httpClient.PostAsJsonAsync("charges", request, ct); + return response.IsSuccessStatusCode + ? 
TypedResults.Ok() + : TypedResults.Problem("Payment failed"); +}); +``` + +Global opt-in: `builder.Services.ConfigureHttpClientDefaults(b => b.AddAsKeyed());` + +### Standard Resilience Handler + +`AddStandardResilienceHandler()` chains 5 strategies: + +| Strategy | Default | +|----------|---------| +| Rate limiter | 1000 concurrent requests | +| Total timeout | 30 seconds | +| Retry | 3 retries, exponential backoff with jitter | +| Circuit breaker | Opens at 10% failure rate | +| Attempt timeout | 10 seconds per attempt | + +```csharp +builder.Services.AddHttpClient("api") + .AddStandardResilienceHandler(options => + { + options.Retry.MaxRetryAttempts = 5; + options.Retry.Delay = TimeSpan.FromSeconds(1); + options.TotalRequestTimeout.Timeout = TimeSpan.FromSeconds(60); + options.AttemptTimeout.Timeout = TimeSpan.FromSeconds(15); + + // Disable retries for non-idempotent methods + options.Retry.DisableForUnsafeHttpMethods(); + }); +``` + +### DelegatingHandler for Auth Token Injection + +```csharp +public sealed class AuthenticationHandler(ITokenService tokenService) + : DelegatingHandler +{ + protected override async Task SendAsync( + HttpRequestMessage request, CancellationToken cancellationToken) + { + var token = await tokenService.GetAccessTokenAsync(cancellationToken); + request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", token); + return await base.SendAsync(request, cancellationToken); + } +} + +// Registration +builder.Services.AddTransient(); +builder.Services.AddHttpClient("api") + .AddHttpMessageHandler() + .AddStandardResilienceHandler(); +``` + +### DelegatingHandler for Correlation ID Propagation + +```csharp +public sealed class CorrelationIdHandler(IHttpContextAccessor httpContextAccessor) + : DelegatingHandler +{ + protected override Task SendAsync( + HttpRequestMessage request, CancellationToken cancellationToken) + { + if (httpContextAccessor.HttpContext?.Request.Headers + .TryGetValue("X-Correlation-Id", out var 
correlationId) is true) + { + request.Headers.Add("X-Correlation-Id", correlationId.ToString()); + } + return base.SendAsync(request, cancellationToken); + } +} +``` + +### SocketsHttpHandler Configuration + +```csharp +builder.Services.AddHttpClient("advanced") + .UseSocketsHttpHandler((handler, _) => + { + handler.PooledConnectionLifetime = TimeSpan.FromMinutes(2); + handler.PooledConnectionIdleTimeout = TimeSpan.FromMinutes(1); + handler.MaxConnectionsPerServer = 100; + handler.AutomaticDecompression = + DecompressionMethods.GZip | DecompressionMethods.Brotli; + }); +``` + +### Testing with Mock Handler + +```csharp +public sealed class MockHttpHandler( + HttpStatusCode statusCode, + string content) : HttpMessageHandler +{ + protected override Task SendAsync( + HttpRequestMessage request, CancellationToken cancellationToken) + { + return Task.FromResult(new HttpResponseMessage(statusCode) + { + Content = new StringContent(content, Encoding.UTF8, "application/json") + }); + } +} + +// In test +var handler = new MockHttpHandler(HttpStatusCode.OK, """{"id":1}"""); +var client = new HttpClient(handler) { BaseAddress = new Uri("https://api.test/") }; +var service = new MyService(client); +``` + +## Anti-patterns + +### Don't Create HttpClient Per Request + +```csharp +// BAD — socket exhaustion under load, ignores DNS changes +public async Task GetDataAsync() +{ + using var client = new HttpClient(); + return await client.GetStringAsync("https://api.example.com/data"); +} + +// GOOD — factory-managed +public async Task GetDataAsync(CancellationToken ct) +{ + var client = factory.CreateClient("api"); + return await client.GetStringAsync("https://api.example.com/data", ct); +} +``` + +### Don't Capture Typed Clients in Singletons + +```csharp +// BAD — transient HttpClient captured by singleton defeats handler rotation +services.AddSingleton(); +services.AddHttpClient(); + +// GOOD — use keyed client or IHttpClientFactory in singletons +services.AddSingleton(); 
+services.AddHttpClient("myservice").AddAsKeyed(ServiceLifetime.Singleton); +``` + +### Don't Mutate DefaultRequestHeaders on Shared Clients + +```csharp +// BAD — not thread-safe +httpClient.DefaultRequestHeaders.Authorization = + new AuthenticationHeaderValue("Bearer", token); + +// GOOD — use DelegatingHandler or per-request HttpRequestMessage +using var request = new HttpRequestMessage(HttpMethod.Get, "/api/data"); +request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", token); +await httpClient.SendAsync(request, ct); +``` + +### Don't Forget CancellationToken + +```csharp +// BAD — no cancellation support +var result = await httpClient.GetFromJsonAsync("/orders/1"); + +// GOOD — always pass CancellationToken +var result = await httpClient.GetFromJsonAsync("/orders/1", cancellationToken); +``` + +### Don't Stack Multiple Resilience Handlers + +```csharp +// BAD — conflicting resilience strategies +builder.AddStandardResilienceHandler(); +builder.AddStandardHedgingHandler(); + +// GOOD — one standard handler, or a custom pipeline +builder.AddStandardResilienceHandler(); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| New .NET 10 project | Keyed clients with `AddAsKeyed()` | +| Singleton service needs HttpClient | Named client via `IHttpClientFactory` or keyed singleton | +| External API calls | `AddStandardResilienceHandler()` on every client | +| Auth token injection | `DelegatingHandler` registered with `AddHttpMessageHandler` | +| Hedging (parallel requests) | `AddStandardHedgingHandler()` for latency-sensitive calls | +| Non-idempotent methods | `DisableForUnsafeHttpMethods()` on retry options | +| Custom retry logic | `AddResilienceHandler("name", builder => ...)` | +| Connection pooling control | `UseSocketsHttpHandler` with `PooledConnectionLifetime` | +| API client generation | Refit with `AddRefitClient()` | +| Integration testing | Custom `HttpMessageHandler` or `MockHttpMessageHandler` | 
diff --git a/.opencode/skills/instinct-system/SKILL.md b/.opencode/skills/instinct-system/SKILL.md new file mode 100644 index 00000000..b695d3b4 --- /dev/null +++ b/.opencode/skills/instinct-system/SKILL.md @@ -0,0 +1,266 @@ +--- +name: instinct-system +description: > + Confidence-scored instinct system for learning project-specific patterns through + an observe-hypothesize-confirm cycle. Instincts start as low-confidence hypotheses + and graduate to permanent rules in MEMORY.md once confirmed. Stored per-project + in .claude/instincts.md. Load this skill when you notice a recurring pattern, + want to track a project convention, encounter "learn this", "I think they always", + "notice a pattern", "instinct", "hypothesis", "confidence", or when starting a + session (to load existing instincts). +--- + +# Instinct System + +## Core Principles + +1. **Instincts are hypotheses, not rules** — An instinct starts as a guess based on a single observation. It has no authority until confirmed across multiple instances. Never treat a first observation as a project convention. "I saw one handler use `sealed` " is an instinct at 0.3; "all 12 handlers use `sealed`" is a rule at 0.9. + +2. **Confidence scoring drives behavior (0.3-0.9)** — Each instinct carries a numeric confidence that determines how Claude acts on it. At 0.3, merely note it. At 0.5, mention it when relevant. At 0.7, follow it by default. At 0.9, promote it to a permanent rule. Never apply a low-confidence instinct without flagging the uncertainty. + +3. **Project-scoped, never global** — Instincts are stored per-project in `.claude/instincts.md`. What holds true in one codebase may be wrong in another. A project using `internal sealed class` everywhere says nothing about a different project that uses `public class` with interface testing. Instincts do not transfer at full confidence. + +4. 
**Observe-Hypothesize-Confirm cycle** — The lifecycle is disciplined: see a pattern, form a hypothesis, actively seek confirming or disconfirming evidence, adjust confidence accordingly. Passive observation is not enough. When you form an instinct, look for it in the next 2-3 related files. + +5. **Evolution path to permanence** — Instincts are temporary by design. At 0.9 confidence, they graduate to `MEMORY.md` as permanent rules or trigger updates to relevant skills. An instinct that never reaches 0.7 after 5+ observations should be discarded, not left to rot. + +## Patterns + +### Instinct Lifecycle + +The full lifecycle from first observation to permanent rule: + +``` +1. OBSERVE + During normal work, notice something that might be a pattern. + "This handler is internal sealed class. Is that the convention?" + +2. HYPOTHESIZE + Create an instinct with initial confidence 0.3. + Write to .claude/instincts.md: + - Use `sealed` on all handler classes | confidence: 0.3 | seen: 1 | last: 2025-07-15 + +3. SEEK CONFIRMATION + When working in related areas, actively check for the pattern. + Open 2-3 other handlers. Do they all use `internal sealed class`? + +4. CONFIRM or DISCONFIRM + - If confirmed: increment `seen`, raise confidence per the adjustment rules + - If contradicted: halve confidence, note the exception + - If mixed: hold confidence steady, note the split + +5. PROMOTE + At 0.9, present to the user for promotion to MEMORY.md. + "I've observed [pattern] across [N] instances. Should I add this + as a permanent rule to MEMORY.md?" 
+``` + +### Instinct Storage Format + +Store instincts in `.claude/instincts.md` with categories and structured metadata: + +```markdown +# Project Instincts + +## Code Style [0.7] +- Use `sealed` on all handler classes | confidence: 0.8 | seen: 5 | last: 2025-07-15 +- Prefix private fields with underscore | confidence: 0.5 | seen: 2 | last: 2025-07-14 + +## Architecture [0.6] +- Feature folders use singular names (Order, not Orders) | confidence: 0.7 | seen: 4 | last: 2025-07-15 + +## Naming [0.5] +- Command classes end in Command, not Request | confidence: 0.5 | seen: 3 | last: 2025-07-15 +``` + +Category header `[0.7]` = average confidence. Use standard categories: Code Style, Architecture, Naming, Testing, Data Access, API Design, Configuration, Performance, Tooling. Each entry: `description | confidence: N | seen: N | last: date`. + +### Confidence Adjustment Rules + +Follow these rules precisely. No ad-hoc scoring. + +``` +CONFIRMATION TRACK: + First observation → 0.3 (hypothesis formed) + Second confirmation → 0.5 (pattern emerging) + Third confirmation → 0.7 (likely convention) + Fourth+ confirmation → 0.8 (strong convention) + Fifth+ with zero contradictions → 0.9 (promotion candidate) + +CONTRADICTION HANDLING: + Any contradiction → halve current confidence + Example: instinct at 0.7, contradicted → drops to 0.35 + Two contradictions in a row → drop to 0.1 (effectively dead) + +USER OVERRIDE: + User explicitly confirms → jump to 0.8 + User explicitly corrects → drop to 0.0 (remove the instinct) + User says "sometimes" → cap at 0.5 (conditional pattern) + +STALENESS: + No new observations for 10+ sessions → flag for review + Contradicted and not re-confirmed for 5 sessions → remove +``` + +### Acting on Instincts by Confidence Level + +``` +0.0 - 0.2 → IGNORE — insufficient evidence, do not mention +0.3 - 0.4 → NOTE — record internally, do not apply +0.5 - 0.6 → MENTION — "I notice this project may use [pattern]. Should I follow it?" 
+0.7 - 0.8 → FOLLOW — apply by default, mention when first applied +0.9 → PROMOTE — offer to add to MEMORY.md as a permanent rule +``` + +When generating code, apply instincts at 0.7+ silently. For instincts at 0.5-0.6, mention them as suggestions. Never silently apply an instinct below 0.7. + +### Seeking Confirmation Actively + +Do not wait passively for evidence. When you form a new instinct, look for it: + +``` +ACTIVE SEEKING PROTOCOL: +1. Form instinct: "This project uses Result for handler returns" +2. Before next code generation, check 2-3 existing handlers: + → Use find_symbol to locate other handlers + → Use get_public_api to check their return types +3. Count matches and mismatches +4. Adjust confidence based on findings +5. Update .claude/instincts.md with new count and confidence + +EXAMPLE: + New instinct: "Handlers return Result" at 0.3 + → Check CreateOrderHandler: returns Result ✓ + → Check GetProductHandler: returns Result ✓ + → Check DeleteUserHandler: returns Task ✗ + Result: 2/3 match → raise to 0.5, note the exception + Updated: Handlers return Result | confidence: 0.5 | seen: 3 | last: 2025-07-15 + Exception: DeleteUserHandler uses Task +``` + +### Promotion Protocol + +When an instinct reaches 0.9 confidence: + +``` +PROMOTION STEPS: +1. Present the evidence to the user: + "I've observed [pattern] across [N] instances with zero contradictions: + - CreateOrderHandler: ✓ + - UpdateOrderHandler: ✓ + - DeleteOrderHandler: ✓ + - GetProductHandler: ✓ + - CreateProductHandler: ✓ + Should I add this as a permanent rule to MEMORY.md?" + +2. If user approves: + - Add to MEMORY.md under the appropriate category + - Remove from .claude/instincts.md + - Format as a clear, generalized rule (use self-correction-loop generalization) + +3. If user declines: + - Cap confidence at 0.8 + - Note: "User reviewed, chose not to promote" + - Keep in instincts for continued reference + +4. 
If user provides context: + - "That's only for command handlers, not query handlers" + - Adjust the instinct to be more specific + - Reset confidence to 0.5 (narrowed scope needs re-confirmation) +``` + +### Export and Import Between Projects + +Export instincts at 0.7+ to `.claude/instincts-export.md`. On import, apply 0.2 confidence decay (0.9 becomes 0.7, 0.7 becomes 0.5). Never import above 0.7 — every project must confirm locally. Mark imported instincts with `source: "imported from [project]"`. + +### Session-Start Instinct Loading + +At the beginning of each session, load and apply instincts: + +``` +SESSION START: +1. Read .claude/instincts.md (if it exists) +2. Load all instincts at 0.7+ into active context +3. Note instincts at 0.5-0.6 for mention-when-relevant +4. Ignore instincts below 0.5 (they'll be confirmed or discarded organically) +5. Check for stale instincts (no updates in 10+ sessions) — flag for review +``` + +## Anti-patterns + +### Over-Eager Pattern Recognition + +``` +# BAD — treating first observation as a rule +*Reads one handler file* +"This project always uses internal sealed class on handlers." +*Generates 5 new handlers with internal sealed — but the project + actually uses public class in 8 out of 9 existing handlers* + +# GOOD — forming a hypothesis and seeking confirmation +*Reads one handler file* +"Noticed CreateOrderHandler uses internal sealed class. + Forming instinct at 0.3. Let me check a few more handlers..." +*Checks 3 more handlers, finds they all use public class* +"Disconfirmed. The one internal sealed handler was an exception." +``` + +### Stagnant Instincts + +``` +# BAD — instincts sit at 0.3 forever, never confirmed or discarded +.claude/instincts.md has 40 instincts, 35 of them at confidence 0.3 +from 3 months ago — useless noise + +# GOOD — actively seek confirmation or discard +After forming an instinct, check 2-3 related files in the same session. +Instincts that can't reach 0.5 within 3 sessions get removed. 
+``` + +### Global Instincts + +``` +# BAD — applying instincts from one project to another at full confidence +"ProjectA uses FluentValidation, so ProjectB must too." +*ProjectB uses DataAnnotations exclusively* + +# GOOD — project-scoped instincts with import decay +"ProjectA used FluentValidation (0.9). Importing to ProjectB at 0.7. + Let me check what ProjectB actually uses..." +*Finds DataAnnotations → drops to 0.0, removes the instinct* +``` + +### Instinct Hoarding + +``` +# BAD — never cleaning up the instinct file +.claude/instincts.md grows to 200 lines, full of contradictions +and instincts that were never confirmed + +# GOOD — periodic cleanup +Every 5 sessions, scan instincts: +- Remove anything below 0.2 +- Remove anything stale (no update in 10+ sessions) +- Promote anything at 0.9+ +- Keep the file under 50 active instincts +``` + +## Decision Guide + +| Scenario | Action | +|----------|--------| +| First time seeing a pattern | Create instinct at 0.3, seek confirmation in 2-3 related files | +| Pattern seen twice | Raise to 0.5, keep seeking | +| Pattern seen 3+ times with no contradictions | Raise to 0.7, start following by default | +| Pattern contradicted | Halve confidence, note the exception | +| User says "we always do X" | Create instinct at 0.8 (user confirmation) | +| User says "no, that's wrong" | Drop instinct to 0.0, remove it | +| Instinct at 0.9 | Present evidence to user, offer promotion to MEMORY.md | +| Instinct stale for 10+ sessions | Flag for review, ask user if it's still valid | +| Starting a new project | Do not import instincts from other projects above 0.7 | +| Similar project, want to share instincts | Export at 0.7+, import with 0.2 decay | +| Instinct file exceeds 50 entries | Audit: remove dead instincts, promote mature ones | +| Generating code and instinct is 0.5-0.6 | Mention the instinct, ask before applying | +| Generating code and instinct is 0.7+ | Apply silently, mention on first use | +| Conflicting instincts 
in same category | Keep both, note the conflict, seek the distinguishing condition | +| User partially confirms ("only for commands") | Narrow scope, reset to 0.5, re-confirm with narrowed definition | diff --git a/.opencode/skills/learning-log/SKILL.md b/.opencode/skills/learning-log/SKILL.md new file mode 100644 index 00000000..32e4a55b --- /dev/null +++ b/.opencode/skills/learning-log/SKILL.md @@ -0,0 +1,207 @@ +--- +name: learning-log +description: > + Auto-document insights and discoveries during development sessions. Unlike + MEMORY.md (corrective rules from the self-correction-loop skill), the learning + log captures organic discoveries: non-obvious bugs, undocumented architecture + decisions, performance findings, workarounds, and gotchas. Stored at + .claude/learning-log.md. Load this skill when Claude discovers something + non-obvious, finds a workaround, uncovers an undocumented decision, or when + the user asks about "learnings", "discoveries", "gotchas", "what did we learn", + or "document this finding". +--- + +# Learning Log + +## Core Principles + +1. **Log insights, not rules** — MEMORY.md stores corrective rules ("always use X instead of Y"). The learning log stores discoveries ("X behaves differently when Y is configured because Z"). Rules prescribe behavior; insights explain the world. + +2. **Structure enables searchability** — Every entry has a date, category, title, description, and affected files. Consistent structure means you can search by category, scan titles, or find entries related to specific files. + +3. **Log during work, not after** — Capture insights the moment they occur. Waiting until the end of a session means half the detail is lost. A 2-line entry written immediately is worth more than a paragraph reconstructed from memory. + +4. **Periodic review extracts patterns** — A monthly scan of the learning log reveals recurring themes. Three "gotcha" entries about the same subsystem suggests a systemic issue worth addressing. 
Individual entries are useful; patterns across entries are actionable. + +5. **Distinct from handoff notes** — The wrap-up-ritual handoff captures session state (done/pending/learned). The learning log is a persistent, growing knowledge base. Handoffs are overwritten; the log only grows (and is periodically pruned). + +## Patterns + +### Log Entry Format + +Each entry follows a consistent structure in `.claude/learning-log.md`: + +```markdown +# Learning Log + +## 2025-07-15 | Bug Root Cause | EF Core SaveChanges Silently Succeeds on Duplicate Keys +`SaveChangesAsync` with a duplicate PK throws on the *next* `SaveChangesAsync` call, not the insert. +**Files:** `src/Orders/Features/CreateOrder.cs:42` +**Resolution:** Call `SaveChangesAsync` immediately after `Add()`, before any other operations. + +## 2025-07-12 | Gotcha | MassTransit Consumer Registration Order Matters +Multiple consumers for the same message type run in registration order. If the first throws, +subsequent consumers are skipped. Caused missed audit events. +**Files:** `src/Shared/Extensions/MassTransitConfig.cs:15-30` +**Resolution:** Configure independent consumer endpoints or use the retry filter. +``` + +### Auto-Logging Triggers + +Log entries automatically when these situations occur: + +``` +TRIGGER 1: Non-Obvious Bug Root Cause +When debugging reveals the root cause is NOT where the error appeared. +→ Log the misdirection, the actual cause, and how to avoid confusion. + +TRIGGER 2: Undocumented Architecture Decision +When you discover WHY something was built a certain way (not just HOW). +→ Log the decision, the alternatives considered, and the rationale. + +TRIGGER 3: Workaround for Framework/Library Limitation +When the "correct" approach doesn't work and you need an alternative. +→ Log what didn't work, why, and what works instead. + +TRIGGER 4: Performance Finding +When profiling or observation reveals unexpected performance behavior. 
+→ Log the finding, the measurement, and the optimization applied. + +TRIGGER 5: External Service Behavior +When an external API/service behaves differently than documented. +→ Log the expected vs actual behavior and any workaround. + +TRIGGER 6: Non-Obvious Configuration +When a setting or configuration has a surprising effect. +→ Log the configuration, the surprising behavior, and the correct setup. +``` + +### Category System + +Use these 6 categories consistently: + +``` +Architecture Decision — WHY something is structured a certain way +Bug Root Cause — Non-obvious bugs where the error ≠ the cause +Performance Discovery — Unexpected performance behavior or optimization +Pattern Found — Reusable pattern discovered in the codebase +Gotcha — Surprising behavior in frameworks, libraries, or APIs +External Service — Quirks of third-party services and APIs +``` + +### Practical Logging Workflow + +How to log during active development: + +``` +DURING WORK: +1. You encounter something non-obvious +2. Spend 30 seconds writing a log entry (2-4 lines) +3. Include the category, a descriptive title, and affected files +4. Continue working — the entry is captured, detail can be added later + +ENTRY QUALITY LEVELS: +Quick (during work): Date | Category | Title + 1-line description + files +Full (if time allows): Date | Category | Title + full description + resolution + files + +A quick entry is infinitely better than no entry. +``` + +### Log vs. Memory vs. 
Handoff + +Distinguish between the three knowledge stores: + +``` +MEMORY.md (via self-correction-loop): +- Contains: Prescriptive rules ("always do X", "never do Y") +- Source: User corrections, promoted learning log entries +- Lifespan: Permanent until proven wrong +- Format: Category → bullet point rules + +.claude/learning-log.md (this skill): +- Contains: Descriptive insights ("X happens because Y") +- Source: Organic discoveries during development +- Lifespan: 3-6 months, then archive or promote +- Format: Date | Category | Title | Description | Files + +.claude/handoff.md (via wrap-up-ritual): +- Contains: Session state (done/pending/learned) +- Source: End of each session +- Lifespan: Until next session overwrites it +- Format: Completed / Pending / Learned sections +``` + +## Anti-patterns + +### Logging Everything + +``` +// BAD — low-value entries that add noise +## 2025-07-15 | Pattern Found | Used Primary Constructors +Used primary constructors for the OrderService class. +**Files:** src/Orders/OrderService.cs + +// GOOD — only log when it's non-obvious or surprising +## 2025-07-15 | Gotcha | Primary Constructor Parameters Captured as Fields +Primary constructor parameters in C# 14 are implicitly captured as fields. +If you also declare an explicit field with the same name, you get a compiler +warning but no error — and the two can silently diverge. 
+**Files:** src/Orders/OrderService.cs:5 +``` + +### No Categorization + +``` +// BAD — entries without categories are unsearchable +## 2025-07-15 | Compiled queries don't support Include() +## 2025-07-14 | MassTransit consumer ordering matters +## 2025-07-13 | Orders module uses VSA, Identity uses CA + +// GOOD — categories enable filtering and pattern detection +## 2025-07-15 | Performance Discovery | Compiled Queries Don't Support Include() +## 2025-07-14 | Gotcha | MassTransit Consumer Registration Order Matters +## 2025-07-13 | Architecture Decision | Why Orders Uses VSA While Identity Uses CA +``` + +### Write-Only Log (Never Reviewed) + +``` +// BAD — 100 entries, never reviewed +.claude/learning-log.md grows to 500 lines +Same gotchas keep appearing because no one reads the log +No entries are promoted to MEMORY.md + +// GOOD — monthly reviews extract value +Every 20 entries, scan for patterns +Promote recurring findings to MEMORY.md as preventive rules +Archive stale entries +The log stays lean and the rules get stronger +``` + +### Duplicating MEMORY.md Content + +``` +// BAD — restating a MEMORY.md rule as a log entry +MEMORY.md: "Always use TimeProvider instead of DateTime.Now" +learning-log.md: "## Gotcha | DateTime.Now Is Not Testable" ← redundant + +// GOOD — log adds a concrete incident the rule doesn't capture +learning-log.md: "## Bug Root Cause | Flaky Test Due to DateTime.Now + OrderExpiry test failed intermittently — DateTime.Now crossed midnight during run." 
+``` + +## Decision Guide + +| Scenario | Action | +|----------|--------| +| Found a non-obvious bug root cause | Log it — category: Bug Root Cause | +| Discovered why code is structured a certain way | Log it — category: Architecture Decision | +| Framework behaved unexpectedly | Log it — category: Gotcha | +| Performance surprise (good or bad) | Log it — category: Performance Discovery | +| Found a reusable pattern in the codebase | Log it — category: Pattern Found | +| External API behaved differently than docs say | Log it — category: External Service | +| User corrected Claude's code | Don't log — use `self-correction-loop` for MEMORY.md | +| Routine code change, nothing surprising | Don't log — only log insights | +| Same gotcha appeared 3+ times in the log | Promote to MEMORY.md as a preventive rule | +| Learning log exceeds 50 entries | Monthly review — archive old, promote recurring | +| Starting a new session | Scan recent log entries for context on the working area | diff --git a/.opencode/skills/logging/SKILL.md b/.opencode/skills/logging/SKILL.md new file mode 100644 index 00000000..c774ff29 --- /dev/null +++ b/.opencode/skills/logging/SKILL.md @@ -0,0 +1,164 @@ +--- +name: logging +description: > + Observability for .NET 10 applications. Covers Serilog structured logging, + OpenTelemetry traces and metrics, health checks, and correlation IDs. + Load this skill when setting up logging, tracing, metrics, or health monitoring, + or when the user mentions "Serilog", "logging", "structured log", "OpenTelemetry", + "traces", "metrics", "health check", "correlation ID", "observability", + "telemetry", "log enrichment", or "ILogger". +--- + +# Logging & Observability + +## Core Principles + +1. **Structured logging with Serilog** — Every log entry is a structured event with named properties, not a formatted string. This enables searching, filtering, and alerting. +2. **OpenTelemetry for distributed tracing** — Traces connect requests across services. 
Metrics track system health over time. +3. **Health checks for operational readiness** — Every service exposes `/health` endpoints for load balancers and orchestrators. +4. **Correlation IDs for request tracing** — Every request gets a unique ID that flows through all log entries and downstream service calls. + +## Patterns + +### Serilog Setup + +```csharp +// Program.cs +builder.Host.UseSerilog((context, loggerConfig) => +{ + loggerConfig + .ReadFrom.Configuration(context.Configuration) + .Enrich.FromLogContext() + .Enrich.WithMachineName() + .Enrich.WithProperty("Application", "MyApp.Api") + .WriteTo.Console(outputTemplate: + "[{Timestamp:HH:mm:ss} {Level:u3}] {Message:lj} {Properties:j}{NewLine}{Exception}") + .WriteTo.Seq(context.Configuration["Seq:Url"] ?? "http://localhost:5341"); +}); + +// After building the app +app.UseSerilogRequestLogging(options => +{ + options.EnrichDiagnosticContext = (diagnosticContext, httpContext) => + { + diagnosticContext.Set("UserId", + httpContext.User.FindFirstValue(ClaimTypes.NameIdentifier) ?? "anonymous"); + }; +}); +``` + +### Structured Logging (Correct Usage) + +```csharp +// GOOD — structured logging with message template +logger.LogInformation("Processing order {OrderId} for customer {CustomerId}", + orderId, customerId); + +// GOOD — include relevant context +logger.LogWarning("Payment failed for order {OrderId}. Attempt {Attempt} of {MaxAttempts}", + orderId, attempt, maxAttempts); + +// GOOD — log exceptions with structured data +logger.LogError(exception, "Failed to process order {OrderId}", orderId); +``` + +### Correlation IDs + +```csharp +// Middleware to set correlation ID +public class CorrelationIdMiddleware(RequestDelegate next) +{ + private const string CorrelationIdHeader = "X-Correlation-Id"; + + public async Task InvokeAsync(HttpContext context) + { + var correlationId = context.Request.Headers[CorrelationIdHeader].FirstOrDefault() + ?? 
Guid.NewGuid().ToString();
+
+        context.Items["CorrelationId"] = correlationId;
+        context.Response.Headers[CorrelationIdHeader] = correlationId;
+
+        using (LogContext.PushProperty("CorrelationId", correlationId))
+        {
+            await next(context);
+        }
+    }
+}
+
+// Program.cs
+app.UseMiddleware<CorrelationIdMiddleware>();
+```
+
+### OpenTelemetry Integration
+
+> For full OpenTelemetry setup (metrics, tracing, OTLP export), see the **opentelemetry** skill.
+> The logging skill focuses on structured logging with Serilog. OpenTelemetry handles the export pipeline.
+
+### Health Checks
+
+```csharp
+// Program.cs
+builder.Services.AddHealthChecks()
+    .AddNpgSql(builder.Configuration.GetConnectionString("Default")!,
+        name: "database", tags: ["ready"])
+    .AddRedis(builder.Configuration.GetConnectionString("Redis")!,
+        name: "redis", tags: ["ready"])
+    .AddRabbitMQ(builder.Configuration.GetConnectionString("RabbitMq")!,
+        name: "rabbitmq", tags: ["ready"]);
+
+// Map endpoints
+app.MapHealthChecks("/health/live", new HealthCheckOptions
+{
+    Predicate = _ => false // No dependency checks — just "am I running?" 
+}); + +app.MapHealthChecks("/health/ready", new HealthCheckOptions +{ + Predicate = check => check.Tags.Contains("ready") +}); +``` + +## Anti-patterns + +### Don't Use String Interpolation in Log Messages + +```csharp +// BAD — allocates string even if level is disabled, breaks structured logging +logger.LogInformation($"Order {orderId} created for {customerId}"); + +// GOOD — message template with named parameters +logger.LogInformation("Order {OrderId} created for {CustomerId}", orderId, customerId); +``` + +### Don't Log Sensitive Data + +```csharp +// BAD — logging credentials +logger.LogInformation("User logged in: {Email} with password {Password}", email, password); + +// GOOD — never log secrets, passwords, tokens, or PII +logger.LogInformation("User logged in: {Email}", email); +``` + +### Don't Skip Health Check Tags + +```csharp +// BAD — all checks run for liveness AND readiness +app.MapHealthChecks("/health"); + +// GOOD — separate liveness (am I running?) from readiness (can I serve traffic?) 
+app.MapHealthChecks("/health/live", new() { Predicate = _ => false }); +app.MapHealthChecks("/health/ready", new() { Predicate = c => c.Tags.Contains("ready") }); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Application logging | Serilog with structured logging | +| Distributed tracing | OpenTelemetry with OTLP exporter | +| Custom business metrics | `IMeterFactory` + counters/histograms | +| Request tracing | Correlation ID middleware | +| Container health | `/health/live` and `/health/ready` endpoints | +| Log storage | Seq (development), Elastic/Grafana (production) | +| Log levels | Debug in dev, Information in staging, Warning in production | diff --git a/.opencode/skills/messaging/SKILL.md b/.opencode/skills/messaging/SKILL.md new file mode 100644 index 00000000..65505b26 --- /dev/null +++ b/.opencode/skills/messaging/SKILL.md @@ -0,0 +1,297 @@ +--- +name: messaging +description: > + Asynchronous messaging patterns for .NET applications. Covers Wolverine and + MassTransit, outbox pattern, saga and choreography, and broker configuration + for RabbitMQ and Azure Service Bus. + Load this skill when implementing event-driven communication, background + processing, module-to-module messaging, or when the user mentions "Wolverine", + "MassTransit", "message bus", "RabbitMQ", "Azure Service Bus", "event", + "publish", "consumer", "outbox", "saga", "integration event", "queue", + or "pub/sub". +--- + +# Messaging + +## Core Principles + +1. **Wolverine is the recommended default** — MIT licensed, combines mediator + messaging in one library with built-in outbox, saga support, and convention-based handlers. MassTransit is an alternative but requires a commercial license from v9. +2. **Outbox pattern for reliability** — Always use the transactional outbox to ensure messages are published only when the database transaction succeeds. +3. 
**Choreography for simple flows, saga for complex** — If a workflow has 2-3 steps, use event choreography. If it has compensating actions or complex state, use a saga.
+4. **Messages are contracts** — Put message types in a shared contracts project. Keep them as simple records with primitive types.
+
+## Patterns
+
+### Wolverine Setup
+
+```csharp
+// Program.cs
+builder.Host.UseWolverine(opts =>
+{
+    // Auto-discover handlers from this assembly
+    opts.Discovery.IncludeAssembly(typeof(Program).Assembly);
+
+    // RabbitMQ transport
+    opts.UseRabbitMq(rabbit =>
+    {
+        rabbit.HostName = "localhost";
+        // Or from configuration:
+        // rabbit.HostName = builder.Configuration["RabbitMq:Host"]!;
+    })
+    .AutoProvision()        // Create queues/exchanges automatically
+    .AutoPurgeOnStartup();  // Dev only — clear queues on startup
+
+    // Enable transactional outbox with EF Core
+    opts.Services.AddDbContextWithWolverineIntegration<AppDbContext>(x =>
+        x.UseNpgsql(builder.Configuration.GetConnectionString("Default")));
+
+    opts.Policies.AutoApplyTransactions(); // Wrap handlers in DB transactions
+});
+```
+
+**Why**: `UseWolverine()` registers handler discovery, transport, and outbox in one place. `AutoProvision()` eliminates manual broker setup during development.
+
+### Publishing Events
+
+Wolverine supports two publishing styles: cascading messages (return values) and explicit publishing.
+
+```csharp
+// Message contract (in shared Contracts project)
+public record OrderCreated(Guid OrderId, string CustomerId, decimal Total, DateTimeOffset CreatedAt);
+
+// Style 1: Cascading messages — return the event from the handler
+// Wolverine automatically publishes returned messages after the handler completes. 
+public static class CreateOrder
+{
+    public record Command(string CustomerId, List<OrderItem> Items);
+    public record Response(Guid OrderId, decimal Total);
+
+    public static async Task<(Response, OrderCreated)> HandleAsync(
+        Command command, AppDbContext db, TimeProvider clock, CancellationToken ct)
+    {
+        var order = Order.Create(command.CustomerId, command.Items, clock.GetUtcNow());
+        db.Orders.Add(order);
+        await db.SaveChangesAsync(ct);
+
+        var response = new Response(order.Id, order.Total);
+        var @event = new OrderCreated(order.Id, order.CustomerId, order.Total, order.CreatedAt);
+
+        return (response, @event); // Both are published automatically
+    }
+}
+```
+
+```csharp
+// Style 2: Explicit publishing via IMessageBus
+public static class CreateOrder
+{
+    public record Command(string CustomerId, List<OrderItem> Items);
+    public record Response(Guid OrderId, decimal Total);
+
+    public static async Task<Response> HandleAsync(
+        Command command, AppDbContext db, IMessageBus bus, TimeProvider clock, CancellationToken ct)
+    {
+        var order = Order.Create(command.CustomerId, command.Items, clock.GetUtcNow());
+        db.Orders.Add(order);
+        await db.SaveChangesAsync(ct);
+
+        await bus.PublishAsync(new OrderCreated(
+            order.Id, order.CustomerId, order.Total, order.CreatedAt));
+
+        return new Response(order.Id, order.Total);
+    }
+}
+```
+
+**Why**: Cascading messages (tuple return) are simpler and testable — the handler is a pure function. Use explicit `IMessageBus` when publishing is conditional or requires multiple events.
+
+### Consuming Events
+
+Wolverine uses convention-based handlers — no interface, no base class. Just a `Handle` method with the message type as the first parameter. 
+
+```csharp
+// Notifications module — handles OrderCreated from Orders module
+public static class OrderCreatedHandler
+{
+    public static async Task HandleAsync(
+        OrderCreated message, NotificationsDbContext db, ILogger logger, CancellationToken ct)
+    {
+        logger.LogInformation("Processing OrderCreated: {OrderId}", message.OrderId);
+
+        var notification = new OrderNotification(message.OrderId, message.CustomerId);
+        db.Notifications.Add(notification);
+        await db.SaveChangesAsync(ct);
+    }
+}
+```
+
+**Why**: Convention-based handlers have zero ceremony. Wolverine discovers them by signature: any public method named `Handle`/`HandleAsync`/`Consume`/`ConsumeAsync` with the message type as the first parameter.
+
+### Transactional Outbox
+
+Ensures messages are only published if the database transaction succeeds.
+
+```csharp
+// 1. Register DbContext with Wolverine integration
+builder.Host.UseWolverine(opts =>
+{
+    opts.Services.AddDbContextWithWolverineIntegration<AppDbContext>(x =>
+        x.UseNpgsql(builder.Configuration.GetConnectionString("Default")));
+
+    opts.Policies.AutoApplyTransactions();
+});
+
+// 2. DbContext — add Wolverine outbox tables
+public class AppDbContext(DbContextOptions<AppDbContext> options) : DbContext(options)
+{
+    public DbSet<Order> Orders => Set<Order>();
+
+    protected override void OnModelCreating(ModelBuilder modelBuilder)
+    {
+        // Wolverine inbox/outbox tables — required for transactional messaging
+        modelBuilder.AddIncomingWolverineMessageTable();
+        modelBuilder.AddOutgoingWolverineMessageTable();
+    }
+}
+```
+
+**Why**: `AddDbContextWithWolverineIntegration` + `AutoApplyTransactions` wraps every handler in a transaction that includes outbox writes. Messages are only sent after the transaction commits — no dual-write problem.
+
+### Saga (Stateful Orchestration)
+
+Wolverine sagas use a `Saga` base class with `Start` and `Handle` methods. Cascading messages drive the saga forward.
+
+```csharp
+public record OrderSagaState(Guid Id)
+{
+    public string? 
CustomerId { get; set; }
+    public bool PaymentReceived { get; set; }
+}
+
+public class OrderSaga : Saga
+{
+    public Guid Id { get; set; }
+    public bool PaymentReceived { get; set; }
+
+    // Start the saga when an OrderCreated event arrives
+    public static (OrderSagaState, ProcessPayment) Start(OrderCreated message)
+    {
+        var state = new OrderSagaState(message.OrderId)
+        {
+            CustomerId = message.CustomerId
+        };
+
+        var command = new ProcessPayment(message.OrderId, message.Total);
+        return (state, command); // State is persisted, command is sent
+    }
+
+    // Handle payment result
+    public CompleteOrder Handle(PaymentCompleted message)
+    {
+        PaymentReceived = true;
+        MarkCompleted(); // Ends the saga
+        return new CompleteOrder(Id);
+    }
+
+    // Compensating action on failure
+    public CancelOrder Handle(PaymentFailed message)
+    {
+        MarkCompleted();
+        return new CancelOrder(Id);
+    }
+}
+```
+
+**Why**: Wolverine sagas use simple C# methods instead of a state machine DSL. Each handler returns cascading messages to drive the workflow. `MarkCompleted()` cleans up the saga state.
+
+### Alternative: MassTransit
+
+MassTransit is a mature alternative with a commercial license requirement from v9+. Key API surface:
+
+```csharp
+// Setup
+builder.Services.AddMassTransit(x =>
+{
+    x.SetKebabCaseEndpointNameFormatter();
+    x.AddConsumers(typeof(Program).Assembly);
+    x.UsingRabbitMq((context, cfg) =>
+    {
+        cfg.Host(builder.Configuration.GetConnectionString("RabbitMq"));
+        cfg.ConfigureEndpoints(context);
+    });
+});
+
+// Publishing
+await publishEndpoint.Publish(new OrderCreated(...), ct);
+
+// Consuming — requires IConsumer interface
+public class OrderCreatedConsumer(AppDbContext db) : IConsumer<OrderCreated>
+{
+    public async Task Consume(ConsumeContext<OrderCreated> context)
+    {
+        var message = context.Message;
+        // Handle event...
+    }
+}
+
+// Outbox
+x.AddEntityFrameworkOutbox<AppDbContext>(o =>
+{
+    o.UsePostgres();
+    o.UseBusOutbox();
+});
+
+// Saga — uses MassTransitStateMachine
+public class OrderSaga : MassTransitStateMachine<OrderState> { /* ... 
*/ } +``` + +> **License note**: MassTransit v9+ requires a commercial license for production use. Wolverine (MIT) is the recommended default for new projects. + +## Anti-patterns + +### Don't Publish Events Without Outbox + +```csharp +// BAD — if SaveChanges succeeds but Publish fails, data is inconsistent +await db.SaveChangesAsync(ct); +await bus.PublishAsync(new OrderCreated(...)); + +// GOOD — use transactional outbox (messages are in the same transaction) +// Configure AddDbContextWithWolverineIntegration() + AutoApplyTransactions() +// Wolverine handles this automatically +``` + +### Don't Put Complex Logic in Message Contracts + +```csharp +// BAD — behavior in a message +public record OrderCreated(Guid OrderId) +{ + public decimal CalculateShipping() => /* logic */; // DON'T +} + +// GOOD — messages are pure data +public record OrderCreated(Guid OrderId, string CustomerId, decimal Total, DateTimeOffset CreatedAt); +``` + +### Don't Use Fire-and-Forget for Important Events + +```csharp +// BAD — no guarantee of delivery +_ = Task.Run(() => bus.PublishAsync(new OrderCreated(...))); + +// GOOD — await the publish (with outbox, this is transactional) +await bus.PublishAsync(new OrderCreated(...)); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Module-to-module communication (new project) | Wolverine with events (MIT, free) | +| Module-to-module communication (existing MassTransit) | MassTransit (commercial license required from v9) | +| Reliable event publishing | Transactional outbox (both Wolverine and MassTransit support this) | +| Simple 2-3 step workflow | Event choreography | +| Complex workflow with compensation | Wolverine saga or MassTransit saga | +| Local development broker | RabbitMQ (via Docker or Aspire) | +| Production cloud broker | Azure Service Bus or RabbitMQ | +| Want single lib for mediator + messaging | Wolverine (replaces both Mediator and MassTransit) | diff --git 
a/.opencode/skills/migration-workflow/SKILL.md b/.opencode/skills/migration-workflow/SKILL.md new file mode 100644 index 00000000..4e48ab5e --- /dev/null +++ b/.opencode/skills/migration-workflow/SKILL.md @@ -0,0 +1,284 @@ +--- +name: migration-workflow +description: > + Safe migration workflows for EF Core database migrations, .NET version upgrades, + and NuGet dependency updates. Includes rollback strategies and verification steps. + Load when: "migration", "add migration", "ef migration", "update database", + "upgrade nuget", "update packages", "dependency update", "version upgrade". +--- + +# Migration Workflow + +## Core Principles + +1. **Verify before applying** — Always review generated migration SQL before applying to any database. `dotnet ef migrations script` shows the exact SQL. Never apply blindly. +2. **Rollback plan always** — Every migration has a rollback. For EF Core: `dotnet ef database update `. For packages: git revert. For .NET version: branch-based rollback. Document the rollback before applying. +3. **Test after migration** — Run the full test suite after every migration step. Migrations that break tests are not complete. Integration tests with Testcontainers catch real database issues. +4. **One change per migration** — Each EF Core migration should represent a single logical change (add table, rename column, add index). Multiple unrelated changes in one migration make rollback impossible. +5. **Incremental updates** — Update one package at a time, build, test. Update one target framework at a time, build, test. Never batch unrelated changes — when something breaks, you need to know which change caused it. + +## Patterns + +### EF Core Migration Workflow + +Step-by-step workflow for creating and applying database migrations safely. 
+ +**Step 1: Check Current State** +```bash +# List all migrations and their status +dotnet ef migrations list --project src/Infrastructure --startup-project src/Api + +# Verify the database is at the expected migration +dotnet ef database update --project src/Infrastructure --startup-project src/Api -- --dry-run +``` + +**Step 2: Create Migration** +Use descriptive names that explain the change, not the entity: +```bash +# GOOD — Describes the change +dotnet ef migrations add AddOrderShippingAddress --project src/Infrastructure --startup-project src/Api +dotnet ef migrations add RenameCustomerEmailToContactEmail --project src/Infrastructure --startup-project src/Api +dotnet ef migrations add AddIndexOnOrderCreatedAt --project src/Infrastructure --startup-project src/Api + +# BAD — Describes the entity, not the change +dotnet ef migrations add Order +dotnet ef migrations add UpdateCustomer +``` + +**Step 3: Review Generated SQL** +```bash +# Generate the SQL script for review +dotnet ef migrations script --idempotent --project src/Infrastructure --startup-project src/Api + +# Or generate from a specific migration +dotnet ef migrations script PreviousMigration AddOrderShippingAddress --project src/Infrastructure --startup-project src/Api +``` + +Check for: +- ⚠️ Data loss: `DROP COLUMN`, `DROP TABLE`, column type changes that lose precision +- ⚠️ Long locks: `ALTER TABLE` on large tables without concurrent index creation +- ⚠️ Default values: New non-nullable columns need defaults for existing rows + +**Step 4: Handle Data Loss Warnings** +If EF Core warns about potential data loss: + +```csharp +// In the migration file — explicitly handle data transformation +protected override void Up(MigrationBuilder migrationBuilder) +{ + // Step 1: Add new column as nullable + migrationBuilder.AddColumn("ContactEmail", "Customers", nullable: true); + + // Step 2: Copy data from old column + migrationBuilder.Sql("UPDATE \"Customers\" SET \"ContactEmail\" = \"Email\""); + + // 
Step 3: Make non-nullable after data is copied + migrationBuilder.AlterColumn("ContactEmail", "Customers", nullable: false); + + // Step 4: Drop old column + migrationBuilder.DropColumn("Email", "Customers"); +} +``` + +**Step 5: Apply and Verify** +```bash +# Apply to development database +dotnet ef database update --project src/Infrastructure --startup-project src/Api + +# Run tests to verify +dotnet test +``` + +**Step 6: Rollback (if needed)** +```bash +# Rollback to previous migration +dotnet ef database update PreviousMigrationName --project src/Infrastructure --startup-project src/Api + +# Remove the failed migration from code +dotnet ef migrations remove --project src/Infrastructure --startup-project src/Api +``` + +### NuGet Dependency Update Workflow + +Safe process for updating NuGet packages without breaking the build. + +**Step 1: Audit Current State** +```bash +# List all outdated packages +dotnet list package --outdated + +# Check for vulnerable packages +dotnet list package --vulnerable +``` + +**Step 2: Categorize Updates** +- **Patch updates** (1.0.0 → 1.0.1): Safe, bug fixes only. Update all patches at once. +- **Minor updates** (1.0.0 → 1.1.0): Usually safe, new features. Update one at a time. +- **Major updates** (1.0.0 → 2.0.0): Breaking changes expected. Update one at a time, read release notes. + +**Step 3: Update Incrementally** +```bash +# Patch updates — batch is safe +dotnet outdated --upgrade Patch + +# Minor updates — one at a time +dotnet add src/Api/Api.csproj package Serilog.AspNetCore --version 9.1.0 +dotnet build +dotnet test + +# Major updates — one at a time with careful review +dotnet add src/Api/Api.csproj package WolverineFx +dotnet build # Fix compilation errors +dotnet test # Fix behavioral changes +``` + +**Step 4: Reference Package Recommendations** +Check `knowledge/package-recommendations.md` before adding new packages: +- Is there a built-in .NET alternative? 
(e.g., HybridCache vs third-party cache) +- Is the package actively maintained? +- Does it align with kit recommendations? + +**Step 5: Verify** +```bash +dotnet build # Clean compilation +dotnet test # All tests pass +``` + +### .NET Version Migration Workflow + +Structured upgrade from older .NET versions to .NET 10. + +**Step 1: Assess Current State** +``` +→ get_project_graph + List all projects and their target frameworks. + Flag: mixed TFMs, test projects on different versions. +``` + +**Step 2: Pre-Migration Checklist** +- [ ] All tests pass on current version +- [ ] No pending EF Core migrations +- [ ] Dependencies checked for .NET 10 compatibility +- [ ] Branch created for migration work + +**Step 3: Update global.json** +```json +{ + "sdk": { + "version": "10.0.100", + "rollForward": "latestMinor" + } +} +``` + +**Step 4: Update Target Frameworks** +Update each `.csproj` (or `Directory.Build.props` if centralized): +```xml +<PropertyGroup> + <TargetFramework>net10.0</TargetFramework> + <LangVersion>14</LangVersion> +</PropertyGroup> +``` + +**Step 5: Update Packages** +```bash +# Update all Microsoft.* packages to 10.x +dotnet outdated --upgrade Major --include Microsoft.* +dotnet build # Fix compilation issues +``` + +**Step 6: Adopt New Features** +Reference `knowledge/dotnet-whats-new.md`: +- Replace `DateTime.Now`/`DateTime.UtcNow` with `TimeProvider` +- Use `HybridCache` instead of `IDistributedCache` +- Convert classes to primary constructors where appropriate +- Use collection expressions: `int[] x = [1, 2, 3]` +- Use the `field` keyword in property accessors + +**Step 7: Verify** +```bash +dotnet build # Clean build +dotnet test # All tests pass +dotnet format --verify-no-changes # Formatting consistent +``` + +Then run the health check workflow from the `project-setup` skill to establish the new baseline. 
+ +## Anti-patterns + +### Applying Migrations Without Reviewing SQL + +```bash +# BAD — Blindly applying +dotnet ef database update +# Oops, dropped a column with 10 million rows of data +``` + +```bash +# GOOD — Review first, then apply +dotnet ef migrations script --idempotent > review.sql +# Read review.sql, check for DROP, ALTER, data loss +dotnet ef database update +``` + +### Updating All Packages at Once + +```bash +# BAD — Update everything, pray it works +dotnet outdated --upgrade Major +dotnet build # 47 errors — which package caused this? +``` + +```bash +# GOOD — One at a time, build after each +dotnet add package WolverineFx +dotnet build && dotnet test # ✅ +dotnet add package Serilog --version 5.0.0 +dotnet build && dotnet test # ❌ — Serilog 5.0 broke the sink config +``` + +### Skipping Tests After Migration + +```bash +# BAD +dotnet ef database update # "It compiled, ship it" +``` + +```bash +# GOOD +dotnet ef database update +dotnet test # Run FULL test suite, especially integration tests +# Integration tests with Testcontainers will catch schema mismatches +``` + +### Multiple Unrelated Changes in One Migration + +```bash +# BAD — Three unrelated changes in one migration +dotnet ef migrations add UpdateEverything +# Contains: new table + renamed column + dropped index +# Rollback is all-or-nothing for three unrelated changes +``` + +```bash +# GOOD — One change per migration +dotnet ef migrations add AddShippingAddressTable +dotnet ef migrations add RenameCustomerEmailColumn +dotnet ef migrations add DropUnusedOrderIndex +# Each can be rolled back independently +``` + +## Decision Guide + +| Scenario | Workflow | Key Step | +|----------|----------|----------| +| New database table | EF Core Migration | Create entity + config + migration | +| Column rename | EF Core Migration | Review SQL for data preservation | +| Add index | EF Core Migration | Check for long locks on large tables | +| Data transformation | EF Core Migration + raw SQL | Custom 
`Up()` with SQL statements | +| Outdated packages | NuGet Update | One at a time, build + test between each | +| Vulnerable package | NuGet Update (urgent) | Update immediately, test, deploy | +| .NET version upgrade | .NET Migration | Phase 1-4, verify at each phase | +| Add new package | NuGet Update | Check package-recommendations.md first | +| `ExecuteUpdateAsync` vs migration | Depends | Migration for schema; `ExecuteUpdateAsync` for bulk data updates at runtime | +| Modify existing migration | **Never** if already applied | Create new migration instead | diff --git a/.opencode/skills/minimal-api/SKILL.md b/.opencode/skills/minimal-api/SKILL.md new file mode 100644 index 00000000..1848d9e3 --- /dev/null +++ b/.opencode/skills/minimal-api/SKILL.md @@ -0,0 +1,318 @@ +--- +name: minimal-api +description: > + .NET 10 minimal APIs — the default for building HTTP endpoints. Covers MapGroup, + endpoint filters, TypedResults, OpenAPI metadata, parameter binding, and route + conventions. + Load this skill when creating API endpoints, configuring routing, setting up + OpenAPI documentation, or when the user mentions "endpoint", "MapGet", "MapPost", + "MapGroup", "TypedResults", "route", "minimal API", "OpenAPI", "swagger", + "rate limiting", or "output caching". +--- + +# Minimal APIs (.NET 10) + +## Core Principles + +1. **Minimal APIs are the default** — Use controllers only when migrating legacy code. Minimal APIs are lighter, faster, and compose well with any architecture style. +2. **Group endpoints with `MapGroup`** — Never scatter individual `MapGet`/`MapPost` calls in `Program.cs`. Group related endpoints together. +3. **Use `TypedResults` for OpenAPI** — `TypedResults.Ok(value)` gives you compile-time type safety AND correct OpenAPI documentation. `Results.Ok(value)` does not. +4. **Metadata over comments** — Use `.WithName()`, `.WithTags()`, `.WithSummary()` to document endpoints. The metadata feeds into OpenAPI specs. 
+ +## Patterns + +### Endpoint Group Auto-Discovery (Required Pattern) + +Every endpoint group lives in its own file and implements `IEndpointGroup`. A single `app.MapEndpoints()` call in `Program.cs` discovers and registers all groups automatically. **Program.cs never changes when you add new endpoint groups.** + +```csharp +// Extensions/IEndpointGroup.cs +public interface IEndpointGroup +{ + void Map(IEndpointRouteBuilder app); +} +``` + +```csharp +// Extensions/EndpointExtensions.cs +public static class EndpointExtensions +{ + public static WebApplication MapEndpoints(this WebApplication app) + { + var groups = typeof(Program).Assembly + .GetTypes() + .Where(t => t.IsAssignableTo(typeof(IEndpointGroup)) && !t.IsInterface && !t.IsAbstract) + .Select(Activator.CreateInstance) + .Cast<IEndpointGroup>(); + + foreach (var group in groups) + group.Map(app); + + return app; + } +} +``` + +```csharp +// Program.cs — this NEVER changes when adding endpoints +var app = builder.Build(); +app.MapEndpoints(); +app.Run(); +``` + +```csharp +// Features/Orders/OrderEndpoints.cs — one file per endpoint group +public sealed class OrderEndpoints : IEndpointGroup +{ + public void Map(IEndpointRouteBuilder app) + { + var group = app.MapGroup("/api/orders").WithTags("Orders"); + + group.MapPost("/", CreateOrder) + .WithName("CreateOrder") + .WithSummary("Create a new order") + .Produces<OrderResponse>(StatusCodes.Status201Created) + .ProducesValidationProblem() + .RequireAuthorization(); + + group.MapGet("/{id:guid}", GetOrder) + .WithName("GetOrder") + .Produces<OrderResponse>() + .ProducesProblem(StatusCodes.Status404NotFound); + + group.MapGet("/", ListOrders) + .WithName("ListOrders") + .Produces<List<OrderResponse>>(); + } + + private static async Task<Results<Created<OrderResponse>, ValidationProblem>> CreateOrder( + CreateOrderRequest request, + ISender sender, + CancellationToken ct) + { + var result = await sender.Send(new CreateOrder.Command(request.CustomerId, request.Items), ct); + return result.IsSuccess + ? 
TypedResults.Created($"/api/orders/{result.Value.Id}", result.Value) + : TypedResults.ValidationProblem(result.Errors); + } + + private static async Task, NotFound>> GetOrder( + Guid id, + ISender sender, + CancellationToken ct) + { + var result = await sender.Send(new GetOrder.Query(id), ct); + return result.IsSuccess + ? TypedResults.Ok(result.Value) + : TypedResults.NotFound(); + } + + private static async Task>> ListOrders( + [AsParameters] ListOrdersQuery query, + ISender sender, + CancellationToken ct) + { + var result = await sender.Send(query, ct); + return TypedResults.Ok(result); + } +} +``` + +### TypedResults for Type-Safe Responses + +`TypedResults` provides compile-time guarantees and automatic OpenAPI schema generation. + +```csharp +// GOOD — TypedResults with union return type +private static async Task, NotFound, ValidationProblem>> GetProduct( + Guid id, + AppDbContext db, + CancellationToken ct) +{ + var product = await db.Products.FindAsync([id], ct); + return product is not null + ? TypedResults.Ok(product) + : TypedResults.NotFound(); +} +``` + +### Parameter Binding + +.NET 10 minimal APIs bind parameters from route, query, header, body, and DI automatically. + +```csharp +// Route parameters +app.MapGet("/orders/{id:guid}", (Guid id) => ...); + +// Query parameters (nullable = optional) +app.MapGet("/orders", (int page, int? pageSize, string? status) => ...); + +// Complex query parameters with [AsParameters] +public record ListOrdersQuery(int Page = 1, int PageSize = 20, string? Status = null); +app.MapGet("/orders", ([AsParameters] ListOrdersQuery query) => ...); + +// Header binding +app.MapGet("/orders", ([FromHeader(Name = "X-Correlation-Id")] string? correlationId) => ...); + +// DI services are auto-resolved (no attribute needed) +app.MapPost("/orders", (CreateOrderRequest request, ISender sender) => ...); +``` + +### Endpoint Filters + +Filters are the minimal API equivalent of action filters. Use them for cross-cutting concerns. 
+ +```csharp +// Validation filter +public class ValidationFilter<T>(IValidator<T> validator) : IEndpointFilter +{ + public async ValueTask<object?> InvokeAsync(EndpointFilterInvocationContext context, EndpointFilterDelegate next) + { + var request = context.Arguments.OfType<T>().FirstOrDefault(); + if (request is null) + return TypedResults.BadRequest("Request body is required."); + + var validationResult = await validator.ValidateAsync(request); + if (!validationResult.IsValid) + return TypedResults.ValidationProblem(validationResult.ToDictionary()); + + return await next(context); + } +} + +// Apply to an endpoint +group.MapPost("/", CreateOrder) + .AddEndpointFilter<ValidationFilter<CreateOrderRequest>>(); + +// Apply to a group (affects all endpoints in the group) +group.AddEndpointFilter<LoggingFilter>(); +``` + +### OpenAPI / Swagger Configuration + +.NET 10 has built-in OpenAPI support. Use it instead of Swashbuckle. + +```csharp +// Program.cs — service registration only, no endpoint wiring +builder.Services.AddOpenApi(); + +var app = builder.Build(); + +if (app.Environment.IsDevelopment()) +{ + app.MapOpenApi(); +} +app.MapEndpoints(); // auto-discovers all IEndpointGroup implementations + +// Endpoint metadata enriches the OpenAPI spec +group.MapPost("/", CreateOrder) + .WithName("CreateOrder") + .WithSummary("Create a new order") + .WithDescription("Creates a new order for the specified customer with the given line items.") + .Produces<OrderResponse>(StatusCodes.Status201Created) + .ProducesValidationProblem() + .ProducesProblem(StatusCodes.Status500InternalServerError); +``` + +### Rate Limiting + +```csharp +builder.Services.AddRateLimiter(options => +{ + options.AddFixedWindowLimiter("api", opt => + { + opt.PermitLimit = 100; + opt.Window = TimeSpan.FromMinutes(1); + }); +}); + +// Apply inside an IEndpointGroup.Map method +var group = app.MapGroup("/api/orders") + .WithTags("Orders") + .RequireRateLimiting("api"); +``` + +### Output Caching + +```csharp +builder.Services.AddOutputCache(options => +{ + options.AddBasePolicy(builder 
=> builder.Expire(TimeSpan.FromMinutes(5))); + options.AddPolicy("ByIdCache", builder => builder + .Expire(TimeSpan.FromMinutes(10)) + .SetVaryByRouteValue("id")); +}); + +group.MapGet("/{id:guid}", GetOrder) + .CacheOutput("ByIdCache"); +``` + +## Anti-patterns + +### Don't Put Endpoints in Program.cs + +```csharp +// BAD — endpoints scattered in Program.cs +app.MapGet("/orders", async (AppDbContext db) => await db.Orders.ToListAsync()); +app.MapGet("/orders/{id}", async (Guid id, AppDbContext db) => await db.Orders.FindAsync(id)); +app.MapPost("/orders", async (Order order, AppDbContext db) => { /* ... */ }); +app.MapGet("/products", async (AppDbContext db) => await db.Products.ToListAsync()); + +// ALSO BAD — manual MapGroup calls in Program.cs (grows with every feature) +app.MapGroup("/api/orders").WithTags("Orders").MapOrderEndpoints(); +app.MapGroup("/api/products").WithTags("Products").MapProductEndpoints(); +app.MapGroup("/api/customers").WithTags("Customers").MapCustomerEndpoints(); +// Program.cs grows every time you add a feature... + +// GOOD — auto-discovered, Program.cs never changes +app.MapEndpoints(); // discovers all IEndpointGroup implementations +``` + +### Don't Use Untyped Results + +```csharp +// BAD — Results.Ok doesn't contribute to OpenAPI schema +private static async Task GetOrder(Guid id, AppDbContext db) +{ + var order = await db.Orders.FindAsync(id); + return order is not null ? Results.Ok(order) : Results.NotFound(); +} + +// GOOD — TypedResults with explicit union type +private static async Task, NotFound>> GetOrder(Guid id, AppDbContext db) +{ + var order = await db.Orders.FindAsync(id); + return order is not null ? 
TypedResults.Ok(order) : TypedResults.NotFound(); +} +``` + +### Don't Return Domain Entities Directly + +```csharp +// BAD — leaks internal structure, can't evolve independently +app.MapGet("/orders/{id}", async (Guid id, AppDbContext db) => + await db.Orders.Include(o => o.Items).FirstOrDefaultAsync(o => o.Id == id)); + +// GOOD — map to a response DTO +app.MapGet("/orders/{id}", async (Guid id, AppDbContext db) => +{ + var order = await db.Orders + .Where(o => o.Id == id) + .Select(o => new OrderResponse(o.Id, o.Total, o.CreatedAt)) + .FirstOrDefaultAsync(); + return order is not null ? TypedResults.Ok(order) : TypedResults.NotFound(); +}); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| New HTTP API | `IEndpointGroup` per feature + `app.MapEndpoints()` auto-discovery | +| Existing MVC project | Keep controllers, migrate incrementally | +| OpenAPI documentation | Use `TypedResults` + `.WithName()` + `.WithSummary()` | +| Request validation | Endpoint filter with FluentValidation | +| Authentication/authorization | `.RequireAuthorization("PolicyName")` on group or endpoint | +| Rate limiting | `AddRateLimiter` + `.RequireRateLimiting()` | +| Response caching | `AddOutputCache` + `.CacheOutput()` | +| Complex model binding | `[AsParameters]` with a record type | diff --git a/.opencode/skills/model-selection/SKILL.md b/.opencode/skills/model-selection/SKILL.md new file mode 100644 index 00000000..966bb062 --- /dev/null +++ b/.opencode/skills/model-selection/SKILL.md @@ -0,0 +1,193 @@ +--- +name: model-selection +description: > + Strategic Claude model selection for .NET development workflows. Guides when + to use Opus 4.6 (deep reasoning, architecture, ambiguous problems) vs Sonnet 4.6 + (throughput, large context, routine implementation) vs Haiku 4.5 (fast, cheap + subagent tasks). Covers model switching workflows, subagent model assignment, + and cost-effective task routing. 
Load this skill when choosing models for tasks, + optimizing costs, working with subagents, or when the user mentions "model", + "Opus", "Sonnet", "Haiku", "which model", "cost", "switch model", or "fast mode". +--- + +# Model Selection + +## Core Principles + +1. **Match model to complexity, not size** — A 50-file refactor that follows a clear pattern is a Sonnet task (high throughput, simple logic). A 3-file architecture decision with trade-offs is an Opus task (deep reasoning). File count and complexity are orthogonal. + +2. **Sonnet is the workhorse** — 80% of .NET development tasks are routine: implement a feature following an established pattern, write tests, fix a known bug, run scaffolding. Sonnet 4.6 handles all of these at higher speed and lower cost. + +3. **Opus is the architect** — Use Opus 4.6 for tasks that require weighing trade-offs, reasoning about system design, debugging subtle issues, or making decisions with incomplete information. Opus excels when the answer isn't obvious. + +4. **Context window is a budget, not a dumping ground** — Sonnet 4.6's large context window enables working with big codebases but doesn't mean you should load everything. Apply `context-discipline` principles regardless of model. A focused Sonnet session outperforms a bloated one. + +5. **Haiku for fire-and-forget subagents** — When a subagent does a simple lookup, runs a script, or fetches information, Haiku 4.5 is fast and cheap. Reserve heavier models for subagents that need to reason. 
+ +## Patterns + +### Task Complexity Assessment + +Classify each task to select the right model: + +``` +ROUTINE TASKS → Sonnet 4.6 +- Implement a feature following an existing pattern +- Write tests for existing code +- Fix a bug with a clear stack trace +- Add a new endpoint matching the project's convention +- Run scaffolding or code generation +- Apply a known refactoring pattern +- Format, lint, or fix build errors +- Write documentation from existing code + +COMPLEX TASKS → Opus 4.6 +- Design a new module or subsystem from scratch +- Choose between architecture approaches (VSA vs CA vs DDD) +- Debug a subtle issue with no clear stack trace +- Refactor with trade-offs (performance vs readability, consistency vs simplicity) +- Review architecture for design flaws +- Make decisions with incomplete or conflicting requirements +- Untangle complex dependencies or circular references +- Write a migration strategy for breaking changes + +SIMPLE TASKS → Haiku 4.5 (subagents only) +- Look up a file path or symbol location +- Run a build/test and report results +- Search for a pattern across files +- Summarize a file or module +- Format or validate data +``` + +### Model Switching Workflow + +The most effective pattern: Opus plans, Sonnet executes, Opus reviews. 
+ +``` +PHASE 1: PLAN (Opus 4.6) +├── Analyze requirements and constraints +├── Identify architectural trade-offs +├── Design the approach with rationale +├── Define acceptance criteria +└── Output: detailed implementation plan + +PHASE 2: EXECUTE (Sonnet 4.6) +├── Implement following the Opus plan +├── Write code, tests, configurations +├── Run build and test verification +├── Handle routine issues (compilation errors, test fixes) +└── Output: working implementation + +PHASE 3: REVIEW (Opus 4.6) +├── Review implementation against the plan +├── Check for subtle issues (race conditions, N+1, security) +├── Evaluate architectural compliance +├── Suggest refinements +└── Output: approval or specific revision requests +``` + +How to switch in practice: + +``` +Claude Code CLI: +- /model opus → Switch to Opus 4.6 (planning/review phases) +- /model sonnet → Switch to Sonnet 4.6 (implementation phase) +- /model auto → Let Claude Code choose based on task + +Toggle fast mode: +- /fast → Toggle fast mode (Opus fast output for throughput) +``` + +### Subagent Model Assignment + +Assign models to subagents based on task complexity: + +``` +SUBAGENT: "Find all authentication middleware in the project" +→ MODEL: Haiku 4.5 +→ WHY: Simple search task, no reasoning required + +SUBAGENT: "Run dotnet test and summarize failures" +→ MODEL: Haiku 4.5 +→ WHY: Execute command, parse output, no complex analysis + +SUBAGENT: "Analyze the dependency graph for circular references and suggest fixes" +→ MODEL: Sonnet 4.6 +→ WHY: Needs to understand project structure and propose solutions + +SUBAGENT: "Review this PR for architectural issues and security vulnerabilities" +→ MODEL: Opus 4.6 +→ WHY: Deep reasoning about trade-offs, subtle issue detection + +SUBAGENT: "Summarize what the Orders module does" +→ MODEL: Haiku 4.5 or Sonnet 4.6 +→ WHY: Haiku for a quick overview, Sonnet if the module is complex +``` + +## Anti-patterns + +### Using Opus for Simple Tasks + +``` +// BAD — Opus for a 
routine CRUD endpoint +Using Opus 4.6 to implement GetOrderById following the exact same pattern +as the existing GetCustomerById. No decisions to make, just pattern replication. +*Slower and more expensive than needed* + +// GOOD — Sonnet for pattern replication +Using Sonnet 4.6 to implement GetOrderById. The pattern is established, +the code is straightforward, and Sonnet executes it faster. +``` + +### Using Sonnet for Architecture Decisions + +``` +// BAD — Sonnet for "should we use VSA or Clean Architecture?" +Sonnet gives a reasonable answer but may miss nuanced trade-offs +about team size, domain complexity, and long-term maintenance. +*The wrong architecture costs months of refactoring* + +// GOOD — Opus for architectural decisions +Opus weighs team size, domain complexity, current codebase patterns, +and long-term implications. The architecture decision is worth the +extra reasoning power. +``` + +### Same Model for All Subagents + +``` +// BAD — all subagents use Opus +5 subagents running Opus 4.6: +- "Find OrderService.cs" (Haiku could do this) +- "Run dotnet test" (Haiku could do this) +- "Summarize the Catalog module" (Haiku/Sonnet could do this) +- "Analyze circular dependencies" (Sonnet is sufficient) +- "Review architecture for security issues" (Opus is appropriate) +*4 out of 5 subagents are using more model than needed* + +// GOOD — model matches subagent task +- "Find OrderService.cs" → Haiku 4.5 +- "Run dotnet test" → Haiku 4.5 +- "Summarize the Catalog module" → Haiku 4.5 +- "Analyze circular dependencies" → Sonnet 4.6 +- "Review architecture for security issues" → Opus 4.6 +``` + +## Decision Guide + +| Scenario | Model | Rationale | +|----------|-------|-----------| +| Plan a new feature or module | Opus 4.6 | Requires weighing trade-offs | +| Implement a feature following existing patterns | Sonnet 4.6 | Pattern replication, high throughput | +| Debug a subtle intermittent issue | Opus 4.6 | Requires deep reasoning about state/timing | +| 
Fix a compilation error | Sonnet 4.6 | Clear error, mechanical fix | +| Write tests for existing code | Sonnet 4.6 | Test patterns are established | +| Architecture review / PR review | Opus 4.6 | Subtle issues need deep analysis | +| Code review for anti-patterns | Sonnet 4.6 | Pattern matching, well-defined rules | +| Refactor across many files (same pattern) | Sonnet 4.6 | Volume + consistency, not deep reasoning | +| Design database schema from requirements | Opus 4.6 | Normalization trade-offs, domain modeling | +| Subagent: file lookup or search | Haiku 4.5 | Simple task, fast and cheap | +| Subagent: summarize a module | Haiku 4.5 | Straightforward reading + compression | +| Subagent: analyze dependencies | Sonnet 4.6 | Needs to reason about structure | +| Working in a very large codebase | Sonnet 4.6 | Large context window + discipline | +| End-of-day wrap-up / handoff | Sonnet 4.6 | Structured capture, no deep reasoning | diff --git a/.opencode/skills/modern-csharp/SKILL.md b/.opencode/skills/modern-csharp/SKILL.md new file mode 100644 index 00000000..20c95ef5 --- /dev/null +++ b/.opencode/skills/modern-csharp/SKILL.md @@ -0,0 +1,181 @@ +--- +name: modern-csharp +description: > + Modern C# language features for .NET 10 and C# 14. Covers primary constructors, + collection expressions, the field keyword, extension members, records, pattern + matching, spans, and raw string literals. + Load this skill when writing any new C# code, reviewing existing code for + modernization, using "modern C#", "C# 14", "primary constructor", "collection + expression", "records", "pattern matching", "span", "field keyword", or + "extension members". Always loaded as the baseline for all agents. +--- + +# Modern C# (C# 14 / .NET 10) + +## Core Principles + +1. **Use the newest stable features** — C# 14 is the target. Prefer language-level constructs over library workarounds. +2. 
**Readability over cleverness** — Pattern matching and expression-bodied members improve readability when used appropriately; deeply nested patterns do not. +3. **Value types where possible** — Prefer `record struct`, `Span<T>`, and stack allocation to reduce GC pressure. +4. **Immutability by default** — Use `record`, `readonly`, `init`, and `required` to make illegal states unrepresentable. + +## Patterns + +### Well-Known Features Quick Reference + +| Feature | Usage | Example | +|---------|-------|---------| +| Primary constructors | DI injection, eliminate field assignments | `public class OrderService(IOrderRepo repo, TimeProvider clock) { }` | +| Collection expressions | `[]` for all collection types + spread | `List<string> names = ["Alice", "Bob"];` / `int[] all = [..a, ..b, 99];` | +| Records | DTOs, value objects, immutable data | `public record CreateOrderRequest(string CustomerId, List<OrderItem> Items);` | +| `readonly record struct` | Small stack-allocated value types | `public readonly record struct Money(decimal Amount, string Currency);` | +| Pattern matching | Switch expressions, list/property patterns | `order switch { { Total: > 1000 } => "Premium", _ => "Standard" };` | +| List patterns | Deconstruct arrays/lists | `items switch { [] => "Empty", [var x] => $"One: {x}", [var f, .., var l] => $"{f}..{l}" };` | +| `Span<T>` | Zero-allocation slicing | `ReadOnlySpan<char> trimmed = input.AsSpan().Trim(); int.TryParse(trimmed[4..], out var id);` | +| Raw string literals | Multi-line SQL, JSON, XML | `var sql = """ SELECT ... """;` / interpolated: `$$""" {"id": "{{id}}"} """;` | +| `required` members | Enforce initialization | `public required string ConnectionString { get; init; }` | +| `is` pattern + extraction | Null/type/property check | `if (result is { IsSuccess: true, Value: var order }) { ... }` | + +### The `field` Keyword (C# 14) + +Access the auto-generated backing field in property accessors without declaring it manually. 
+ +```csharp +// GOOD — field keyword for validation in auto-property +public class Product +{ + public string Name + { + get => field; + set => field = value?.Trim() ?? throw new ArgumentNullException(nameof(value)); + } + + public decimal Price + { + get => field; + set => field = value >= 0 ? value : throw new ArgumentOutOfRangeException(nameof(value)); + } +} +``` + +#### Lazy Initialization with `field` + +```csharp +public class ProductCatalog +{ + // Lazy-load on first access — no manual Lazy or backing field + public IReadOnlyList Products + { + get => field ??= LoadProducts(); + } + + private static List LoadProducts() => /* expensive load */; +} +``` + +#### Change Notification with `field` + +```csharp +// INotifyPropertyChanged without manual backing fields +public class OrderViewModel : INotifyPropertyChanged +{ + public event PropertyChangedEventHandler? PropertyChanged; + + public string CustomerName + { + get => field; + set + { + if (field == value) return; + field = value; + PropertyChanged?.Invoke(this, new PropertyChangedEventArgs(nameof(CustomerName))); + } + } = ""; + + public decimal Total + { + get => field; + set + { + if (field == value) return; + field = value; + PropertyChanged?.Invoke(this, new PropertyChangedEventArgs(nameof(Total))); + } + } +} +``` + +### Extension Members (C# 14) + +Extension members replace static extension method classes with a cleaner syntax. + +```csharp +// GOOD — extension members (C# 14) +public extension OrderExtensions for Order +{ + public decimal TotalWithTax => Total * 1.2m; + + public bool IsHighValue => Total > 1000m; + + public string ToSummary() => $"Order #{Id}: {Total:C} ({Items.Count} items)"; +} +``` + +## Anti-patterns + +### Don't Use Obsolete Patterns When Modern Alternatives Exist + +```csharp +// BAD — manual backing field when field keyword works +private string _name; +public string Name +{ + get => _name; + set => _name = value ?? 
throw new ArgumentNullException(); +} + +// BAD — old-style collection initialization +var list = new List() { 1, 2, 3 }; + +// BAD — Tuple instead of record for domain types +(string Name, decimal Price) product = ("Widget", 9.99m); +// GOOD — record +public record Product(string Name, decimal Price); +``` + +### Don't Over-pattern-match + +```csharp +// BAD — deeply nested pattern that's hard to read +if (order is { Customer: { Address: { Country: { Code: "US" } } } }) + +// GOOD — extract to a clear method or use sequential checks +if (order.Customer.Address.Country.Code == "US") +``` + +### Don't Use `var` When the Type Is Not Obvious + +```csharp +// BAD — what type is this? +var result = Process(order); + +// GOOD — explicit type when not obvious +Result result = Process(order); +// Also GOOD — var is fine when type is apparent +var orders = new List(); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| DTO / API contract | `record` (reference type) | +| Small value object (2-3 fields) | `readonly record struct` | +| Service with DI | Primary constructor | +| Collection creation | Collection expression `[]` | +| Property with validation | `field` keyword | +| Multi-line string (SQL, JSON) | Raw string literal `"""` | +| Slicing strings/arrays | `Span` | +| Type checking + extraction | Pattern matching with `is` / `switch` | +| Enforced initialization | `required` modifier | +| Adding methods to external types | Extension members | diff --git a/.opencode/skills/openapi/SKILL.md b/.opencode/skills/openapi/SKILL.md new file mode 100644 index 00000000..db66efde --- /dev/null +++ b/.opencode/skills/openapi/SKILL.md @@ -0,0 +1,287 @@ +--- +name: openapi +description: > + Built-in OpenAPI support for .NET 10 applications. Covers document generation, + transformers, TypedResults metadata, security schemes, XML comments, build-time + generation, and multiple document support. No Swashbuckle needed. 
+ Load this skill when setting up API documentation, customizing OpenAPI output, + adding security schemes to docs, or when the user mentions "OpenAPI", + "AddOpenApi", "MapOpenApi", "document transformer", "operation transformer", + "schema transformer", "OpenAPI 3.1", "API documentation", "Swashbuckle + replacement", "Produces", "WithSummary", "WithDescription", "ProblemDetails", + "Kiota", or "client generation". +--- + +# OpenAPI + +## Core Principles + +1. **Built-in, not Swashbuckle** — .NET 10 ships `Microsoft.AspNetCore.OpenApi` as the official, framework-maintained OpenAPI solution. Swashbuckle was removed from templates in .NET 9 and is no longer recommended. +2. **TypedResults drive the schema** — `TypedResults.Ok()` automatically generates correct OpenAPI response schemas. `Results.Ok()` does not. Always use `TypedResults`. +3. **Transformers over workarounds** — Document, operation, and schema transformers compose cleanly. Use them for security schemes, global responses, and schema customization. +4. **Metadata on every endpoint** — Use `.WithName()`, `.WithSummary()`, `.WithTags()` on every endpoint. This metadata feeds directly into the OpenAPI spec and client generators. 
+ +## Patterns + +### Basic Setup + +```csharp +var builder = WebApplication.CreateBuilder(args); +builder.Services.AddOpenApi(); + +var app = builder.Build(); + +if (app.Environment.IsDevelopment()) +{ + app.MapOpenApi(); // Serves at /openapi/v1.json +} +``` + +### Endpoint Metadata + +```csharp +group.MapPost("/", CreateOrder) + .WithName("CreateOrder") + .WithSummary("Create a new order") + .WithDescription("Creates a new order for the specified customer.") + .Produces(StatusCodes.Status201Created) + .ProducesValidationProblem() + .ProducesProblem(StatusCodes.Status500InternalServerError); +``` + +With `TypedResults`, response metadata is inferred automatically: + +```csharp +static async Task, ValidationProblem>> CreateOrder( + CreateOrderRequest request, ISender sender, CancellationToken ct) +{ + var result = await sender.Send(new CreateOrder.Command(request), ct); + return result.IsSuccess + ? TypedResults.Created($"/api/orders/{result.Value.Id}", result.Value) + : TypedResults.ValidationProblem(result.Errors); +} +``` + +### Bearer Token Security Scheme + +```csharp +builder.Services.AddOpenApi(options => +{ + options.AddDocumentTransformer(); +}); + +internal sealed class BearerSecuritySchemeTransformer( + IAuthenticationSchemeProvider authSchemeProvider) : IOpenApiDocumentTransformer +{ + public async Task TransformAsync(OpenApiDocument document, + OpenApiDocumentTransformerContext context, CancellationToken ct) + { + var schemes = await authSchemeProvider.GetAllSchemesAsync(); + if (!schemes.Any(s => s.Name == "Bearer")) + return; + + document.Components ??= new OpenApiComponents(); + document.Components.SecuritySchemes = new Dictionary + { + ["Bearer"] = new OpenApiSecurityScheme + { + Type = SecuritySchemeType.Http, + Scheme = "bearer", + BearerFormat = "JWT", + In = ParameterLocation.Header + } + }; + + foreach (var operation in document.Paths.Values.SelectMany(p => p.Operations)) + { + operation.Value.Security ??= []; + 
operation.Value.Security.Add(new OpenApiSecurityRequirement + { + [new OpenApiSecuritySchemeReference("Bearer", document)] = [] + }); + } + } +} +``` + +### Document Info Transformer + +```csharp +builder.Services.AddOpenApi(options => +{ + options.AddDocumentTransformer((document, context, ct) => + { + document.Info = new() + { + Title = "Checkout API", + Version = "v1", + Description = "API for processing orders and payments." + }; + return Task.CompletedTask; + }); +}); +``` + +### Multiple OpenAPI Documents + +```csharp +builder.Services.AddOpenApi("v1"); +builder.Services.AddOpenApi("internal", options => +{ + options.AddDocumentTransformer(); +}); + +// Endpoints choose their document via WithGroupName +app.MapGet("/public", () => "Hello").WithGroupName("v1"); +app.MapGet("/admin", () => "Secret").WithGroupName("internal"); +``` + +Endpoints without `.WithGroupName()` appear in all documents. + +### XML Documentation Comments (.NET 10) + +Enable in the project file — the source generator extracts ``, ``, `` tags automatically: + +```xml + + true + +``` + +```csharp +/// Retrieves a project board by ID. +/// The project board ID. +/// Returns the project board. +/// Board not found. +static async Task, NotFound>> GetBoard(int id, AppDbContext db) +{ + var board = await db.Boards.FindAsync(id); + return board is not null ? TypedResults.Ok(board) : TypedResults.NotFound(); +} +``` + +XML comments on lambdas are not captured by the compiler. Use named methods. 
+ +### Schema Transformer + +```csharp +options.AddSchemaTransformer((schema, context, ct) => +{ + if (context.JsonTypeInfo.Type == typeof(decimal)) + { + schema.Format = "decimal"; + } + return Task.CompletedTask; +}); +``` + +### Per-Endpoint Operation Transformer (.NET 10) + +```csharp +app.MapGet("/old", () => "deprecated") + .AddOpenApiOperationTransformer((operation, context, ct) => + { + operation.Deprecated = true; + return Task.CompletedTask; + }); +``` + +### Build-Time Document Generation + +```xml + + + . + +``` + +The spec file is generated in the output directory during build. + +### YAML Endpoint (.NET 10) + +```csharp +app.MapOpenApi("/openapi/{documentName}.yaml"); +``` + +## Anti-patterns + +### Don't Use Swashbuckle for New Projects + +```csharp +// BAD — removed from .NET 9+ templates, maintenance concerns +builder.Services.AddSwaggerGen(); +app.UseSwagger(); +app.UseSwaggerUI(); + +// GOOD — built-in OpenAPI +builder.Services.AddOpenApi(); +app.MapOpenApi(); +``` + +### Don't Use WithOpenApi() in .NET 10 + +```csharp +// BAD — deprecated, produces ASPDEPR002 warning +app.MapGet("/", () => "hello").WithOpenApi(op => { op.Deprecated = true; return op; }); + +// GOOD — use per-endpoint operation transformer +app.MapGet("/", () => "hello") + .AddOpenApiOperationTransformer((op, ctx, ct) => + { + op.Deprecated = true; + return Task.CompletedTask; + }); +``` + +### Don't Use Untyped Results + +```csharp +// BAD — Results.Ok doesn't contribute to OpenAPI schema +static async Task GetOrder(Guid id, AppDbContext db) +{ + var order = await db.Orders.FindAsync(id); + return order is not null ? Results.Ok(order) : Results.NotFound(); +} + +// GOOD — TypedResults with union return type +static async Task, NotFound>> GetOrder(Guid id, AppDbContext db) +{ + var order = await db.Orders.FindAsync(id); + return order is not null ? 
TypedResults.Ok(order) : TypedResults.NotFound(); +} +``` + +### Don't Skip WithName on Endpoints + +```csharp +// BAD — client generators produce poor method names without operationId +group.MapGet("/{id:guid}", GetOrder); + +// GOOD — operationId feeds into generated client method names +group.MapGet("/{id:guid}", GetOrder).WithName("GetOrder"); +``` + +### Don't Use OpenApiAny in .NET 10 + +```csharp +// BAD — OpenApiAny types removed in Microsoft.OpenApi v2.x +schema.Example = new OpenApiString("2025-01-01"); + +// GOOD — use JsonNode from System.Text.Json.Nodes +schema.Example = JsonValue.Create("2025-01-01"); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| New API project | `AddOpenApi()` + `MapOpenApi()` (built-in) | +| API documentation UI | Scalar (`MapScalarApiReference()`) | +| Security schemes in docs | Document transformer with `IOpenApiDocumentTransformer` | +| Response documentation | `TypedResults` with union return types | +| XML doc integration | `true` | +| Multiple API versions | Multiple `AddOpenApi("v1")` calls + `WithGroupName()` | +| Client code generation | Kiota (Microsoft recommended) or NSwag | +| Build-time spec | `Microsoft.Extensions.ApiDescription.Server` package | +| OpenAPI version | 3.1 (default in .NET 10), force 3.0 if consumers require it | +| Per-endpoint customization | `.AddOpenApiOperationTransformer()` on the endpoint | diff --git a/.opencode/skills/opentelemetry/SKILL.md b/.opencode/skills/opentelemetry/SKILL.md new file mode 100644 index 00000000..6db30542 --- /dev/null +++ b/.opencode/skills/opentelemetry/SKILL.md @@ -0,0 +1,271 @@ +--- +name: opentelemetry +description: > + OpenTelemetry observability for .NET 10 applications. Covers traces, metrics, + and logs using the OpenTelemetry SDK with OTLP export. Includes custom + ActivitySource, IMeterFactory metrics, resource configuration, and Aspire + Dashboard integration. 
+ Load this skill when setting up distributed tracing, custom metrics, OTLP + export, or when the user mentions "OpenTelemetry", "OTLP", "traces", "spans", + "Activity", "ActivitySource", "metrics", "IMeterFactory", "Meter", "Counter", + "Histogram", "Gauge", "telemetry", "observability", "distributed tracing", + "OTEL", or "Aspire Dashboard". +--- + +# OpenTelemetry + +## Core Principles + +1. **Three pillars, one setup** — Configure traces, metrics, and logs through a single `AddOpenTelemetry()` call. Use `UseOtlpExporter()` for cross-cutting export to any OTLP-compatible backend. +2. **Use `IMeterFactory` for metrics** — Never create `Meter` instances with `new`. The factory manages lifetime through DI and prevents leaks. +3. **Null-safe activities** — `StartActivity()` returns `null` when no listener is attached. Always use `?.` when setting tags or events. +4. **Environment variables over code** — Use `OTEL_EXPORTER_OTLP_ENDPOINT` and `OTEL_SERVICE_NAME` so deployments control telemetry routing without code changes. +5. **Low-cardinality metric tags** — Keep metric tag combinations under ~1000 per instrument. Use span attributes or logs for high-cardinality data like user IDs or request IDs. 
+

## Patterns

### Full Setup with All Three Signals

```csharp
// Program.cs
var builder = WebApplication.CreateBuilder(args);

builder.Services.AddOpenTelemetry()
    .ConfigureResource(resource => resource
        .AddService(
            serviceName: builder.Environment.ApplicationName,
            serviceVersion: "1.0.0"))
    .WithTracing(tracing => tracing
        .AddAspNetCoreInstrumentation()
        .AddHttpClientInstrumentation()
        .AddEntityFrameworkCoreInstrumentation()
        .AddSource("MyApp.Orders"))
    .WithMetrics(metrics => metrics
        .AddAspNetCoreInstrumentation()
        .AddHttpClientInstrumentation()
        .AddRuntimeInstrumentation()
        .AddMeter("MyApp.Orders"))
    .WithLogging();

// Cross-cutting OTLP export for traces, metrics, AND logs (configured via env vars).
// UseOtlpExporter() registers the exporter for all three signals — do not also call
// AddOtlpExporter() on an individual signal, or it throws NotSupportedException.
builder.Services.AddOpenTelemetry()
    .UseOtlpExporter();
```

The OTLP endpoint defaults to `http://localhost:4317` (gRPC). Override via:
```
OTEL_EXPORTER_OTLP_ENDPOINT=http://collector:4317
OTEL_SERVICE_NAME=MyApp.Api
```

### Custom Metrics with IMeterFactory

Register a metrics class as a singleton. `IMeterFactory` handles `Meter` disposal through DI. 
+

```csharp
public sealed class OrderMetrics
{
    private readonly Counter<long> _ordersCreated;
    private readonly Histogram<double> _orderDuration;
    private readonly UpDownCounter<long> _activeOrders;
    private readonly Gauge<double> _queueDepth;

    public OrderMetrics(IMeterFactory meterFactory)
    {
        var meter = meterFactory.Create("MyApp.Orders");

        _ordersCreated = meter.CreateCounter<long>(
            "myapp.orders.created", "{orders}", "Number of orders created");

        _orderDuration = meter.CreateHistogram<double>(
            "myapp.orders.duration", "s", "Order processing duration",
            advice: new InstrumentAdvice<double>
            {
                HistogramBucketBoundaries = [0.01, 0.05, 0.1, 0.5, 1, 5, 10]
            });

        _activeOrders = meter.CreateUpDownCounter<long>(
            "myapp.orders.active", "{orders}", "Currently active orders");

        _queueDepth = meter.CreateGauge<double>(
            "myapp.orders.queue_depth", "{items}", "Current queue depth");
    }

    public void OrderCreated() => _ordersCreated.Add(1);
    public void RecordDuration(double seconds) => _orderDuration.Record(seconds);
    public void OrderStarted() => _activeOrders.Add(1);
    public void OrderCompleted() => _activeOrders.Add(-1);
    public void SetQueueDepth(double depth) => _queueDepth.Record(depth);
}

// Registration
builder.Services.AddSingleton<OrderMetrics>();
```

### Multi-Dimensional Metric Tags

Three or fewer tags are allocation-free. For more, use `TagList`. 
+

```csharp
// Allocation-free (3 or fewer tags)
_ordersCreated.Add(1,
    new KeyValuePair<string, object?>("order.type", "standard"),
    new KeyValuePair<string, object?>("payment.method", "credit_card"));

// 4+ tags — use TagList to avoid allocations
var tags = new TagList
{
    { "order.type", "standard" },
    { "payment.method", "credit_card" },
    { "region", "us-east" },
    { "priority", "high" }
};
_ordersCreated.Add(1, tags);
```

### Custom ActivitySource for Distributed Tracing

```csharp
public sealed class OrderService(ILogger<OrderService> logger)
{
    private static readonly ActivitySource Source = new("MyApp.Orders");

    public async Task<Order> ProcessOrderAsync(CreateOrderRequest request, CancellationToken ct)
    {
        using var activity = Source.StartActivity("ProcessOrder", ActivityKind.Internal);
        activity?.SetTag("order.customer_id", request.CustomerId);

        try
        {
            await ValidateOrder(request, ct);
            activity?.AddEvent(new ActivityEvent("OrderValidated"));

            var order = await SaveOrder(request, ct);
            activity?.SetTag("order.id", order.Id.ToString());
            activity?.SetStatus(ActivityStatusCode.Ok);
            return order;
        }
        catch (Exception ex)
        {
            activity?.SetStatus(ActivityStatusCode.Error, ex.Message);
            activity?.RecordException(ex);
            throw;
        }
    }
}
```

Register the source: `.AddSource("MyApp.Orders")` in the tracing builder.

### Aspire Dashboard for Local Development

Run the standalone Aspire Dashboard without Aspire orchestration:

```bash
docker run --rm -it -p 18888:18888 -p 4317:18889 \
  mcr.microsoft.com/dotnet/aspire-dashboard:latest
```

Then point your app at it:
```
OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
```

Dashboard UI is at `http://localhost:18888`.

### Source-Generated Logging with OTel

For maximum performance, use `[LoggerMessage]` — eliminates boxing and allocations. 
+ +```csharp +public partial class OrderService(ILogger logger) +{ + [LoggerMessage(Level = LogLevel.Information, + Message = "Processing order {OrderId} for customer {CustomerId}")] + partial void LogOrderProcessing(Guid orderId, Guid customerId); +} +``` + +OpenTelemetry logging automatically includes `TraceId` and `SpanId` when an `Activity` is current. + +## Anti-patterns + +### Don't Create Meters Per Request + +```csharp +// BAD — new Meter per request causes memory leaks +public void HandleRequest() +{ + var meter = new Meter("MyApp"); + meter.CreateCounter("requests").Add(1); +} + +// GOOD — singleton via IMeterFactory +public class MyMetrics(IMeterFactory meterFactory) +{ + private readonly Counter _requests = + meterFactory.Create("MyApp").CreateCounter("myapp.requests"); + public void RequestHandled() => _requests.Add(1); +} +``` + +### Don't Skip Null Checks on Activity + +```csharp +// BAD — NullReferenceException when no listener is attached +using var activity = source.StartActivity("Work"); +activity.SetTag("key", "value"); + +// GOOD — null-safe +activity?.SetTag("key", "value"); +``` + +### Don't Use High-Cardinality Metric Tags + +```csharp +// BAD — unbounded cardinality causes memory explosion in collectors +_counter.Add(1, new("request.id", Guid.NewGuid().ToString())); +_counter.Add(1, new("user.id", userId)); + +// GOOD — low-cardinality dimensions only +_counter.Add(1, new("http.method", "GET"), new("http.status_code", 200)); +``` + +### Don't Mix UseOtlpExporter with AddOtlpExporter + +```csharp +// BAD — throws NotSupportedException at runtime +builder.Services.AddOpenTelemetry() + .UseOtlpExporter() + .WithTracing(t => t.AddOtlpExporter()); + +// GOOD — use one approach +builder.Services.AddOpenTelemetry().UseOtlpExporter(); +``` + +### Don't Forget to Register Custom Sources + +```csharp +// BAD — activities silently dropped (no listener registered) +var source = new ActivitySource("MyApp.Custom"); +using var activity = 
source.StartActivity("Work"); // null! + +// GOOD — register in the tracing builder +otel.WithTracing(t => t.AddSource("MyApp.Custom")); +otel.WithMetrics(m => m.AddMeter("MyApp.Custom")); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Full observability setup | `AddOpenTelemetry()` with all three signals + `UseOtlpExporter()` | +| Custom business metrics | `IMeterFactory` + singleton metrics class | +| Custom trace spans | `ActivitySource` + `StartActivity()` | +| Local development backend | Aspire Dashboard standalone container | +| Production backend | OTel Collector as intermediary to Grafana/Datadog/etc. | +| Sampling in production | `OTEL_TRACES_SAMPLER=parentbased_traceidratio` with 10% ratio | +| High-performance logging | `[LoggerMessage]` source generator | +| Metric tag cardinality | Max ~1000 combinations per instrument | +| Environment configuration | `OTEL_*` env vars (also work via `appsettings.json`) | diff --git a/.opencode/skills/project-setup/SKILL.md b/.opencode/skills/project-setup/SKILL.md new file mode 100644 index 00000000..0474ffa2 --- /dev/null +++ b/.opencode/skills/project-setup/SKILL.md @@ -0,0 +1,227 @@ +--- +name: project-setup +description: > + Interactive project setup, health check, and migration workflows. + Guides developers through project initialization with customized CLAUDE.md generation, + codebase health analysis using MCP tools, and .NET version migration. + Load when: "init project", "setup project", "new project", "health check", + "analyze project", "project report", "migrate", "upgrade dotnet", + "upgrade .NET", "generate CLAUDE.md". +--- + +# Project Setup & Workflows + +## Core Principles + +1. **Interactive over passive** — Don't dump a generic template. Ask questions, gather context, then generate a customized result tailored to the specific project. +2. 
**MCP-driven analysis** — Use Roslyn MCP tools for health checks and migration analysis instead of reading files manually. Token-efficient and semantically accurate. +3. **Generate, don't template** — CLAUDE.md files should be fully populated with specific choices (not `[PLACEHOLDER]` values). Every section should reflect the actual project decisions. +4. **Architecture-first** — Every workflow starts by understanding or selecting the project's architecture. Architecture drives folder structure, naming, patterns, and test organization. +5. **Verify after action** — After any workflow completes (init, migration, health check), verify the result. Run builds, tests, or health checks to confirm success. + +## Patterns + +### Project Init Workflow + +Interactive conversational flow for new projects. Execute steps in order, waiting for user input at each decision point. + +**Step 1: Project Identity** +Ask: +- Project name (used for solution, namespaces, CLAUDE.md) +- Project type: API, Blazor, Worker Service, Class Library, or Modular Monolith + +**Step 2: Architecture Selection** +Delegate to the `architecture-advisor` skill: +- Run the full questionnaire (15+ questions across 6 categories) +- Recommend VSA, Clean Architecture, DDD + CA, or Modular Monolith +- Explain the rationale for the recommendation + +**Step 3: Tech Stack Selection** +Ask about each dimension with a recommended default: + +| Dimension | Options | Default | +|-----------|---------|---------| +| Database | PostgreSQL, SQL Server, SQLite | PostgreSQL | +| Auth | JWT Bearer, OIDC (Keycloak/Auth0), None | JWT Bearer | +| Caching | HybridCache, Redis, None | HybridCache | +| Messaging | Wolverine (RabbitMQ), MassTransit, None | None (add later) | +| Observability | Serilog + OpenTelemetry, Basic logging | Serilog + OTEL | +| Resilience | Polly v8 pipelines, Basic retry | Polly v8 | + +**Step 4: Generate CLAUDE.md** +Generate a customized CLAUDE.md with all choices baked in: + +```markdown +# 
[ProjectName] — Development Instructions + +## Architecture +This project uses [Selected Architecture]. +[Architecture-specific conventions and rules] + +## Tech Stack +- **Runtime**: .NET 10 / C# 14 +- **Database**: [Selected] with EF Core 10 +- **Auth**: [Selected] +- **Caching**: [Selected] +- **Observability**: [Selected] + +## Conventions +[Architecture-specific patterns, naming, folder structure] + +## Skills +[List of relevant skills to load based on choices] +``` + +**Step 5: Next Steps** +Suggest: +- Initial project structure with `dotnet new` commands +- Directory.Build.props with common settings +- First feature scaffold to validate the architecture + +### Health Check Workflow + +Automated codebase analysis that produces a graded report card. Run this when asked to "check health", "analyze the project", or "how's the codebase". + +**Step 1: Solution Analysis** +``` +→ get_project_graph + Analyze: project count, dependency direction, target frameworks, naming consistency +``` + +**Step 2: Anti-pattern Scan** +``` +→ detect_antipatterns (scope: solution) + Count and categorize: async void, sync-over-async, DateTime.Now, new HttpClient(), etc. +``` + +**Step 3: Compiler Diagnostics** +``` +→ get_diagnostics (severity: warning, scope: solution) + Count warnings by category: CS8600 (nullability), CS0219 (unused vars), etc. +``` + +**Step 4: Dead Code Detection** +``` +→ find_dead_code (scope: solution) + Identify unused types, methods, and properties that can be removed. +``` + +**Step 5: Test Coverage Assessment** +``` +→ get_test_coverage_map + Check: test project exists, percentage of types with corresponding tests. 
+``` + +**Step 6: Report Card** + +Generate a structured report: + +``` +## Codebase Health Report + +### Grade: B+ (82/100) + +| Category | Score | Issues | +|----------|-------|--------| +| Architecture | 18/20 | Clean dependency direction, 1 questionable reference | +| Anti-patterns | 14/20 | 3 DateTime.Now usages, 1 async void | +| Diagnostics | 20/20 | 0 warnings | +| Dead Code | 16/20 | 4 unused methods found | +| Test Coverage | 14/20 | 70% of types have test coverage | + +### Priority Actions +1. **Replace DateTime.Now with TimeProvider** (3 locations) — error-handling skill +2. **Fix async void** in EventService.OnMessage — critical, exceptions will be unobserved +3. **Remove dead code** — 4 unused methods in OrderService, PaymentHelper +4. **Add tests** for ShippingService, NotificationService +``` + +Grading scale: +- **A (90-100)**: Production-ready, well-maintained +- **B (75-89)**: Good shape, minor improvements needed +- **C (60-74)**: Needs attention, several areas to improve +- **D (40-59)**: Significant issues, prioritize cleanup +- **F (<40)**: Critical problems, stop feature work and fix + +### Migration Workflow + +> For complete migration workflows (EF Core, NuGet, .NET version upgrades), see the **migration-workflow** skill. + +## Anti-patterns + +### Skipping Architecture Questionnaire + +``` +# BAD — Generating CLAUDE.md without asking about the project +"Here's your CLAUDE.md with VSA architecture..." +``` + +``` +# GOOD — Running the full questionnaire first +"Let me understand your project first. What's the domain complexity? How many developers? +How important is independent deployability? ..." +→ Based on answers: "I recommend Clean Architecture because..." +``` + +### Generic CLAUDE.md with Placeholders + +```markdown + +## Architecture +This project uses [ARCHITECTURE]. +Database: [DATABASE] +Auth: [AUTH_METHOD] +``` + +```markdown + +## Architecture +This project uses Vertical Slice Architecture. 
+Database: PostgreSQL with EF Core 10 +Auth: JWT Bearer with ASP.NET Core Identity +``` + +### Health Check Without MCP Tools + +``` +# BAD — Reading random files and guessing at quality +"I read Program.cs and it looks fine..." +``` + +``` +# GOOD — Systematic analysis with MCP tools +→ get_project_graph: 5 projects, clean dependency direction +→ detect_antipatterns: 3 violations (2 warning, 1 error) +→ get_diagnostics: 7 warnings (5 CS8600, 2 CS0219) +→ find_dead_code: 4 unused symbols +→ get_test_coverage_map: 65% coverage +Grade: B (78/100) +``` + +### Running Migration Without a Plan + +```bash +# BAD — Just changing the TFM and hoping for the best +sed -i 's/net8.0/net10.0/g' **/*.csproj +dotnet build # 47 errors +``` + +```bash +# GOOD — Systematic migration with verification at each step +# Phase 1: Update framework → build → fix +# Phase 2: Update packages → build → fix +# Phase 3: Adopt new features → build → test +# Phase 4: Full verification +``` + +## Decision Guide + +| Scenario | Workflow | Key Tool | +|----------|----------|----------| +| New greenfield project | Project Init | architecture-advisor skill | +| Joining existing project | Health Check → Init (for CLAUDE.md) | get_project_graph | +| "How's our codebase?" 
| Health Check | detect_antipatterns, get_diagnostics | +| "Upgrade to .NET 10" | Migration | get_project_graph, breaking-changes.md | +| "Generate CLAUDE.md for this project" | Project Init (skip new project steps) | get_project_graph | +| Code quality declining | Health Check → set baseline → periodic re-check | All MCP tools | +| Onboarding new developers | Health Check + Init (generates CLAUDE.md documenting conventions) | convention-learner skill | diff --git a/.opencode/skills/project-structure/SKILL.md b/.opencode/skills/project-structure/SKILL.md new file mode 100644 index 00000000..55bc7133 --- /dev/null +++ b/.opencode/skills/project-structure/SKILL.md @@ -0,0 +1,205 @@ +--- +name: project-structure +description: > + .NET solution and project structure conventions. Covers .slnx format, + Directory.Build.props, Directory.Packages.props for central package management, + global usings, and naming conventions. + Load this skill when setting up a new solution, adding projects, configuring + build properties, or when the user mentions "solution structure", ".slnx", + "Directory.Build.props", "central package management", "Directory.Packages.props", + "global usings", ".editorconfig", "project layout", or "naming conventions". +--- + +# Project Structure + +## Core Principles + +1. **Central package management** — Use `Directory.Packages.props` to manage NuGet package versions in one place. No version numbers in individual `.csproj` files. +2. **Shared build properties** — Use `Directory.Build.props` for common settings (target framework, nullable, implicit usings). Don't repeat in every project. +3. **.slnx for solutions** — The new XML-based solution format is cleaner and more merge-friendly than the legacy `.sln` format. +4. **src/tests separation** — Source projects in `src/`, test projects in `tests/`. Clear boundary. 
+ +## Patterns + +### Solution Layout + +``` +MyApp/ +├── MyApp.slnx # Solution file +├── Directory.Build.props # Shared MSBuild properties +├── Directory.Packages.props # Central package management +├── .editorconfig # Code style rules +├── .gitignore +├── global.json # SDK version pinning +├── src/ +│ ├── MyApp.Api/ # Web API (entry point) +│ │ ├── MyApp.Api.csproj +│ │ ├── Program.cs +│ │ └── Features/ +│ ├── MyApp.Domain/ # Domain entities, value objects (optional) +│ │ └── MyApp.Domain.csproj +│ └── MyApp.Infrastructure/ # EF Core, external services (optional) +│ └── MyApp.Infrastructure.csproj +└── tests/ + └── MyApp.Api.Tests/ + └── MyApp.Api.Tests.csproj +``` + +### Directory.Build.props + +```xml + + + net10.0 + 14 + enable + enable + true + true + + +``` + +### Directory.Packages.props (Central Package Management) + +```xml + + + true + + + + + + + + + + + + + + + + + + + + + + +``` + +### Project File (.csproj) with Central Package Management + +```xml + + + + + + + + + + + + + + + + + + +``` + +### global.json (SDK Pinning) + +```json +{ + "sdk": { + "version": "10.0.100", + "rollForward": "latestFeature" + } +} +``` + +### .slnx Solution Format + +```xml + + + + + + + + + + +``` + +### Naming Conventions + +| Element | Convention | Example | +|---------|-----------|---------| +| Solution | `CompanyName.AppName` or `AppName` | `MyApp.slnx` | +| Project | `AppName.Layer` | `MyApp.Api`, `MyApp.Domain` | +| Namespace | Matches folder path | `MyApp.Api.Features.Orders` | +| Feature folder | PascalCase, plural | `Features/Orders/` | +| Test project | `ProjectName.Tests` | `MyApp.Api.Tests` | + +## Anti-patterns + +### Don't Scatter Package Versions + +```xml + + + + + + + +``` + +### Don't Repeat Build Properties + +```xml + + + net10.0 + enable + enable + + + +``` + +### Don't Mix Source and Test Projects + +``` +# BAD — tests mixed with source +src/ + MyApp.Api/ + MyApp.Api.Tests/ # test project in src/ + +# GOOD — clear separation +src/ + MyApp.Api/ 
+tests/ + MyApp.Api.Tests/ +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| New solution | `.slnx` format | +| Package version management | `Directory.Packages.props` (central) | +| Shared build settings | `Directory.Build.props` | +| SDK version pinning | `global.json` | +| Common using directives | Global usings in `Directory.Build.props` | +| Small API (1-2 devs) | Single project (`MyApp.Api`) | +| Medium API (3-5 devs) | 2-3 projects (`Api`, `Domain`, `Infrastructure`) | +| Large / modular app | Module-per-project with shared `Contracts` | diff --git a/.opencode/skills/resilience/SKILL.md b/.opencode/skills/resilience/SKILL.md new file mode 100644 index 00000000..2158383c --- /dev/null +++ b/.opencode/skills/resilience/SKILL.md @@ -0,0 +1,344 @@ +--- +name: resilience +description: > + Resilience patterns for .NET 10 applications using Polly v8. + Covers retry, circuit breaker, timeout, fallback, rate limiter, hedging, + and composing resilience pipelines. + Load this skill when implementing retry logic, circuit breakers, handling + transient failures, or when the user mentions "Polly", "resilience", + "retry", "circuit breaker", "timeout", "fallback", "rate limit", + "hedging", "transient fault", "HttpClient resilience", or "resilience pipeline". +--- + +# Resilience + +## Core Principles + +1. **Polly v8 resilience pipelines, not v7 policies** — Polly v8 replaced `Policy` with `ResiliencePipeline`. Never use `PolicyBuilder`, `Policy.Handle<>()`, or `ISyncPolicy`. The new API is composable, type-safe, and integrates natively with `IHttpClientFactory`. +2. **Configure via `AddResilienceHandler`, not manual wrapping** — For HTTP calls, use `Microsoft.Extensions.Http.Resilience` which adds pipelines directly to `HttpClient` via DI. No manual `ExecuteAsync` wrapping. +3. **Compose strategies, don't nest them** — A single `ResiliencePipeline` can chain retry + circuit breaker + timeout. 
Strategies execute outer-to-inner (first added = outermost). No need for nested try/catch or manual orchestration. +4. **Always set timeouts** — Every external call needs a timeout. Use Polly's `AddTimeout()` as the innermost strategy so it applies per-attempt, and optionally an outer timeout for total elapsed time. +5. **Instrument everything** — Polly v8 emits `Metering` events and supports `TelemetryOptions` for OpenTelemetry. Use them to monitor retry rates, circuit breaker state, and timeout frequency. + +## Patterns + +### HTTP Client Resilience (Recommended Default) + +```csharp +// Program.cs — Standard resilience handler covers 90% of use cases +builder.Services.AddHttpClient(client => +{ + client.BaseAddress = new Uri("https://api.payments.example.com"); +}) +.AddStandardResilienceHandler(); // Retry + circuit breaker + timeout out of the box + +// That's it. The standard handler configures: +// - Retry: 3 attempts, exponential backoff, jitter +// - Circuit breaker: 10% failure ratio over 30s sampling, 30s break +// - Attempt timeout: 10s per attempt +// - Total request timeout: 30s +``` + +**Why**: `AddStandardResilienceHandler()` from `Microsoft.Extensions.Http.Resilience` applies production-ready defaults. Override only when you need different thresholds. 
+ +### Custom HTTP Resilience Configuration + +```csharp +builder.Services.AddHttpClient(client => +{ + client.BaseAddress = new Uri("https://api.catalog.example.com"); +}) +.AddResilienceHandler("catalog", builder => +{ + // Total timeout — outermost, caps total elapsed time + builder.AddTimeout(TimeSpan.FromSeconds(15)); + + // Retry — exponential backoff with jitter + builder.AddRetry(new HttpRetryStrategyOptions + { + MaxRetryAttempts = 3, + BackoffType = DelayBackoffType.Exponential, + UseJitter = true, + Delay = TimeSpan.FromMilliseconds(500), + ShouldHandle = static args => ValueTask.FromResult( + args.Outcome.Result?.StatusCode is HttpStatusCode.RequestTimeout + or HttpStatusCode.TooManyRequests + or HttpStatusCode.ServiceUnavailable + || args.Outcome.Exception is HttpRequestException) + }); + + // Circuit breaker — prevent cascading failures + builder.AddCircuitBreaker(new HttpCircuitBreakerStrategyOptions + { + FailureRatio = 0.5, + SamplingDuration = TimeSpan.FromSeconds(10), + MinimumThroughput = 10, + BreakDuration = TimeSpan.FromSeconds(30) + }); + + // Per-attempt timeout — innermost + builder.AddTimeout(TimeSpan.FromSeconds(5)); +}); +``` + +**Why**: Named resilience handlers let you tune per-service. The order matters: total timeout > retry > circuit breaker > attempt timeout. 
+ +### Non-HTTP Resilience Pipeline + +```csharp +// For database calls, message queues, or any non-HTTP operation +builder.Services.AddResiliencePipeline("database", builder => +{ + builder + .AddRetry(new RetryStrategyOptions + { + MaxRetryAttempts = 3, + BackoffType = DelayBackoffType.Exponential, + Delay = TimeSpan.FromMilliseconds(200), + ShouldHandle = new PredicateBuilder() + .Handle() + .Handle(ex => + ex.Message.Contains("deadlock", StringComparison.OrdinalIgnoreCase)) + }) + .AddTimeout(TimeSpan.FromSeconds(10)); +}); + +// Inject and use +public sealed class OrderRepository( + AppDbContext db, + [FromKeyedServices("database")] ResiliencePipeline pipeline) +{ + public async Task GetByIdAsync(Guid id, CancellationToken ct) + { + return await pipeline.ExecuteAsync( + async token => await db.Orders.FindAsync([id], token), + ct); + } +} +``` + +**Why**: `AddResiliencePipeline` registers a named pipeline in DI. Inject with `[FromKeyedServices]` for clean, testable code. + +### Typed Resilience Pipeline + +```csharp +// When the operation returns a specific type, use ResiliencePipeline +builder.Services.AddResiliencePipeline("external-api", builder => +{ + builder + .AddFallback(new FallbackStrategyOptions + { + FallbackAction = static args => + { + var response = new HttpResponseMessage(HttpStatusCode.OK) + { + Content = new StringContent("{\"status\":\"degraded\",\"data\":[]}") + }; + return Outcome.FromResultAsValueTask(response); + }, + ShouldHandle = static args => ValueTask.FromResult( + args.Outcome.Exception is not null + || args.Outcome.Result?.IsSuccessStatusCode == false) + }) + .AddRetry(new RetryStrategyOptions + { + MaxRetryAttempts = 2, + Delay = TimeSpan.FromMilliseconds(500) + }) + .AddTimeout(TimeSpan.FromSeconds(5)); +}); +``` + +**Why**: Typed pipelines let you add fallback strategies that return a default value when all retries are exhausted — critical for graceful degradation. 
+ +### Hedging (Parallel Requests) + +```csharp +builder.Services.AddHttpClient() + .AddResilienceHandler("search-hedging", builder => + { + builder.AddHedging(new HttpHedgingStrategyOptions + { + MaxHedgedAttempts = 2, + Delay = TimeSpan.FromMilliseconds(500) // Send parallel request after 500ms + }); + builder.AddTimeout(TimeSpan.FromSeconds(3)); + }); +``` + +**Why**: Hedging sends a parallel request if the first hasn't responded within the delay. Use for latency-sensitive reads where you can tolerate duplicate work. + +### Telemetry Integration + +```csharp +builder.Services.AddResiliencePipeline("monitored", (builder, context) => +{ + builder + .AddRetry(new RetryStrategyOptions { MaxRetryAttempts = 3 }) + .AddCircuitBreaker(new CircuitBreakerStrategyOptions()) + .AddTimeout(TimeSpan.FromSeconds(10)); + + // Polly v8 emits metrics via System.Diagnostics.Metrics automatically + // Configure enrichment for better dashboards + builder.TelemetryListener = new TelemetryOptions + { + LoggerFactory = context.ServiceProvider.GetRequiredService() + }.TelemetryListener; +}); + +// In Program.cs — wire up OpenTelemetry to capture Polly metrics +builder.Services.AddOpenTelemetry() + .WithMetrics(metrics => metrics.AddMeter("Polly")); +``` + +### Rate Limiting (.NET Built-in) + +.NET provides built-in rate limiting middleware via `AddRateLimiter()` — no external packages needed. Algorithms: `AddFixedWindowLimiter`, `AddSlidingWindowLimiter`, `AddTokenBucketLimiter`, `AddConcurrencyLimiter`. 
+ +```csharp +builder.Services.AddRateLimiter(options => +{ + options.AddFixedWindowLimiter("fixed", opt => + { + opt.PermitLimit = 100; + opt.Window = TimeSpan.FromSeconds(60); + opt.QueueLimit = 0; + }); + + // Always return ProblemDetails with Retry-After on 429 + options.OnRejected = async (context, ct) => + { + context.HttpContext.Response.StatusCode = StatusCodes.Status429TooManyRequests; + if (context.Lease.TryGetMetadata(MetadataName.RetryAfter, out var retryAfter)) + context.HttpContext.Response.Headers.RetryAfter = + ((int)retryAfter.TotalSeconds).ToString(); + await context.HttpContext.Response.WriteAsJsonAsync( + new ProblemDetails { Title = "Too many requests", Status = 429 }, ct); + }; +}); + +app.UseRateLimiter(); +app.MapGet("/api/orders", ListOrders).RequireRateLimiting("fixed"); +``` + +## Anti-patterns + +### BAD: Using Polly v7 API + +```csharp +// BAD — v7 policy syntax, do not use +var retryPolicy = Policy + .Handle() + .WaitAndRetryAsync(3, attempt => TimeSpan.FromSeconds(Math.Pow(2, attempt))); + +var response = await retryPolicy.ExecuteAsync(() => httpClient.GetAsync("/api/data")); +``` + +### GOOD: Polly v8 Resilience Pipeline + +```csharp +// GOOD — v8 pipeline via DI +builder.Services.AddHttpClient() + .AddStandardResilienceHandler(); +``` + +--- + +### BAD: Wrapping Every Call Manually + +```csharp +// BAD — manual resilience per call site +public async Task GetOrderAsync(Guid id) +{ + try + { + return await _pipeline.ExecuteAsync(async ct => + await _httpClient.GetFromJsonAsync($"/orders/{id}", ct)); + } + catch (TimeoutRejectedException) + { + return Order.Empty; + } + catch (BrokenCircuitException) + { + return Order.Empty; + } +} +``` + +### GOOD: Pipeline Handles Everything via HttpClient DI + +```csharp +// GOOD — resilience is configured at the HttpClient level +public async Task GetOrderAsync(Guid id, CancellationToken ct) +{ + var response = await _httpClient.GetAsync($"/orders/{id}", ct); + if (!response.IsSuccessStatusCode) 
return null; + return await response.Content.ReadFromJsonAsync<Order>(ct); +} +``` + +--- + +### BAD: Retry on Non-Idempotent Operations + +```csharp +// BAD — retrying a POST that creates a resource risks duplicates +builder.AddRetry(new RetryStrategyOptions +{ + MaxRetryAttempts = 5 // This will create 5 orders on transient failures! +}); +``` + +### GOOD: Retry Only Idempotent Operations or Use Idempotency Keys + +```csharp +// GOOD — use idempotency key header for non-idempotent operations +builder.AddRetry(new HttpRetryStrategyOptions +{ + MaxRetryAttempts = 3, + ShouldHandle = static args => ValueTask.FromResult( + args.Outcome.Result?.StatusCode is HttpStatusCode.RequestTimeout + or HttpStatusCode.ServiceUnavailable) +}); + +// Pair with idempotency key in the request +httpClient.DefaultRequestHeaders.Add("Idempotency-Key", Guid.NewGuid().ToString()); +``` + +--- + +### BAD: Circuit Breaker Without Monitoring + +```csharp +// BAD — circuit breaker with no visibility into state changes +builder.AddCircuitBreaker(new CircuitBreakerStrategyOptions()); +// How do you know when it trips? You don't. 
+``` + +### GOOD: Circuit Breaker with Telemetry + +```csharp +// GOOD — Polly v8 metrics captured via OpenTelemetry +builder.Services.AddOpenTelemetry() + .WithMetrics(metrics => metrics.AddMeter("Polly")); + +// Dashboard alerts on: polly.circuit_breaker.state = Open +``` + +## Decision Guide + +| Scenario | Strategy | Configuration | +|----------|----------|---------------| +| HTTP calls to external APIs | `AddStandardResilienceHandler()` | Use defaults, override only specific thresholds | +| HTTP with custom thresholds | `AddResilienceHandler("name", ...)` | Named handler with per-service tuning | +| Database / EF Core calls | `AddResiliencePipeline("db", ...)` | Retry on deadlock/timeout, no circuit breaker | +| Message queue publishing | `AddResiliencePipeline("mq", ...)` | Retry with exponential backoff, timeout | +| Latency-sensitive reads | `AddHedging(...)` | Parallel request after delay threshold | +| Graceful degradation | `AddFallback(...)` | Return cached/default value on total failure | +| Per-attempt time limit | `AddTimeout(...)` innermost | 2-10s depending on operation | +| Total operation time limit | `AddTimeout(...)` outermost | Sum of all retries + buffer | +| Non-idempotent writes | Retry with idempotency key | Or no retry — fail fast | +| Read-heavy microservice | Standard handler + hedging | Low latency with redundancy | +| API rate limiting | `AddRateLimiter()` + `RequireRateLimiting()` | Fixed, sliding, or token bucket per endpoint | + diff --git a/.opencode/skills/scaffolding/SKILL.md b/.opencode/skills/scaffolding/SKILL.md new file mode 100644 index 00000000..1395138d --- /dev/null +++ b/.opencode/skills/scaffolding/SKILL.md @@ -0,0 +1,398 @@ +--- +name: scaffolding +description: > + Code scaffolding patterns for .NET 10 features, entities, and tests. + Generates complete feature slices, entities with EF Core configuration, + and integration tests following the project's chosen architecture. 
+ Load when: "scaffold", "create feature", "add feature", "new endpoint", + "generate", "add entity", "new entity", "scaffold test", "add module". +--- + +# Scaffolding + +## Core Principles + +1. **Architecture-aware generation** — Never scaffold without knowing the project's architecture (VSA, CA, DDD, Modular Monolith). If unknown, ask first or run the architecture-advisor questionnaire. +2. **Complete vertical slices** — Never generate half a feature. A scaffold includes endpoint, handler, validation, DTOs, EF configuration, and tests as a single unit. +3. **Tests included by default** — Every scaffolded feature includes at least one integration test using `WebApplicationFactory` + `Testcontainers`. Skip only if explicitly told to. +4. **Modern C# 14 patterns** — Primary constructors, collection expressions, `file`-scoped types, records for DTOs, `sealed` on all handler classes. +5. **Convention-matching** — Before generating, check existing code for naming patterns (`*Handler`, `*Service`, `*Endpoint`), folder structure, and access modifiers. Match what exists. + +### Scaffold Checklist (MANDATORY) + +Every scaffolded feature MUST include ALL of the following. Do not skip any item: + +- [ ] **Result pattern** — Handlers return `Result`, not raw responses. 
Endpoints map Result to HTTP (success → TypedResults, failure → `ToProblemDetails()`) +- [ ] **CancellationToken** on every async method and passed to every async call +- [ ] **FluentValidation** validator class with meaningful rules (ranges, required fields, max lengths) +- [ ] **ValidationFilter wiring** — `.AddEndpointFilter>()` on mutating endpoints +- [ ] **OpenAPI metadata** — `.WithName()`, `.WithSummary()`, `.Produces()`, `.ProducesValidationProblem()`, `.ProducesProblem(404)` +- [ ] **Pagination** on list endpoints — `page`, `pageSize` with bounded max (e.g., 50) +- [ ] **Global error handler** — Verify `app.UseExceptionHandler()` exists in Program.cs; scaffold if missing +- [ ] **appsettings.json** — Verify connection string exists; scaffold with placeholder if missing +- [ ] **Integration test** with proper DI replacement using `services.RemoveAll>()` + +## Patterns + +### Feature Scaffold — Vertical Slice Architecture (VSA) + +Single-file feature with Result pattern, validation, and response: + +```csharp +// Features/Orders/CreateOrder.cs — handler returns Result, not raw response +namespace MyApp.Features.Orders; + +public static class CreateOrder +{ + public record Command(string CustomerId, List Items); + public record ItemDto(Guid ProductId, int Quantity, decimal UnitPrice); + public record Response(Guid Id, decimal Total, DateTimeOffset CreatedAt); + + internal sealed class Handler(AppDbContext db, TimeProvider clock) + { + public async Task> HandleAsync(Command command, CancellationToken ct) + { + var order = Order.Create(command.CustomerId, command.Items, clock.GetUtcNow()); + db.Orders.Add(order); + await db.SaveChangesAsync(ct); + return Result.Success(new Response(order.Id, order.Total, order.CreatedAt)); + } + } + + internal sealed class Validator : AbstractValidator + { + public Validator() + { + RuleFor(x => x.CustomerId).NotEmpty(); + RuleFor(x => x.Items).NotEmpty(); + RuleForEach(x => x.Items).ChildRules(item => + { + item.RuleFor(x => 
x.Quantity).InclusiveBetween(1, 1000); + item.RuleFor(x => x.UnitPrice).GreaterThan(0); + }); + } + } +} +``` + +Endpoint group — maps Result to HTTP, full OpenAPI metadata, validation, pagination: + +```csharp +// Features/Orders/OrderEndpoints.cs — auto-discovered via IEndpointGroup +public sealed class OrderEndpoints : IEndpointGroup +{ + public void Map(IEndpointRouteBuilder app) + { + var group = app.MapGroup("/api/orders").WithTags("Orders"); + + group.MapPost("/", CreateOrderHandler) + .WithName("CreateOrder").WithSummary("Create a new order") + .Produces(StatusCodes.Status201Created) + .ProducesValidationProblem() + .AddEndpointFilter>(); + + group.MapGet("/", ListOrdersHandler) + .WithName("ListOrders").WithSummary("List orders with pagination") + .Produces>(); + + group.MapGet("/{id:guid}", GetOrderHandler) + .WithName("GetOrder") + .Produces().ProducesProblem(StatusCodes.Status404NotFound); + } + + private static async Task CreateOrderHandler( + CreateOrder.Command cmd, CreateOrder.Handler handler, CancellationToken ct) + { + var result = await handler.HandleAsync(cmd, ct); + return result.IsSuccess + ? 
TypedResults.Created($"/api/orders/{result.Value.Id}", result.Value) + : result.ToProblemDetails(); + } + + private static async Task>> ListOrdersHandler( + [AsParameters] PaginationQuery paging, AppDbContext db, CancellationToken ct) + { + var query = db.Orders.OrderByDescending(o => o.CreatedAt); + var total = await query.CountAsync(ct); + var items = await query.Skip((paging.Page - 1) * paging.PageSize).Take(paging.PageSize) + .Select(o => new OrderSummary(o.Id, o.Total, o.CreatedAt)).ToListAsync(ct); + return TypedResults.Ok(new PagedList(items, total, paging.Page, paging.PageSize)); + } + + private static async Task, NotFound>> GetOrderHandler( + Guid id, AppDbContext db, CancellationToken ct) + { + var order = await db.Orders.Where(o => o.Id == id) + .Select(o => new OrderDetail(o.Id, o.CustomerId, o.Total, o.CreatedAt)).FirstOrDefaultAsync(ct); + return order is not null ? TypedResults.Ok(order) : TypedResults.NotFound(); + } +} + +// Common/PaginationQuery.cs +public record PaginationQuery(int Page = 1, int PageSize = 20) +{ + public int Page { get; init; } = Math.Max(1, Page); + public int PageSize { get; init; } = Math.Clamp(PageSize, 1, 50); +} +public record PagedList(List Items, int TotalCount, int Page, int PageSize); +``` + +### Feature Scaffold — Clean Architecture (CA) + +Separate files across layers. 
Domain → Application (Command + Handler + Validator) → Api (Endpoint): + +```csharp +// Application/Orders/CreateOrder/CreateOrderCommand.cs — uses Mediator (source-generated, MIT) +public record CreateOrderCommand(string CustomerId, List Items) : IRequest>; +public record CreateOrderResponse(Guid Id, decimal Total, DateTimeOffset CreatedAt); + +// Application/Orders/CreateOrder/CreateOrderHandler.cs +internal sealed class CreateOrderHandler(IAppDbContext db, TimeProvider clock) + : IRequestHandler> +{ + public async ValueTask> Handle(CreateOrderCommand request, CancellationToken ct) + { + var order = Order.Create(request.CustomerId, request.Items, clock.GetUtcNow()); + db.Orders.Add(order); + await db.SaveChangesAsync(ct); + return new CreateOrderResponse(order.Id, order.Total, order.CreatedAt); + } +} + +// Api/Endpoints/OrderEndpoints.cs — auto-discovered via IEndpointGroup +public sealed class OrderEndpoints : IEndpointGroup +{ + public void Map(IEndpointRouteBuilder app) + { + var group = app.MapGroup("/api/orders").WithTags("Orders"); + group.MapPost("/", async (CreateOrderCommand cmd, ISender sender, CancellationToken ct) => + { + var result = await sender.Send(cmd, ct); + return result.IsSuccess + ? 
TypedResults.Created($"/api/orders/{result.Value.Id}", result.Value) + : result.ToProblemDetails(); + }) + .WithName("CreateOrder").Produces(201) + .ProducesValidationProblem() + .AddEndpointFilter>(); + } +} +``` + +### Feature Scaffold — DDD + +Domain logic lives in the aggregate; handler orchestrates persistence: + +```csharp +// Domain/Orders/Order.cs — Aggregate root with invariant enforcement +public sealed class Order : AggregateRoot +{ + private readonly List _items = []; + public IReadOnlyList Items => _items.AsReadOnly(); + public decimal Total { get; private set; } + public OrderStatus Status { get; private set; } + + public static Order Place(string customerId, List<(Guid ProductId, int Qty, decimal Price)> items, DateTimeOffset now) + { + if (items.Count == 0) throw new DomainException("Order must have at least one item."); + var order = new Order { Id = Guid.NewGuid(), Status = OrderStatus.Placed }; + foreach (var (productId, qty, price) in items) + order._items.Add(OrderItem.Create(productId, qty, price)); + order.Total = order._items.Sum(i => i.LineTotal); + order.AddDomainEvent(new OrderPlacedEvent(order.Id, customerId, order.Total, now)); + return order; + } +} +``` + +### Feature Scaffold — Modular Monolith + +Feature within a module boundary with its own DbContext. 
Handler passes `CancellationToken`, publishes integration events: + +```csharp +// Modules/Orders/Features/PlaceOrder.cs +public static class PlaceOrder +{ + public record Command(string CustomerId, List Items); + public record Response(Guid OrderId, decimal Total); + + internal sealed class Handler(OrdersDbContext db, TimeProvider clock, IEventBus bus) + { + public async Task HandleAsync(Command command, CancellationToken ct) + { + var order = Order.Place(command.CustomerId, command.Items, clock.GetUtcNow()); + db.Orders.Add(order); + await db.SaveChangesAsync(ct); + await bus.PublishAsync(new OrderPlacedIntegrationEvent(order.Id, order.Total), ct); + return new Response(order.Id, order.Total); + } + } +} +``` + +### Entity Scaffold +Always pair entity + `IEntityTypeConfiguration`. No data annotations on entities. + +```csharp +// Domain/Entities/Product.cs — clean, no attributes +public sealed class Product +{ + public Guid Id { get; private set; } + public string Name { get; private set; } = string.Empty; + public string Sku { get; private set; } = string.Empty; + public decimal Price { get; private set; } + + public static Product Create(string name, string sku, decimal price) => + new() { Id = Guid.NewGuid(), Name = name, Sku = sku, Price = price }; +} + +// Persistence/Configurations/ProductConfiguration.cs — all EF config here +internal sealed class ProductConfiguration : IEntityTypeConfiguration +{ + public void Configure(EntityTypeBuilder builder) + { + builder.HasKey(x => x.Id); + builder.Property(x => x.Name).HasMaxLength(200).IsRequired(); + builder.Property(x => x.Sku).HasMaxLength(50).IsRequired(); + builder.HasIndex(x => x.Sku).IsUnique(); + builder.Property(x => x.Price).HasPrecision(18, 2); + } +} +``` + +After creating entity + config: `dotnet ef migrations add AddProduct` + +### Test Scaffold +Integration test with proper DI replacement (RemoveAll, not fragile name matching): + +```csharp +// Tests/Fixtures/ApiFixture.cs +public sealed class 
ApiFixture : WebApplicationFactory, IAsyncLifetime +{ + private readonly PostgreSqlContainer _postgres = new PostgreSqlBuilder().WithImage("postgres:17").Build(); + + protected override void ConfigureWebHost(IWebHostBuilder builder) + { + builder.ConfigureServices(services => + { + services.RemoveAll>(); + services.AddDbContext(o => o.UseNpgsql(_postgres.GetConnectionString())); + }); + } + + public async Task InitializeAsync() { await _postgres.StartAsync(); /* apply migrations */ } + public new async Task DisposeAsync() { await _postgres.DisposeAsync(); await base.DisposeAsync(); } +} +``` +```csharp +// Tests/Features/Orders/CreateOrderTests.cs +public sealed class CreateOrderTests(ApiFixture fixture) : IClassFixture +{ + private readonly HttpClient _client = fixture.CreateClient(); + + [Fact] + public async Task CreateOrder_ValidRequest_Returns201() + { + // Arrange + var command = new { CustomerId = "CUST-001", Items = new[] { new { ProductId = Guid.NewGuid(), Quantity = 2, UnitPrice = 29.99m } } }; + // Act + var response = await _client.PostAsJsonAsync("/api/orders", command); + // Assert + Assert.Equal(HttpStatusCode.Created, response.StatusCode); + var result = await response.Content.ReadFromJsonAsync(); + Assert.NotEqual(Guid.Empty, result.GetProperty("id").GetGuid()); + } + + [Fact] + public async Task CreateOrder_EmptyItems_ReturnsValidationProblem() + { + var response = await _client.PostAsJsonAsync("/api/orders", new { CustomerId = "CUST-001", Items = Array.Empty() }); + Assert.Equal(HttpStatusCode.BadRequest, response.StatusCode); + } +} +``` + +### Module Scaffold (Modular Monolith) +Module = DI registration class + `IEndpointGroup` endpoints + own DbContext with isolated schema: + +```csharp +// Modules/Inventory/InventoryModule.cs — DI only, no endpoint wiring +public static class InventoryModule +{ + public static IServiceCollection AddInventoryModule(this IServiceCollection services, IConfiguration config) + { + services.AddDbContext(o => 
o.UseNpgsql(config.GetConnectionString("Inventory"))); + return services; + } +} + +// Modules/Inventory/Endpoints/InventoryEndpoints.cs — auto-discovered via IEndpointGroup +public sealed class InventoryEndpoints : IEndpointGroup +{ + public void Map(IEndpointRouteBuilder app) + { + var group = app.MapGroup("/api/inventory").WithTags("Inventory"); + // endpoint definitions with full OpenAPI metadata + ValidationFilter + } +} + +// Modules/Inventory/Persistence/InventoryDbContext.cs — isolated schema +internal sealed class InventoryDbContext(DbContextOptions options) : DbContext(options) +{ + public DbSet StockItems => Set(); + protected override void OnModelCreating(ModelBuilder modelBuilder) + { + modelBuilder.HasDefaultSchema("inventory"); + modelBuilder.ApplyConfigurationsFromAssembly(typeof(InventoryDbContext).Assembly); + } +} +``` + +## Anti-patterns + +### Scaffolding Without Architecture + +```csharp +// BAD — Generating code without knowing if project uses VSA, CA, or DDD +public class CreateOrderHandler { /* random structure */ } + +// GOOD — Ask first: "I see feature folders, so I'll scaffold using VSA patterns." +public static class CreateOrder { /* VSA single-file feature */ } +``` + +### Feature Without Tests + +Always scaffold feature + test as a single unit. `CreateOrder.cs` + `CreateOrderTests.cs` are never generated separately. 
+ +### Entity Without EF Configuration + +```csharp +// BAD — data annotations scattered in entity +public class Product { [Key] public Guid Id { get; set; } [MaxLength(200)] public string Name { get; set; } = ""; } + +// GOOD — clean entity + separate IEntityTypeConfiguration +public sealed class Product { /* No attributes */ } +internal sealed class ProductConfiguration : IEntityTypeConfiguration<Product> { /* All EF config */ } +``` + +### Anemic DTOs That Mirror Entities 1:1 + +```csharp +// BAD — DTO mirrors entity with no purpose +public record ProductDto(Guid Id, string Name, string Sku, decimal Price, bool IsActive, DateTime CreatedAt, DateTime? UpdatedAt); + +// GOOD — response shaped for the consumer +public record ProductSummary(Guid Id, string Name, decimal Price); +``` + +## Decision Guide + +| Scenario | Architecture | Scaffold Pattern | |----------|-------------|-----------------| | New CRUD endpoint | VSA | Single-file feature (Command + Handler + Validator + Response) | | New business operation | CA | Command in Application/, Handler in Application/, Endpoint in Api/ | | Complex domain logic | DDD | Aggregate method + Application handler + Domain event | | Feature in a module | Modular Monolith | Feature file in Modules/{Name}/Features/ with module DbContext | | New entity | Any | Entity class + `IEntityTypeConfiguration<T>` + migration | | New module | Modular Monolith | Module folder + DbContext + DI registration + integration events | | Architecture unknown | Any | **Ask first** — run architecture-advisor questionnaire | diff --git a/.opencode/skills/scalar/SKILL.md b/.opencode/skills/scalar/SKILL.md new file mode 100644 index 00000000..17d1ffdf --- /dev/null +++ b/.opencode/skills/scalar/SKILL.md @@ -0,0 +1,251 @@ +--- +name: scalar +description: > + Scalar API documentation UI for .NET 10 applications. Covers setup, themes, + authentication prefill, multiple documents, layout options, and security. + A modern replacement for Swagger UI. 
+ Load this skill when setting up API documentation UI, or when the user mentions + "Scalar", "MapScalarApiReference", "API reference", "Swagger UI replacement", + "API documentation UI", "Scalar theme", "interactive API docs", or "Try It". +--- + +# Scalar + +## Core Principles + +1. **Scalar replaces Swagger UI** — Scalar is the recommended API documentation UI for .NET 10. Faster rendering, built-in dark mode, code generation for dozens of languages, and full OpenAPI 3.1 support. +2. **Development only by default** — Wrap `MapScalarApiReference()` in an `IsDevelopment()` check. API documentation exposes internal structure. If needed in production, add authorization. +3. **Disable the proxy for sensitive APIs** — Scalar's "Try It" feature routes through `proxy.scalar.com` by default. Disable it with `.WithProxy(null)` to keep auth headers local. +4. **Security schemes come from OpenAPI** — Scalar reads security schemes from the OpenAPI document. Configure them via document transformers, not in Scalar directly. + +## Patterns + +### Basic Setup + +```csharp +using Scalar.AspNetCore; + +var builder = WebApplication.CreateBuilder(args); +builder.Services.AddOpenApi(); + +var app = builder.Build(); + +if (app.Environment.IsDevelopment()) +{ + app.MapOpenApi(); + app.MapScalarApiReference(); // UI at /scalar/v1 +} + +app.Run(); +``` + +### Customized Configuration + +```csharp +app.MapScalarApiReference(options => +{ + options + .WithTitle("Checkout API") + .WithTheme(ScalarTheme.Mars) + .WithDefaultHttpClient(ScalarTarget.CSharp, ScalarClient.HttpClient) + .WithPreferredScheme("Bearer") + .WithProxy(null) // Disable external proxy + .WithSidebar(true); +}); +``` + +### Authentication Prefill (Development Only) + +Pre-fill credentials so developers don't have to paste tokens manually. The OpenAPI document must already include the security scheme via a document transformer. 
+ +```csharp +if (app.Environment.IsDevelopment()) +{ + app.MapScalarApiReference(options => + { + options + .WithPreferredScheme("Bearer") + .AddHttpAuthentication("Bearer", auth => + { + auth.Token = "dev-only-test-token"; + }); + }); +} +``` + +Other auth types: + +```csharp +// API Key +options.WithApiKeyAuthentication(apiKey => +{ + apiKey.Token = "dev-api-key"; +}); + +// OAuth2 +options.WithOAuth2Authentication(oauth => +{ + oauth.ClientId = "your-client-id"; + oauth.Scopes = ["openid", "profile"]; +}); +``` + +### Available Themes + +```csharp +// ScalarTheme options: Default, Moon, Purple, BluePlanet, Saturn, Mars, DeepSpace, Kepler, Solarized, Laserwave +options.WithTheme(ScalarTheme.Mars); +``` + +### Multiple API Documents + +```csharp +// Register multiple OpenAPI documents +builder.Services.AddOpenApi("v1"); +builder.Services.AddOpenApi("v2-beta"); + +// Scalar picks them up automatically +app.MapOpenApi(); +app.MapScalarApiReference(); +// Available at /scalar/v1 and /scalar/v2-beta +``` + +Or configure documents explicitly: + +```csharp +app.MapScalarApiReference(options => +{ + options + .AddDocument("v1", "Production API") + .AddDocument("v2-beta", "Beta API", isDefault: true); +}); +``` + +### Custom Route Prefix + +```csharp +// Default is /scalar/{documentName} +app.MapScalarApiReference("/api-docs"); +// Now at /api-docs/v1 +``` + +### Production with Authorization + +```csharp +// When partners need access to docs in production +app.MapOpenApi().RequireAuthorization("ApiDocs"); +app.MapScalarApiReference().RequireAuthorization("ApiDocs"); +``` + +### Force Dark Mode + +```csharp +options.ForceDarkMode(); +``` + +### Classic Layout (Swagger-like) + +```csharp +options.WithClassicLayout(); +``` + +## Anti-patterns + +### Don't Expose Scalar in Production Without Auth + +```csharp +// BAD — anyone can see your API structure +app.MapOpenApi(); +app.MapScalarApiReference(); + +// GOOD — development only +if (app.Environment.IsDevelopment()) +{ + 
app.MapOpenApi(); + app.MapScalarApiReference(); +} + +// GOOD — production with auth +app.MapOpenApi().RequireAuthorization("ApiDocs"); +app.MapScalarApiReference().RequireAuthorization("ApiDocs"); +``` + +### Don't Pre-fill Real Credentials + +```csharp +// BAD — real tokens visible in browser +options.AddHttpAuthentication("Bearer", auth => +{ + auth.Token = "eyJhbG...real-production-token"; +}); + +// GOOD — dev-only test tokens +if (app.Environment.IsDevelopment()) +{ + options.AddHttpAuthentication("Bearer", auth => + { + auth.Token = "dev-only-test-token"; + }); +} +``` + +### Don't Forget the Security Scheme Transformer + +```csharp +// BAD — no auth UI in Scalar because OpenAPI doc has no security schemes +builder.Services.AddOpenApi(); +app.MapScalarApiReference(options => +{ + options.WithPreferredScheme("Bearer"); // Does nothing! +}); + +// GOOD — register the document transformer first +builder.Services.AddOpenApi(options => +{ + options.AddDocumentTransformer(); +}); +app.MapScalarApiReference(options => +{ + options.WithPreferredScheme("Bearer"); +}); +``` + +### Don't Leave the Proxy Enabled for Sensitive APIs + +```csharp +// BAD — auth headers flow through proxy.scalar.com +app.MapScalarApiReference(); + +// GOOD — disable proxy for APIs with sensitive data +app.MapScalarApiReference(options => +{ + options.WithProxy(null); +}); +``` + +### Don't Use Swagger UI for New .NET 10 Projects + +```csharp +// BAD — Swashbuckle removed from templates, maintenance concerns +builder.Services.AddSwaggerGen(); +app.UseSwaggerUI(); + +// GOOD — built-in OpenAPI + Scalar +builder.Services.AddOpenApi(); +app.MapOpenApi(); +app.MapScalarApiReference(); +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| API documentation UI | `MapScalarApiReference()` with `MapOpenApi()` | +| Development environment | Default setup with `IsDevelopment()` guard | +| Production API docs | Add `.RequireAuthorization()` to both endpoints | +| 
Auth testing in dev | `AddHttpAuthentication()` with test tokens | +| Dark theme preference | `.ForceDarkMode()` or `.WithTheme(ScalarTheme.Moon)` | +| Multiple API versions | Multiple `AddOpenApi()` calls — Scalar detects automatically | +| Sensitive APIs | `.WithProxy(null)` to disable external proxy | +| Swagger-like layout | `.WithClassicLayout()` | +| Custom route | `app.MapScalarApiReference("/api-docs")` | diff --git a/.opencode/skills/security-scan/SKILL.md b/.opencode/skills/security-scan/SKILL.md new file mode 100644 index 00000000..c97cea2e --- /dev/null +++ b/.opencode/skills/security-scan/SKILL.md @@ -0,0 +1,381 @@ +--- +name: security-scan +description: > + Deep security scanning for .NET applications across 6 layers: vulnerable packages, + secrets detection, OWASP code patterns, auth configuration, CORS policy, and + data protection. Produces severity-rated findings with specific remediation steps. + Load this skill when: "security scan", "security audit", "check for vulnerabilities", + "find secrets", "OWASP", "auth review", "CORS check", "security review", + "penetration test prep", "CVE check", "vulnerability scan", "hardcoded password", + "data protection", "security posture". +--- + +# Security Scan + +## Core Principles + +1. **Defense in depth** — Scan multiple layers: packages, source code, configuration, and infrastructure. A project with zero CVEs can still have hardcoded secrets, SQL injection, and missing auth. Each layer catches different vulnerability classes. + +2. **Prioritize by exploitability** — A Critical SQL injection in a public endpoint is more urgent than a Low-severity info disclosure in an admin-only page. Prioritize findings by: exploitability (how easy to exploit), impact (what an attacker gains), and exposure (public vs internal endpoint). + +3. **No false sense of security** — This is static analysis, not a penetration test. 
It catches known patterns but misses business logic flaws, authorization bypass through complex flows, and runtime-only vulnerabilities. State this clearly in every report. + +4. **Actionable findings** — Every issue includes severity, file and line, description of the vulnerability, impact if exploited, and specific remediation code. "Fix the security issue" is not a finding. "OrderController.cs:23 — Missing `[Authorize]` on `DELETE /orders/{id}`. Impact: unauthenticated users can delete orders. Fix: Add `[Authorize(Policy = \"OrderAdmin\")]`" is. + +5. **Follow OWASP Top 10** — Structure the scan around known vulnerability categories. The OWASP Top 10 is the industry baseline for web application security. Every finding should map to an OWASP category. + +## Patterns + +### 6-Layer Security Scan Pipeline + +Execute all 6 layers. Each produces findings rated Critical, High, Medium, or Low. + +**Layer 1: Package Vulnerabilities** +```bash +dotnet list package --vulnerable --include-transitive +``` +Check for known CVEs in direct and transitive dependencies. + +Severity mapping: +- **Critical**: CVSS 9.0-10.0 — Remote code execution, authentication bypass +- **High**: CVSS 7.0-8.9 — Privilege escalation, data exposure +- **Medium**: CVSS 4.0-6.9 — Denial of service, information disclosure +- **Low**: CVSS 0.1-3.9 — Minor information leakage + +Remediation pattern: +```xml + + + + + +``` + +If a patch isn't available, document the risk and apply compensating controls. + +**Layer 2: Secrets Detection** + +Scan all `.cs`, `.json`, `.yml`, `.yaml`, `.xml`, and `.config` files for hardcoded secrets. 
+ +Patterns to detect: +``` +HIGH-CONFIDENCE PATTERNS (almost always a real secret): +- "Password=" or "Pwd=" in connection strings outside appsettings.Development.json +- "Bearer " followed by a base64 token in source code +- "-----BEGIN PRIVATE KEY-----" or "-----BEGIN RSA PRIVATE KEY-----" +- AWS: "AKIA" followed by 16 alphanumeric characters +- Azure: strings matching Azure Storage/Service Bus key patterns + +MEDIUM-CONFIDENCE PATTERNS (need context to determine): +- "ApiKey", "Secret", "Token" as variable names with string literal assignments +- Base64-encoded strings longer than 40 characters in source code +- Connection strings with server addresses in non-Development config files + +FALSE-POSITIVE INDICATORS (skip these): +- Values in appsettings.Development.json (development-only) +- Placeholder values: "your-key-here", "changeme", "TODO", empty strings +- Test fixtures with obviously fake values +- User secrets references: "UserSecretsId" in .csproj +``` + +Remediation: +```csharp +// BAD — hardcoded connection string +var connectionString = "Server=prod-db;Database=Orders;User=admin;Password=S3cret!"; + +// GOOD — configuration with user secrets / environment variables +var connectionString = builder.Configuration.GetConnectionString("OrdersDb"); +// Store actual values in: +// - Development: dotnet user-secrets set "ConnectionStrings:OrdersDb" "..." +// - Production: Environment variable or Azure Key Vault +``` + +**Layer 3: OWASP Code Patterns** + +Scan source code for vulnerability patterns mapped to OWASP Top 10. 
+
+```
+A03:2021 — Injection
+  Detect: String concatenation in SQL queries, raw SQL with user input
+  Pattern: FromSqlRaw($"SELECT * FROM Orders WHERE Id = '{userInput}'")
+  Fix: FromSqlInterpolated($"SELECT * FROM Orders WHERE Id = {userInput}")
+       or use parameterized queries / LINQ
+
+A03:2021 — Injection (Cross-Site Scripting / XSS)
+  Detect: Raw HTML output without encoding in Razor/Blazor
+  Pattern: @Html.Raw(userInput)
+  Fix: Use Razor's default encoding (@userInput) or sanitize explicitly
+
+A08:2021 — Software and Data Integrity Failures (Insecure Deserialization)
+  Detect: BinaryFormatter, JsonConvert with TypeNameHandling.All
+  Pattern: JsonConvert.DeserializeObject(json, new JsonSerializerSettings
+           { TypeNameHandling = TypeNameHandling.All })
+  Fix: Use System.Text.Json (no type name handling by default)
+       If Newtonsoft is required: TypeNameHandling.None + explicit type converters
+
+A02:2021 — Cryptographic Failures
+  Detect: MD5, SHA1 for security purposes, ECB mode, hardcoded encryption keys
+  Pattern: MD5.Create().ComputeHash(...)
+  Fix: Use SHA256 minimum, prefer HMACSHA256 for authentication
+       Use AES-GCM for encryption, derive keys from passwords using Rfc2898DeriveBytes
+
+A01:2021 — Broken Access Control (Insecure Direct Object References)
+  Detect: Endpoints that use user-supplied IDs without ownership verification
+  Pattern: GET /orders/{id} — returns any order regardless of who owns it
+  Fix: Add ownership check: where o.Id == id && o.CustomerId == currentUser.Id
+```
+
+**Layer 4: Auth Configuration**
+
+Review authentication and authorization setup across the application.
+
+```
+CHECKLIST:
+1. All endpoints have explicit auth attributes
+   - MCP: find_references(symbolName: "AllowAnonymous") — list deliberately public endpoints
+   - MCP: find_references(symbolName: "Authorize") — list protected endpoints
+   - Gap: endpoints with neither attribute (implicit policy depends on global config)
+
+2.
JWT validation settings are secure + - ValidateIssuer: true (prevents token from wrong issuer) + - ValidateAudience: true (prevents token meant for another app) + - ValidateLifetime: true (prevents expired tokens) + - ValidateIssuerSigningKey: true (prevents tampered tokens) + - ClockSkew: TimeSpan.FromMinutes(1) max (default 5 min is too generous) + +3. Authorization policies are specific + - BAD: [Authorize] with no policy — just checks "is authenticated" + - GOOD: [Authorize(Policy = "OrderAdmin")] — role/claim-based authorization + +4. No auth bypass patterns + - Middleware ordering: UseAuthentication() before UseAuthorization() + - No global AllowAnonymous that accidentally opens everything + - API key validation in middleware, not in each controller +``` + +```csharp +// BAD — JWT configuration with weak validation +builder.Services.AddAuthentication().AddJwtBearer(options => +{ + options.TokenValidationParameters = new() + { + ValidateIssuer = false, // Anyone can issue tokens + ValidateAudience = false, // Token works for any app + ValidateLifetime = false, // Expired tokens accepted + IssuerSigningKey = new SymmetricSecurityKey( + "short-key"u8.ToArray()) // Key too short (< 256 bits) + }; +}); + +// GOOD — secure JWT configuration +builder.Services.AddAuthentication().AddJwtBearer(options => +{ + options.TokenValidationParameters = new() + { + ValidateIssuer = true, + ValidIssuer = builder.Configuration["Jwt:Issuer"], + ValidateAudience = true, + ValidAudience = builder.Configuration["Jwt:Audience"], + ValidateLifetime = true, + ClockSkew = TimeSpan.FromMinutes(1), + ValidateIssuerSigningKey = true, + IssuerSigningKey = new SymmetricSecurityKey( + Convert.FromBase64String(builder.Configuration["Jwt:Key"]!)) + }; +}); +``` + +**Layer 5: CORS Configuration** + +Review Cross-Origin Resource Sharing policy for misconfigurations. 
+
+```csharp
+// CRITICAL — wildcard origin with credentials
+builder.Services.AddCors(options =>
+{
+    options.AddDefaultPolicy(policy =>
+    {
+        policy.AllowAnyOrigin()      // Any website can call this API
+              .AllowCredentials();   // ...and send cookies/auth headers
+        // This is a security vulnerability — browsers block this combo,
+        // but it signals a misunderstanding of CORS
+    });
+});
+
+// HIGH — wildcard origin without credentials
+policy.AllowAnyOrigin()    // Any website can read API responses
+      .AllowAnyHeader()
+      .AllowAnyMethod();
+// Acceptable ONLY for truly public APIs (e.g., public data feeds)
+
+// GOOD — explicit origins
+builder.Services.AddCors(options =>
+{
+    options.AddDefaultPolicy(policy =>
+    {
+        policy.WithOrigins(
+                builder.Configuration.GetSection("Cors:AllowedOrigins").Get<string[]>()!)
+              .AllowCredentials()
+              .WithMethods("GET", "POST", "PUT", "DELETE")
+              .WithHeaders("Content-Type", "Authorization");
+    });
+});
+```
+
+Check for:
+- Wildcard origins (`AllowAnyOrigin`) — should be restricted to specific domains
+- Exposed headers that leak internal information
+- Overly broad methods (allowing PATCH, OPTIONS when only GET/POST needed)
+- CORS in development vs production — different policies for different environments
+
+**Layer 6: Data Protection**
+
+Scan for PII and sensitive data handling issues.
Missing Data Protection API — storing sensitive data without encryption + Pattern: Storing API keys as plain text in the database + Fix: Use IDataProtector to encrypt before storage + +4. Unencrypted sensitive configuration — secrets in appsettings.json + Pattern: "SmtpPassword": "actualpassword" in appsettings.json + Fix: Use user secrets (dev), Key Vault (prod), or environment variables +``` + +```csharp +// BAD — PII in logs +logger.LogInformation("Order placed by {Email} for {CreditCard}", + order.CustomerEmail, order.PaymentCard); + +// GOOD — identifiers only +logger.LogInformation("Order {OrderId} placed by customer {CustomerId}", + order.Id, order.CustomerId); +``` + +### Full Scan Report + +Each finding uses format: `#### [SEVERITY] File:Line — Title` with OWASP Category, Description, Impact, and Remediation (with code before/after). + +```markdown +## Security Scan Report + +**Project:** MyApp | **Date:** 2026-03-04 | **Scanner:** Claude (static analysis) + +> This is a static analysis scan. It catches known patterns but does not replace +> penetration testing, dynamic analysis, or threat modeling. + +### Summary + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 2 | +| Medium | 3 | +| Low | 1 | + +### Findings + +#### [HIGH] src/Orders/Features/SearchOrders.cs:34 — SQL Injection +... + +#### [HIGH] src/Api/Program.cs:12 — Missing Authorization on DELETE endpoint +... + +### Layer Results + +| Layer | Status | Findings | +|-------|--------|----------| +| 1. Package Vulnerabilities | PASS | 0 CVEs | +| 2. Secrets Detection | PASS | No hardcoded secrets | +| 3. OWASP Code Patterns | FAIL | 1 SQL injection, 1 insecure deserialization | +| 4. Auth Configuration | WARN | 2 endpoints missing explicit auth | +| 5. CORS Configuration | PASS | Origins properly restricted | +| 6. 
Data Protection | WARN | PII found in 2 log statements | +``` + +## Anti-patterns + +### Only Scanning Packages + +``` +# BAD — NuGet packages are clean, declare victory +dotnet list package --vulnerable → "No vulnerable packages found" +"Security scan passed!" +# Missed: hardcoded password in appsettings.json, SQL injection in SearchOrders, +# missing [Authorize] on 3 endpoints, PII in logs + +# GOOD — all 6 layers for complete coverage +Layer 1 (Packages): PASS +Layer 2 (Secrets): Found connection string in appsettings.Production.json +Layer 3 (OWASP): SQL injection in SearchOrders.cs:34 +Layer 4 (Auth): 3 endpoints without [Authorize] +Layer 5 (CORS): Wildcard origin in production config +Layer 6 (Data): Customer email logged at Information level +``` + +### Everything is Critical + +``` +# BAD — alert fatigue from over-classification +[CRITICAL] Missing XML comment on public method +[CRITICAL] Using var instead of explicit type +[CRITICAL] Connection string in appsettings.Development.json + +# GOOD — severity matches actual risk +[LOW] Missing XML comment on public method (not a security issue) +[INFO] appsettings.Development.json has connection string (expected for dev) +[HIGH] appsettings.Production.json has hardcoded password (real secret exposure) +``` + +### Scanning Without Context + +``` +# BAD — flagging test fixtures as security issues +[HIGH] Tests/OrderTests.cs:15 — Hardcoded API key: "test-key-12345" +# This is a test fixture with a fake value, not a real secret + +# GOOD — context-aware scanning +Skip files in test projects for secret detection (test data is expected to be fake). +Flag only if the pattern matches a real secret format (e.g., starts with "AKIA" for AWS). +``` + +### No Remediation Steps + +``` +# BAD — finding without a fix +[HIGH] SQL Injection in SearchOrders.cs:34 +"Fix this." 
+ +# GOOD — finding with specific remediation +[HIGH] SQL Injection in SearchOrders.cs:34 +Current: FromSqlRaw($"SELECT * FROM Orders WHERE Name LIKE '%{search}%'") +Fix: Use parameterized query: + db.Orders.Where(o => EF.Functions.Like(o.Name, $"%{search}%")) +Impact: Attacker can read/modify/delete any data in the database. +``` + +## Decision Guide + +| Scenario | Layers | Notes | +|----------|--------|-------| +| Pre-release security gate | All 6 | Full scan, non-negotiable before production | +| After dependency update | 1 | Package vulnerabilities only | +| New endpoint added | 3, 4, 5 | OWASP, auth, CORS for the new endpoint | +| Auth system changes | 4 | Deep auth configuration review | +| Config file changes | 2 | Secrets detection in changed configs | +| Logging changes | 6 | Check for PII in new log statements | +| Pre-pentest preparation | All 6 | Fix static issues before paying for a pentest | +| Incident response | All 6 | Full scan after a security incident | +| Quarterly security review | All 6 | Regular cadence, also useful for onboarding | +| Public API exposure | 3, 4, 5 | Focus on external attack surface | +| Internal service | 1, 2, 3 | Lower CORS/auth scrutiny if truly internal | diff --git a/.opencode/skills/self-correction-loop/SKILL.md b/.opencode/skills/self-correction-loop/SKILL.md new file mode 100644 index 00000000..c353596c --- /dev/null +++ b/.opencode/skills/self-correction-loop/SKILL.md @@ -0,0 +1,182 @@ +--- +name: self-correction-loop +description: > + Self-improving correction capture system. After ANY user correction, detect it, + generalize the lesson, and store it as a reusable rule in MEMORY.md. Ensures + Claude's mistake rate drops over time by compounding corrections into permanent + knowledge. Load this skill when a user corrects Claude's output, mentions + "remember this", "don't do that again", "learn from mistakes", "update memory", + or when starting a new session (to review existing rules). 
+--- + +# Self-Correction Loop + +## Core Principles + +1. **Every correction is a compounding investment** — A correction costs the user 30 seconds today but saves hours across all future sessions. Treat every correction as high-priority knowledge capture, not a one-time fix. + +2. **Generalize before storing** — "Use `TimeProvider` not `DateTime.Now` in the Orders module" becomes "Always use `TimeProvider` instead of `DateTime.Now/UtcNow` across all modules." Specific corrections become class-level rules. + +3. **Categorize for retrieval** — Rules organized by category (Code Style, Architecture, Naming, Testing, Data Access, API Design, Configuration, Performance) are findable. Uncategorized rules are forgotten. + +4. **Deduplicate aggressively** — Before adding a rule, scan existing rules for overlap. Update an existing rule rather than adding a near-duplicate. Memory bloat defeats the purpose. + +5. **Review memory at session start** — The first thing Claude should do in a new session is check `MEMORY.md` for project-specific rules. Knowledge captured but never reviewed is wasted effort. + +## Patterns + +### Correction Detection & Capture Flow + +When a user corrects Claude's output, follow this exact sequence: + +``` +1. DETECT — User says something like: + - "No, use X instead of Y" + - "We don't do it that way here" + - "That's wrong, it should be..." + - "Always/Never do X in this project" + - "Remember this for next time" + +2. ACKNOWLEDGE — Confirm understanding of the correction + "Got it — using HybridCache instead of IMemoryCache." + +3. GENERALIZE — Extract the class-level rule + Specific: "Don't use IMemoryCache in the Orders endpoint" + General: "Always use HybridCache instead of IMemoryCache — it provides + stampede protection and L1+L2 caching out of the box." + +4. 
CHECK — Scan MEMORY.md for existing related rules + - If a related rule exists, UPDATE it (broader scope, better wording) + - If no related rule exists, ADD a new one under the right category + +5. STORE — Write to MEMORY.md under the appropriate category + +6. CONFIRM — Tell the user what was captured + "Added to Memory > Data Access: Always use HybridCache over IMemoryCache." +``` + +### MEMORY.md Organization Format + +Structure memory by category with consistent rule formatting: + +```markdown +# Project Memory + +## Code Style +- Always use file-scoped namespaces — never block-scoped +- Use primary constructors for DI injection in services and handlers + +## Architecture +- This project uses Vertical Slice Architecture — one file per feature operation + +## Data Access +- Always use HybridCache over IMemoryCache — stampede protection + L1/L2 +- Never use repository pattern over EF Core — use DbContext directly + +## Testing +- Integration tests use ApiFixture base class — never raw WebApplicationFactory +``` + +Use categories: Code Style, Architecture, Naming, Data Access, API Design, Testing, Configuration, Performance. Each rule: one line, actionable, with rationale after the dash. + +### Rule Generalization: Specific to Class + +Transform specific corrections into broadly applicable rules: + +``` +SPECIFIC CORRECTION: +"Don't use DateTime.Now in the CreateOrder handler" + +GENERALIZATION STEPS: +1. Is this specific to CreateOrder? → No, it's a .NET-wide concern +2. Is this specific to handlers? → No, applies everywhere +3. What's the underlying principle? → TimeProvider is testable, DateTime is not +4. What's the broadest correct statement? + +GENERALIZED RULE: +"Always use TimeProvider instead of DateTime.Now/UtcNow — TimeProvider is +injectable and testable. This applies to all production code." +``` + +### Periodic Memory Audit + +Every 5-10 sessions (or when memory exceeds 50 rules), audit for quality: + +``` +AUDIT CHECKLIST: +1. 
Remove rules that contradict each other — keep the most recent +2. Merge rules that overlap — combine into a single, clearer rule +3. Remove rules that are now obvious (e.g., captured early but now second nature) +4. Verify rules are still accurate — .NET evolves, patterns change +5. Check that categories are balanced — a category with 20+ rules needs subcategories +``` + +### Session-Start Memory Review + +At session start, read MEMORY.md and apply relevant rules proactively. Do not wait to be reminded of rules that were already captured. + +## Anti-patterns + +### Ignoring Corrections + +``` +// BAD — user corrects, Claude fixes but doesn't capture +User: "No, we use HybridCache here, not IMemoryCache" +Claude: "Fixed. Here's the updated code with HybridCache..." +*Next session: makes the same mistake* + +// GOOD — fix AND capture +User: "No, we use HybridCache here, not IMemoryCache" +Claude: "Fixed. Here's the updated code with HybridCache. + Added to Memory > Data Access: Always use HybridCache over IMemoryCache." 
+*Next session: checks memory, uses HybridCache from the start* +``` + +### Overly Specific Rules + +``` +// BAD — rule is too narrow to be useful +"In the CreateOrder handler on line 47, use TimeProvider" + +// GOOD — generalized to apply broadly +"Always use TimeProvider instead of DateTime.Now/UtcNow in all production code" +``` + +### Never Reviewing Memory + +``` +// BAD — 50 rules captured, none ever reviewed +MEMORY.md grows to 200 lines, contains duplicates and contradictions, +Claude doesn't read it because it's too long to be useful + +// GOOD — periodic audit keeps memory lean and accurate +MEMORY.md stays under 80 rules, well-categorized, no duplicates, +Claude reads it at session start and applies rules proactively +``` + +### Storing Session-Specific Context + +``` +// BAD — temporary state saved as permanent memory +"Currently working on the Orders module refactor, file is at src/Orders/Handler.cs" + +// GOOD — only permanent, reusable knowledge +"The Orders module uses VSA with one file per feature under Features/" +``` + +## Decision Guide + +| Scenario | Action | +|----------|--------| +| User explicitly corrects Claude's code | Capture generalized rule in MEMORY.md | +| User says "remember this" or "always/never" | Capture exactly as stated, generalize if possible | +| Same correction given twice | High priority — the rule wasn't captured or wasn't reviewed | +| Correction is project-specific | Store in MEMORY.md with project context | +| Correction is universal .NET | Store in MEMORY.md — it applies to this project | +| MEMORY.md exceeds 50 rules | Trigger an audit — deduplicate, merge, prune | +| Starting a new session | Review MEMORY.md before writing any code | +| Rule contradicts an existing rule | Keep the most recent correction, remove the old one | +| Correction is about a one-time task | Don't store — only capture reusable patterns | +| User asks to forget a rule | Remove it from MEMORY.md immediately | +| Pattern observed but not yet 
confirmed | Create an instinct via `instinct-system` skill (confidence 0.3) instead of a MEMORY.md rule | +| Instinct reaches 0.9 confidence | Promote to MEMORY.md as a permanent rule (see `instinct-system` skill) | diff --git a/.opencode/skills/serilog/SKILL.md b/.opencode/skills/serilog/SKILL.md new file mode 100644 index 00000000..185bcc56 --- /dev/null +++ b/.opencode/skills/serilog/SKILL.md @@ -0,0 +1,278 @@ +--- +name: serilog +description: > + Structured logging with Serilog for .NET 10 applications. Covers two-stage + bootstrap, appsettings configuration, enrichers, sinks, request logging, + destructuring, and Serilog.Expressions. + Load this skill when setting up Serilog, configuring log sinks, enrichers, + or structured logging, or when the user mentions "Serilog", "structured + logging", "log enrichment", "Seq", "LogContext", "UseSerilog", + "WriteTo", "message template", "Serilog.Expressions", "request logging", + "log sink", "rolling file", or "audit log". +--- + +# Serilog + +## Core Principles + +1. **Two-stage initialization** — Create a bootstrap logger for startup, then replace it with the full logger after DI is ready. This captures startup errors that would otherwise be lost. +2. **`AddSerilog()` over `UseSerilog()`** — Use `builder.Services.AddSerilog()` (the modern API) instead of `builder.Host.UseSerilog()`. It integrates with DI services via `ReadFrom.Services(services)`. +3. **Message templates, not interpolation** — `{PropertyName}` syntax creates structured data that can be queried. String interpolation (`$"..."`) breaks structure and allocates even when the log level is disabled. +4. **Configure via appsettings.json** — Keep log levels, sinks, and overrides in configuration so they can change per environment without redeployment. 
+ +## Patterns + +### Two-Stage Bootstrap Setup + +```csharp +using Serilog; + +// Stage 1: Bootstrap logger — captures startup errors before DI +Log.Logger = new LoggerConfiguration() + .MinimumLevel.Override("Microsoft", LogEventLevel.Information) + .Enrich.FromLogContext() + .WriteTo.Console() + .CreateBootstrapLogger(); + +try +{ + Log.Information("Starting application"); + + var builder = WebApplication.CreateBuilder(args); + + // Stage 2: Full logger with DI and configuration + builder.Services.AddSerilog((services, lc) => lc + .ReadFrom.Configuration(builder.Configuration) + .ReadFrom.Services(services) + .Enrich.FromLogContext() + .Enrich.WithMachineName() + .Enrich.WithEnvironmentName() + .Enrich.WithProperty("Application", "MyApp.Api")); + + var app = builder.Build(); + + app.UseSerilogRequestLogging(options => + { + options.EnrichDiagnosticContext = (diagnosticContext, httpContext) => + { + diagnosticContext.Set("RequestHost", httpContext.Request.Host.Value); + diagnosticContext.Set("UserAgent", + httpContext.Request.Headers.UserAgent.ToString()); + }; + }); + + app.Run(); +} +catch (Exception ex) +{ + Log.Fatal(ex, "Application terminated unexpectedly"); +} +finally +{ + await Log.CloseAndFlushAsync(); +} +``` + +### appsettings.json Configuration + +```json +{ + "Serilog": { + "MinimumLevel": { + "Default": "Information", + "Override": { + "Microsoft": "Warning", + "Microsoft.AspNetCore": "Warning", + "Microsoft.EntityFrameworkCore": "Warning", + "Microsoft.Hosting.Lifetime": "Information", + "System": "Warning" + } + }, + "WriteTo": [ + { + "Name": "Console", + "Args": { + "outputTemplate": "[{Timestamp:HH:mm:ss} {Level:u3}] {Message:lj} {Properties:j}{NewLine}{Exception}" + } + }, + { + "Name": "File", + "Args": { + "path": "logs/app-.log", + "rollingInterval": "Day", + "retainedFileCountLimit": 30, + "fileSizeLimitBytes": 104857600 + } + }, + { + "Name": "Seq", + "Args": { "serverUrl": "http://localhost:5341" } + } + ], + "Enrich": 
["FromLogContext", "WithMachineName", "WithEnvironmentName"], + "Destructure": [ + { "Name": "ToMaximumDepth", "Args": { "maximumDestructuringDepth": 4 } }, + { "Name": "ToMaximumStringLength", "Args": { "maximumStringLength": 1024 } }, + { "Name": "ToMaximumCollectionCount", "Args": { "maximumCollectionCount": 10 } } + ] + } +} +``` + +Override section uses namespace prefixes matched against `SourceContext`. More specific prefixes take precedence. + +### Request Logging Middleware + +Replaces the multiple per-request log events from ASP.NET Core with a single summary event. + +```csharp +app.UseSerilogRequestLogging(options => +{ + options.MessageTemplate = + "HTTP {RequestMethod} {RequestPath} responded {StatusCode} in {Elapsed:0.0000} ms"; + + options.GetLevel = (httpContext, elapsed, ex) => ex is not null + ? LogEventLevel.Error + : httpContext.Response.StatusCode >= 500 + ? LogEventLevel.Error + : LogEventLevel.Information; + + options.EnrichDiagnosticContext = (diagnosticContext, httpContext) => + { + diagnosticContext.Set("UserId", + httpContext.User.FindFirstValue(ClaimTypes.NameIdentifier) ?? 
"anonymous"); + }; +}); +``` + +### Structured Logging and Destructuring + +```csharp +// Named properties — creates queryable structured data +logger.LogInformation("Order {OrderId} placed by {CustomerId} for {Total:C}", + orderId, customerId, total); + +// @ operator preserves object structure as properties +logger.LogInformation("Processing {@SensorInput}", sensorInput); +// Output: Processing {"Latitude": 25, "Longitude": 134} + +// $ operator forces ToString() +logger.LogInformation("Received {$Data}", new[] { 1, 2, 3 }); +// Output: Received "System.Int32[]" +``` + +### Scoped Properties with LogContext + +```csharp +using (LogContext.PushProperty("CorrelationId", correlationId)) +using (LogContext.PushProperty("TenantId", tenantId)) +{ + logger.LogInformation("Processing order {OrderId}", orderId); + // CorrelationId and TenantId attached to ALL log events in this scope +} +``` + +Requires `.Enrich.FromLogContext()` on the logger configuration. + +### OpenTelemetry Sink (OTLP Export) + +Export Serilog events directly to any OTLP backend without the OpenTelemetry SDK: + +```csharp +.WriteTo.OpenTelemetry(options => +{ + options.Endpoint = "http://localhost:4317"; + options.Protocol = OtlpProtocol.Grpc; + options.ResourceAttributes = new Dictionary + { + ["service.name"] = "MyApp.Api", + ["deployment.environment"] = "production" + }; +}) +``` + +### Serilog.Expressions for Filtering + +```csharp +// Exclude health check noise +.Filter.ByExcluding("RequestPath like '/health%'") + +// Route errors to a separate file +.WriteTo.Conditional("@l = 'Error'", + wt => wt.File("logs/errors-.log", rollingInterval: RollingInterval.Day)) +``` + +## Anti-patterns + +### Don't Use String Interpolation + +```csharp +// BAD — breaks structured logging, allocates even when level is disabled +logger.LogInformation($"Order {orderId} created for {customerId}"); + +// GOOD — message template with named parameters +logger.LogInformation("Order {OrderId} created for {CustomerId}", 
orderId, customerId); +``` + +### Don't Skip CloseAndFlush + +```csharp +// BAD — async sinks (Seq, OTLP, Elasticsearch) lose buffered events +app.Run(); + +// GOOD — wrap in try/finally +try { app.Run(); } +catch (Exception ex) { Log.Fatal(ex, "Unhandled exception"); } +finally { await Log.CloseAndFlushAsync(); } +``` + +### Don't Log Sensitive Data + +```csharp +// BAD — passwords and tokens in logs +logger.LogInformation("Login: {Email} with password {Password}", email, password); + +// GOOD — never log secrets, passwords, tokens, or PII +logger.LogInformation("Login: {Email}", email); +``` + +### Don't Destructure Without Limits + +```csharp +// BAD — large object graphs cause memory issues and massive log entries +logger.LogInformation("Request: {@Request}", httpContext.Request); + +// GOOD — configure destructuring limits +.Destructure.ToMaximumDepth(4) +.Destructure.ToMaximumStringLength(1024) +.Destructure.ToMaximumCollectionCount(10) + +// BETTER — destructure to specific properties +.Destructure.ByTransforming(r => new { r.Method, r.Path }) +``` + +### Don't Use the Deprecated Elasticsearch Sink + +```csharp +// BAD — Serilog.Sinks.Elasticsearch is deprecated +.WriteTo.Elasticsearch(...) + +// GOOD — use the official Elastic sink with ECS formatting +// Package: Elastic.Serilog.Sinks +.WriteTo.Elasticsearch(...) 
+``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Application logging | Serilog with `AddSerilog()` and appsettings.json | +| Log storage (development) | Seq (free single-user) or Aspire Dashboard | +| Log storage (production) | Seq, Elasticsearch (Elastic sink), or OTLP backend | +| Request logging | `UseSerilogRequestLogging()` (replaces per-request noise) | +| Scoped properties | `LogContext.PushProperty()` in middleware | +| Log filtering | `Serilog.Expressions` for expression-based filtering | +| High-performance paths | `[LoggerMessage]` source generator | +| Audit trails | `AuditTo` (synchronous, exceptions propagate) | +| Log levels by environment | `MinimumLevel.Override` per namespace in appsettings | +| OpenTelemetry integration | `Serilog.Sinks.OpenTelemetry` (no SDK dependency) | diff --git a/.opencode/skills/session-management/SKILL.md b/.opencode/skills/session-management/SKILL.md new file mode 100644 index 00000000..b7341aa7 --- /dev/null +++ b/.opencode/skills/session-management/SKILL.md @@ -0,0 +1,351 @@ +--- +name: session-management +description: > + End-to-end session lifecycle management for .NET projects. Handles session start + (load handoff, MEMORY.md, instincts, detect .NET solution), session end (capture + completed work, persist learnings, write handoff), and context preservation across + sessions. Load this skill when starting a new session, ending a session, when the + user says "new session", "pick up where we left off", "what were we working on", + "session start", "session end", "handoff", "context", "resume", or when Claude + needs to bootstrap itself in an unfamiliar project. +--- + +# Session Management + +## Core Principles + +1. **Sessions start with context, not from scratch** — Every session begins by loading three files: `.claude/handoff.md` (pending work), `MEMORY.md` (permanent rules), and `.claude/instincts.md` (learned patterns). 
Then detect the .NET solution so MCP tools are connected. A session that starts blind wastes the first 10 minutes re-discovering what was already known. + +2. **Sessions end with capture, never abruptly** — When a session ends, three things are captured: what was DONE, what is PENDING, and what was LEARNED. This is non-negotiable. Context lost between sessions is context the user must re-provide, which wastes their time. + +3. **Context preservation is a chain** — Handoff files pass state session-to-session. MEMORY.md accumulates permanent rules. Instincts track emerging patterns. Git commits preserve code state. Together, these four mechanisms create continuity that no single mechanism can provide alone. + +4. **Solution detection enables tooling** — .NET MCP tools (`get_diagnostics`, `find_symbol`, `get_project_graph`) require a loaded solution. Detecting the `.slnx`/`.sln` file on session start ensures these tools are available from the first prompt, not discovered mid-conversation. + +5. **Graceful degradation over hard failure** — If no handoff file exists, start clean. If no MEMORY.md exists, offer to create one on first learning. If no solution file is found, work without MCP tools. Never block a session because a context file is missing. 
+ +## Patterns + +### Session Start Protocol + +Execute this sequence at the beginning of every session: + +``` +STEP 1: Load Handoff + → Check for .claude/handoff.md + → If found: read and summarize pending work + → If not found: note "No handoff file — starting fresh" + +STEP 2: Load Memory + → Check for MEMORY.md (project root or .claude/) + → If found: scan for rules relevant to the likely task + → If not found: note "No memory file — will create on first learning" + +STEP 3: Load Instincts + → Check for .claude/instincts.md + → If found: load instincts at 0.7+ into active context + → If not found: note "No instincts file — will create on first observation" + +STEP 4: Detect .NET Solution + → Search for .slnx files in current directory + → If not found, search for .sln files + → If not found, search parent directories (up to 3 levels) + → If not found, search child directories (1 level) + → If found: confirm MCP tools are connected + → If not found: warn "No solution detected — MCP tools unavailable" + +STEP 5: Present Summary + "Session context loaded: + - Last session: [summary from handoff or 'no previous session'] + - Pending tasks: [list from handoff or 'none'] + - Active rules: [count from MEMORY.md] + - Active instincts: [count at 0.7+] + - Solution: [solution name and path, or 'not detected'] + Ready to continue. What would you like to work on?" +``` + +### Session End Protocol + +Execute this sequence when the session is ending: + +``` +STEP 1: Review Accomplishments + → List everything completed this session with file paths + → Include line numbers for significant changes + +STEP 2: Check for Uncommitted Changes + → Run git status + → If uncommitted changes exist: + "You have uncommitted changes. Want me to commit before wrapping up?" 
+ → If clean: note "All changes committed" + +STEP 3: Write Handoff + → Write .claude/handoff.md using the Handoff File Template (see below) + +STEP 4: Extract Learnings + → Review session for corrections from the user + → Generalize corrections into rules (via self-correction-loop) + → Write to MEMORY.md under appropriate category + +STEP 5: Update Instincts + → Review any new patterns observed during the session + → Update confidence scores in .claude/instincts.md + → Promote any instincts that reached 0.9 (via instinct-system) + +STEP 6: Confirm + "Session wrapped up: + - Handoff written to .claude/handoff.md + - [N] learnings added to MEMORY.md + - [N] instincts updated + Next session will pick up right where we left off." +``` + +### Solution Detection Strategy + +Find the .NET solution for MCP tool connectivity: + +``` +SEARCH ORDER: +1. Current directory: *.slnx, *.sln +2. Parent directory: *.slnx, *.sln (common in src/ subdirectory layouts) +3. Grandparent directory: *.slnx, *.sln (up to 3 levels) +4. 
Child directories: */**.slnx, */**.sln (1 level deep) + +PREFERENCE: +- .slnx over .sln (modern format) +- If multiple solutions found, prefer the one matching the directory name +- If still ambiguous, list all and ask the user + +AFTER DETECTION: +- Confirm MCP connection by running get_project_graph +- If MCP returns "loading", wait briefly and retry (solution may be initializing) +- Cache the solution path for the session — don't re-detect on every tool call +``` + +### Context Preservation Architecture + +Four mechanisms work together to prevent context loss: + +``` +FILE SCOPE LIFETIME PURPOSE +.claude/handoff.md Session Overwritten Pass state between sessions +MEMORY.md Project Permanent Store confirmed rules +.claude/instincts.md Project Evolving Track emerging patterns +Git commits Code Permanent Preserve code state + +FLOW: + Session N ends → writes handoff.md, updates MEMORY.md, updates instincts.md + Session N+1 starts → reads handoff.md, MEMORY.md, instincts.md + Result: zero context loss between sessions +``` + +### Handoff File Template + +The standard format for `.claude/handoff.md`: + +```markdown +# Session Handoff + +> Generated: 2025-07-15 | Branch: feature/order-validation + +## Completed +- [x] Added FluentValidation to CreateOrder command + - File: `src/Orders/Features/CreateOrder.cs` (lines 15-35) + - Validator: non-empty CustomerId, at least 1 item, positive quantities +- [x] Fixed N+1 query in GetOrderDetails + - File: `src/Orders/Features/GetOrderDetails.cs` (line 28) + - Added `.Include(o => o.Items)` to the query + +## Pending +- [ ] Add validation to UpdateOrder command (same pattern as CreateOrder) + - Start from: `src/Orders/Features/UpdateOrder.cs` + - Reference: CreateOrder validator for the established pattern +- [ ] Run full test suite — last run had 2 unrelated failures in Catalog module + +## Learned +- FluentValidation validators must be registered in the module's DI setup +- The N+1 in GetOrderDetails was hidden because test 
data seeds only 1 item per order + +## Context +- Branch: feature/order-validation +- Last commit: "Add CreateOrder validation + tests" +- Uncommitted changes: no +- Solution: src/MyApp.slnx +``` + +### Resuming from Handoff + +When a handoff file exists, present a clear summary and let the user decide: + +``` +SESSION RESUME FLOW: +1. Read .claude/handoff.md +2. Summarize concisely: + "Last session (2025-07-15) on branch feature/order-validation: + - Completed: CreateOrder validation, N+1 fix in GetOrderDetails + - Pending: UpdateOrder validation, test suite failures + Shall I continue with the UpdateOrder validation?" +3. Wait for user direction — never auto-start pending work +4. If the user wants something different, acknowledge and proceed + "Got it, setting aside the pending tasks. What would you like to work on?" +``` + +### First Session Bootstrap + +When no context files exist (brand new project or first Claude session): + +``` +BOOTSTRAP PROTOCOL: +1. No handoff.md → "No previous session found. Starting fresh." +2. No MEMORY.md → "No project memory found. I'll create one when we discover + project-specific rules." +3. No instincts.md → "No instincts file. I'll start tracking patterns + as we work together." +4. Detect solution → Run full detection, report findings +5. Offer convention scan: + "This is our first session. Want me to scan the codebase to learn + the project's conventions? (Uses the convention-learner skill)" +``` + +### Multi-Developer Handoff + +When the handoff might be read by a different developer or Claude session: + +``` +ENHANCED HANDOFF (add when multiple developers use Claude on the project): + +## Decisions Made +| Decision | Choice | Rationale | +|----------|--------|-----------| +| Validation library | FluentValidation | Already used in Catalog module | +| Error format | ProblemDetails | RFC 7807, consistent with API standard | + +## Open Questions +- Should we validate product existence at command level or handler level? 
+ - Current: handler level (after DB lookup) + - Trade-off: invalid product IDs return 500 instead of 400 + +## Dependencies +- Requires FluentValidation 11.x NuGet package (already referenced) +- No new infrastructure dependencies +``` + +## Anti-patterns + +### Starting Blind + +``` +# BAD — ignoring existing context +*Session starts* +Claude: "How can I help you today?" +*User: "Continue the order validation work"* +Claude: "What order validation work? Can you tell me what files to look at?" +*User wastes 5 minutes re-explaining* + +# GOOD — loading context automatically +*Session starts* +Claude: "Last session completed CreateOrder validation. Pending: UpdateOrder + validation (start from src/Orders/Features/UpdateOrder.cs). + Shall I continue with that?" +``` + +### Ending Without Capture + +``` +# BAD — session ends, context evaporates +User: "Thanks, that's all for today!" +Claude: "You're welcome! Happy to help anytime." +*Next session starts from scratch* + +# GOOD — capture before closing +User: "Thanks, that's all for today!" +Claude: "Before we wrap up, let me save our progress to .claude/handoff.md + so the next session picks up right where we left off." +*Writes handoff, extracts learnings, updates instincts* +``` + +### Overwriting Without Consent + +``` +# BAD — silently overwriting an existing handoff with different pending tasks +*Previous handoff has 3 pending tasks from another developer* +*This session worked on something different* +*Overwrites handoff, losing the other developer's pending tasks* + +# GOOD — check before overwriting +"There's an existing handoff from [date] with pending tasks: + - [task 1], [task 2], [task 3] + These appear unrelated to our session. 
Should I: + a) Merge our session with existing pending tasks + b) Overwrite (their tasks are done or no longer relevant) + c) Skip writing handoff this time" +``` + +### Skipping Solution Detection + +``` +# BAD — trying to use MCP tools without a loaded solution +Claude: "Let me check diagnostics..." *tool fails* +Claude: "Let me find the symbol..." *tool fails* +Claude: "I'll just read the files manually" *misses project-wide context* + +# GOOD — detect solution on start, verify MCP connectivity +*Session start* +Claude: "Detected solution at src/MyApp.slnx. MCP tools connected. + Project graph shows 5 projects with 3 test projects." +*All MCP tools work throughout the session* +``` + +### Bloated Handoffs + +``` +# BAD — handoff file is 500 lines with every detail +## Completed +- Changed line 15 in file A from X to Y because Z and also considered W... +*So long that the next session's context window is wasted on the handoff* + +# GOOD — concise, actionable handoff +## Completed +- [x] Added CreateOrder validation (src/Orders/Features/CreateOrder.cs:15-35) +*Reference the diff or commit for full details, don't duplicate them* +``` + +### Context File Sprawl + +``` +# BAD — multiple context files with overlapping purposes +.claude/ + handoff.md + handoff-backup.md + session-notes-july.md + session-notes-august.md + todo.md + context.md +*6 files, unclear which is authoritative* + +# GOOD — exactly 3 context files, each with a clear purpose +.claude/ + handoff.md ← session-to-session state (overwritten each time) + instincts.md ← emerging patterns (evolving) +MEMORY.md ← permanent rules (append-only, audited) +``` + +## Decision Guide + +| Scenario | Action | +|----------|--------| +| Starting a new session | Run full Session Start Protocol (5 steps) | +| User says "wrap up" / "done" / "that's all" | Run full Session End Protocol (6 steps) | +| No handoff.md exists | Start clean, create on first session end | +| No MEMORY.md exists | Offer to create on first 
correction or learning | +| No solution file found | Warn user, work without MCP tools, suggest creating one | +| Multiple solution files found | List all, ask user which to use | +| Handoff has pending tasks from another dev | Ask before overwriting: merge, overwrite, or skip | +| User wants to resume pending work | Summarize and confirm before starting | +| User wants something different from handoff | Acknowledge, proceed with new task, update handoff at end | +| Session had user corrections | Extract to MEMORY.md before ending | +| Session discovered new patterns | Update instincts.md before ending | +| First-ever session on a project | Run bootstrap protocol, offer convention scan | +| Solution is still loading (MCP returns "loading") | Wait 5 seconds, retry once, then proceed without MCP | +| Mid-session context getting large | Offload research to subagents, keep main context focused | +| User asks "what were we working on?" | Read handoff.md and summarize | diff --git a/.opencode/skills/split-memory/SKILL.md b/.opencode/skills/split-memory/SKILL.md new file mode 100644 index 00000000..c476e00d --- /dev/null +++ b/.opencode/skills/split-memory/SKILL.md @@ -0,0 +1,242 @@ +--- +name: split-memory +description: > + Modular CLAUDE.md management strategy for projects that outgrow a single + instruction file. Covers when and how to split a monolithic CLAUDE.md into + multiple files, organizing by concern, module, or team. Includes precedence + rules to prevent conflicting instructions. Load this skill when CLAUDE.md + exceeds 300 lines, when multiple teams need different instructions, when + the user mentions "split CLAUDE.md", "modular instructions", "too long", + "organize instructions", or "multiple CLAUDE files". +--- + +# Split Memory: Modular CLAUDE.md Strategy + +## Core Principles + +1. **Start monolithic, split when it hurts** — A single CLAUDE.md is simpler to maintain, easier to understand, and has no conflict risk. 
Only split when the file exceeds ~300 lines, when multiple teams need different instructions, or when finding the right rule takes too long. + +2. **Root CLAUDE.md is the index** — After splitting, the root CLAUDE.md becomes a concise index that points to detailed files. It contains only universal rules and references. Think of it as a table of contents, not the full book. + +3. **Claude auto-discovers `.claude/` files** — Claude Code automatically reads files in the `.claude/` directory. Use this to your advantage: place instruction files where Claude will find them without explicit loading directives. + +4. **No conflicting instructions across files** — When instructions span multiple files, contradictions cause unpredictable behavior. Establish clear precedence: root CLAUDE.md > module-level > team-level. Never define the same rule in two places. + +5. **Split by a single axis** — Split by concern (architecture, testing, API) OR by module (Orders, Catalog, Identity) OR by team. Never mix axes — it creates overlapping ownership and conflicting rules. 
+ +## Patterns + +### Pattern 1: Single File (Default) + +For most projects, one CLAUDE.md is sufficient: + +``` +project-root/ +├── CLAUDE.md # Everything in one file (under 300 lines) +├── src/ +└── tests/ +``` + +**When this works:** +- Single team, single architecture +- Under 300 lines of instructions +- Rules are easy to find with Ctrl+F + +**When to move on:** +- File exceeds 300 lines +- You spend time scrolling to find rules +- Multiple concerns compete for space (architecture, testing, deployment, conventions) + +### Pattern 2: Split by Concern + +Group instructions by domain (architecture, testing, deployment, etc.): + +``` +project-root/ +├── CLAUDE.md # Index + universal rules (~50 lines) +├── .claude/ +│ └── instructions/ +│ ├── architecture.md # Architecture patterns, module boundaries +│ ├── coding-standards.md # C# conventions, naming, formatting +│ ├── testing.md # Test strategy, fixtures, conventions +│ ├── api-design.md # Endpoint patterns, versioning, auth +│ ├── data-access.md # EF Core patterns, migrations +│ └── deployment.md # Docker, CI/CD, environments +``` + +Root CLAUDE.md becomes an index: + +```markdown +# [Project Name] + +## Universal Rules +- .NET 10 / C# 14 — use modern language features everywhere +- TimeProvider over DateTime.Now — always +- No repository pattern over EF Core + +## Detailed Instructions +See `.claude/instructions/` for topic-specific guidance: +- `architecture.md` — Project structure, module boundaries, patterns +- `coding-standards.md` — C# conventions, naming, formatting rules +- `testing.md` — Test strategy, fixtures, what to test and how +- `api-design.md` — Endpoint patterns, versioning, authentication +- `data-access.md` — EF Core usage, query patterns, migrations +- `deployment.md` — Docker, CI/CD pipeline, environment config +``` + +### Pattern 3: Split by Module + +For modular monoliths or large solutions, place instructions near the code they govern: + +``` +project-root/ +├── CLAUDE.md # Index + 
cross-cutting rules +├── src/ +│ ├── Modules/ +│ │ ├── Orders/ +│ │ │ ├── CLAUDE.md # Orders-specific patterns and rules +│ │ │ └── ... +│ │ ├── Catalog/ +│ │ │ ├── CLAUDE.md # Catalog-specific patterns and rules +│ │ │ └── ... +│ │ └── Identity/ +│ │ ├── CLAUDE.md # Identity-specific patterns and rules +│ │ └── ... +│ └── Shared/ +│ └── CLAUDE.md # Shared kernel rules +``` + +Module CLAUDE.md example: + +```markdown +# Orders Module + +## Architecture +This module uses Vertical Slice Architecture. Each feature is one file under Features/. + +## Domain Rules +- OrderId is a strongly-typed ID (not raw Guid) +- All monetary values use decimal, never double +- Order state transitions: Draft → Confirmed → Shipped → Delivered → Cancelled + +## Integration Events Published +- OrderCreated, OrderConfirmed, OrderShipped, OrderCancelled + +## Integration Events Consumed +- ProductPriceChanged (from Catalog), PaymentCompleted (from Billing) +``` + +### Pattern 4: Split by Team + +Place team-specific files in `.claude/teams/` (e.g., `backend.md`, `frontend.md`, `platform.md`). Root CLAUDE.md holds shared standards. Each team file covers only that team's conventions. + +### Pattern 5: Conditional Loading + +In root CLAUDE.md, add a "Load When Working On..." section that maps task domains to instruction files (e.g., "**API endpoints** → See `.claude/instructions/api-design.md`"). Universal rules stay in an "Always Loaded" section. + +### Precedence Rules + +When instructions exist in multiple files, apply this precedence: + +``` +HIGHEST PRIORITY: +1. Root CLAUDE.md — universal rules override everything +2. .claude/instructions/*.md — concern-specific rules +3. Module-level CLAUDE.md (src/Modules/X/CLAUDE.md) — module-specific rules +LOWEST PRIORITY: +4. Team-level files (.claude/teams/*.md) — team conventions + +CONFLICT RESOLUTION: +- If root says "use TimeProvider" and module says "use DateTime.Now" + → Root wins. Module file is wrong and should be fixed. 
+- If root is silent on a topic and module defines a rule + → Module rule applies within its scope. +- If two module files contradict each other + → Each applies only within its own module. No cross-module conflicts. +``` + +## Anti-patterns + +### Premature Splitting + +Do not split a sub-300-line CLAUDE.md into multiple files. The maintenance overhead of 6 tiny files exceeds the benefit. Keep it monolithic until finding rules becomes painful. + +### Conflicting Cross-File Instructions + +``` +// BAD — same topic defined differently in two files +# .claude/instructions/api-design.md +"Use Results.Ok() for all endpoint return types" + +# .claude/instructions/coding-standards.md +"Use TypedResults.Ok() for all endpoint return types" +*Claude gets contradictory instructions. Behavior is unpredictable.* + +// GOOD — one owner per topic +# .claude/instructions/api-design.md +"Use TypedResults.Ok() for all endpoint return types — provides OpenAPI metadata" + +# .claude/instructions/coding-standards.md +(no mention of API return types — that's api-design.md's domain) +``` + +### Split Without an Index + +``` +// BAD — files scattered without a map +project/ +├── CLAUDE.md (doesn't mention the other files) +├── .claude/ +│ └── instructions/ +│ ├── architecture.md +│ ├── testing.md +│ └── data-access.md +*Claude may not know these files exist or how they relate* + +// GOOD — root CLAUDE.md is the table of contents +project/ +├── CLAUDE.md (lists all instruction files and their scope) +├── .claude/ +│ └── instructions/ +│ ├── architecture.md +│ ├── testing.md +│ └── data-access.md +``` + +### Mixing Split Axes + +``` +// BAD — split by concern AND by module simultaneously +.claude/ +├── instructions/ +│ ├── architecture.md # talks about Orders module +│ └── testing.md # also talks about Orders module +├── modules/ +│ └── orders/ +│ └── instructions.md # also talks about architecture and testing +*Three files all have opinions about Orders testing. 
Who wins?* + +// GOOD — pick one axis +# Option A: Split by concern (if cross-cutting rules dominate) +.claude/instructions/architecture.md +.claude/instructions/testing.md + +# Option B: Split by module (if module-specific rules dominate) +src/Modules/Orders/CLAUDE.md +src/Modules/Catalog/CLAUDE.md +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| CLAUDE.md under 300 lines | Keep it as a single file | +| CLAUDE.md over 300 lines, single team | Split by concern into `.claude/instructions/` | +| Modular monolith with module-specific rules | Split by module with per-module CLAUDE.md | +| Multiple teams, different conventions | Split by team into `.claude/teams/` | +| Just started the project | Single CLAUDE.md — split later when needed | +| Rules are hard to find | Time to split — group by the most common lookup pattern | +| Two files contradict each other | Fix immediately — one owner per topic, clear precedence | +| Want to split by concern AND module | Pick one axis — the one that reduces conflicts most | +| Team member asks "where's the rule for X?" | If the answer isn't obvious in 5 seconds, reorganize | +| New module added to the system | Add a module-level CLAUDE.md only if it has unique rules | diff --git a/.opencode/skills/testing/SKILL.md b/.opencode/skills/testing/SKILL.md new file mode 100644 index 00000000..9867196f --- /dev/null +++ b/.opencode/skills/testing/SKILL.md @@ -0,0 +1,334 @@ +--- +name: testing +description: > + Testing strategy for .NET 10 applications. Covers xUnit v3, WebApplicationFactory + for integration tests, Testcontainers for real database testing, Verify for + snapshot testing, and the AAA pattern. 
+ Load this skill when writing tests, setting up test infrastructure, reviewing + test coverage, or when the user mentions "test", "xUnit", "WebApplicationFactory", + "Testcontainers", "integration test", "unit test", "bUnit", "snapshot test", + "Verify", "test coverage", "AAA pattern", "WireMock", or "FakeTimeProvider". +--- + +# Testing (.NET 10) + +## Core Principles + +1. **Integration tests are the highest-value tests** — A single `WebApplicationFactory` test covers routing, binding, validation, business logic, and persistence in one shot. Start here before writing unit tests. +2. **Real databases in tests** — Use Testcontainers to spin up real PostgreSQL/SQL Server instances. In-memory providers hide real bugs (transactions, constraints, SQL generation). +3. **AAA pattern is mandatory** — Every test has three clearly separated sections: Arrange, Act, Assert. No mixing. +4. **Test behavior, not implementation** — Tests should survive refactoring. Test what the system does, not how it does it. + +## Patterns + +### xUnit v3 Basics + +```csharp +public class OrderServiceTests +{ + [Fact] + public async Task CreateOrder_WithValidItems_ReturnsSuccessResult() + { + // Arrange + var db = CreateInMemoryDb(); + var clock = new FakeTimeProvider(new DateTimeOffset(2025, 1, 15, 0, 0, 0, TimeSpan.Zero)); + var service = new OrderService(db, clock); + var request = new CreateOrderRequest("customer-1", [new("product-1", 2)]); + + // Act + var result = await service.CreateAsync(request); + + // Assert + Assert.True(result.IsSuccess); + Assert.NotEqual(Guid.Empty, result.Value.Id); + Assert.Equal(clock.GetUtcNow(), result.Value.CreatedAt); + } + + [Theory] + [InlineData("")] + [InlineData(null)] + public async Task CreateOrder_WithInvalidCustomerId_ReturnsFailure(string? 
customerId)
+    {
+        // Arrange
+        var service = CreateService();
+
+        // Act
+        var result = await service.CreateAsync(new CreateOrderRequest(customerId!, []));
+
+        // Assert
+        Assert.False(result.IsSuccess);
+    }
+}
+```
+
+### Integration Tests with WebApplicationFactory
+
+The highest-value test pattern. Tests the full HTTP pipeline.
+
+```csharp
+// Fixtures/ApiFixture.cs
+public class ApiFixture : WebApplicationFactory<Program>, IAsyncLifetime
+{
+    private readonly PostgreSqlContainer _postgres = new PostgreSqlBuilder()
+        .WithImage("postgres:17")
+        .Build();
+
+    protected override void ConfigureWebHost(IWebHostBuilder builder)
+    {
+        builder.ConfigureServices(services =>
+        {
+            // Replace the real DB with Testcontainers
+            services.RemoveAll<DbContextOptions<AppDbContext>>();
+            services.AddDbContext<AppDbContext>(options =>
+                options.UseNpgsql(_postgres.GetConnectionString()));
+        });
+    }
+
+    public async Task InitializeAsync()
+    {
+        await _postgres.StartAsync();
+
+        // Apply migrations
+        using var scope = Services.CreateScope();
+        var db = scope.ServiceProvider.GetRequiredService<AppDbContext>();
+        await db.Database.MigrateAsync();
+    }
+
+    public new async Task DisposeAsync()
+    {
+        await _postgres.DisposeAsync();
+        await base.DisposeAsync();
+    }
+}
+```
+
+```csharp
+// Tests/Orders/CreateOrderTests.cs
+public class CreateOrderTests(ApiFixture fixture) : IClassFixture<ApiFixture>
+{
+    private readonly HttpClient _client = fixture.CreateClient();
+
+    [Fact]
+    public async Task CreateOrder_ReturnsCreated_WithValidRequest()
+    {
+        // Arrange
+        var request = new CreateOrderRequest("customer-1", [new("product-1", 2)]);
+
+        // Act
+        var response = await _client.PostAsJsonAsync("/api/orders", request);
+
+        // Assert
+        Assert.Equal(HttpStatusCode.Created, response.StatusCode);
+
+        var order = await response.Content.ReadFromJsonAsync<OrderResponse>();
+        Assert.NotNull(order);
+        Assert.NotEqual(Guid.Empty, order.Id);
+        Assert.Contains("/api/orders/", response.Headers.Location?.ToString());
+    }
+
+    [Fact]
+    public async Task 
CreateOrder_ReturnsValidationProblem_WithEmptyItems() + { + // Arrange + var request = new CreateOrderRequest("customer-1", []); + + // Act + var response = await _client.PostAsJsonAsync("/api/orders", request); + + // Assert + Assert.Equal(HttpStatusCode.BadRequest, response.StatusCode); + } +} +``` + +### Testcontainers for Real Database Testing + +```csharp +// For SQL Server +private readonly MsSqlContainer _mssql = new MsSqlBuilder() + .WithImage("mcr.microsoft.com/mssql/server:2022-latest") + .Build(); + +// For PostgreSQL +private readonly PostgreSqlContainer _postgres = new PostgreSqlBuilder() + .WithImage("postgres:17") + .Build(); + +// For Redis +private readonly RedisContainer _redis = new RedisBuilder() + .WithImage("redis:7") + .Build(); +``` + +### Verify Snapshot Testing + +Use Verify for complex response objects where manual assertions would be fragile. + +```csharp +[Fact] +public async Task GetOrder_MatchesSnapshot() +{ + // Arrange + await SeedOrder(fixture); + + // Act + var response = await _client.GetAsync("/api/orders/known-id"); + var content = await response.Content.ReadAsStringAsync(); + + // Assert — compares against a stored .verified.txt file + await Verify(content); +} +``` + +On first run, Verify creates a `.verified.txt` file. On subsequent runs, it compares output. If the output changes, the test fails and shows a diff. 
+
+### Test Data Builders
+
+```csharp
+public class OrderBuilder
+{
+    private string _customerId = "default-customer";
+    private List<OrderItem> _items = [new("product-1", 1, 9.99m)];
+    private OrderStatus _status = OrderStatus.Pending;
+
+    public OrderBuilder WithCustomer(string customerId)
+    {
+        _customerId = customerId;
+        return this;
+    }
+
+    public OrderBuilder WithItems(params OrderItem[] items)
+    {
+        _items = [..items];
+        return this;
+    }
+
+    public OrderBuilder WithStatus(OrderStatus status)
+    {
+        _status = status;
+        return this;
+    }
+
+    public Order Build() => Order.Create(_customerId, _items, _status);
+}
+
+// Usage in tests
+var order = new OrderBuilder()
+    .WithCustomer("vip-customer")
+    .WithStatus(OrderStatus.Confirmed)
+    .Build();
+```
+
+### Testing Time-Dependent Code
+
+Use `TimeProvider` (built into .NET 8+) and `FakeTimeProvider` from `Microsoft.Extensions.TimeProvider.Testing`.
+
+```csharp
+[Fact]
+public async Task ExpireOrders_MarksOldPendingOrdersAsExpired()
+{
+    // Arrange
+    var clock = new FakeTimeProvider(new DateTimeOffset(2025, 6, 1, 0, 0, 0, TimeSpan.Zero));
+    var db = CreateDb();
+    var order = Order.Create("customer-1", items, clock.GetUtcNow());
+    db.Orders.Add(order);
+    await db.SaveChangesAsync();
+
+    // Advance time past expiry threshold
+    clock.Advance(TimeSpan.FromDays(31));
+
+    var handler = new ExpireOrders.Handler(db, clock);
+
+    // Act
+    await handler.Handle(new ExpireOrders.Command(), CancellationToken.None);
+
+    // Assert
+    var updated = await db.Orders.FindAsync(order.Id);
+    Assert.Equal(OrderStatus.Expired, updated!.Status);
+}
+```
+
+### Test Naming Convention
+
+Use the pattern: `MethodName_StateUnderTest_ExpectedBehavior`
+
+```csharp
+[Fact] public async Task CreateOrder_WithValidItems_ReturnsSuccessResult() { }
+[Fact] public async Task CreateOrder_WithEmptyItems_ReturnsValidationError() { }
+[Fact] public async Task GetOrder_WithNonExistentId_ReturnsNotFound() { }
+[Fact] public async Task 
CancelOrder_WhenAlreadyShipped_ReturnsConflict() { }
+```
+
+## Anti-patterns
+
+### Don't Use In-Memory Database for Integration Tests
+
+```csharp
+// BAD — hides real SQL behavior, transactions, constraints
+services.AddDbContext<AppDbContext>(options =>
+    options.UseInMemoryDatabase("TestDb"));
+
+// GOOD — Testcontainers with real database
+services.AddDbContext<AppDbContext>(options =>
+    options.UseNpgsql(testContainer.GetConnectionString()));
+```
+
+### Don't Test Implementation Details
+
+```csharp
+// BAD — testing that a specific repository method was called
+mock.Verify(x => x.AddAsync(It.IsAny<Order>()), Times.Once);
+mock.Verify(x => x.SaveChangesAsync(), Times.Once);
+
+// GOOD — test the observable outcome
+var order = await db.Orders.FindAsync(orderId);
+Assert.NotNull(order);
+Assert.Equal(OrderStatus.Created, order.Status);
+```
+
+### Don't Share Mutable State Between Tests
+
+```csharp
+// BAD — static shared state
+private static readonly AppDbContext SharedDb = CreateDb();
+
+// GOOD — fresh state per test (or use IAsyncLifetime for shared fixtures)
+private AppDbContext CreateDb() => new(new DbContextOptionsBuilder<AppDbContext>()...);
+```
+
+### Don't Write Assertion-Free Tests
+
+```csharp
+// BAD — no assertion, only checks it doesn't throw
+[Fact]
+public async Task CreateOrder_Works()
+{
+    await service.CreateAsync(request);
+    // "it didn't throw, so it works!" 
— NO +} + +// GOOD — assert the expected outcome +[Fact] +public async Task CreateOrder_PersistsOrderToDatabase() +{ + var result = await service.CreateAsync(request); + + var persisted = await db.Orders.FindAsync(result.Value.Id); + Assert.NotNull(persisted); + Assert.Equal(request.CustomerId, persisted.CustomerId); +} +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Testing an API endpoint | `WebApplicationFactory` integration test | +| Testing business logic in isolation | Unit test with fakes/stubs | +| Database-dependent tests | Testcontainers (real DB) | +| Complex response validation | Verify snapshot testing | +| Time-dependent logic | `FakeTimeProvider` | +| External API dependency | `WireMock.Net` or `HttpMessageHandler` stub | +| Parameterized test cases | `[Theory]` with `[InlineData]` or `[MemberData]` | +| Test data setup | Builder pattern | +| Shared expensive fixture | `IClassFixture` with `IAsyncLifetime` | diff --git a/.opencode/skills/verification-loop/SKILL.md b/.opencode/skills/verification-loop/SKILL.md new file mode 100644 index 00000000..2c62832c --- /dev/null +++ b/.opencode/skills/verification-loop/SKILL.md @@ -0,0 +1,265 @@ +--- +name: verification-loop +description: > + 7-phase .NET verification pipeline with structured PASS/FAIL reporting. + Ensures every change is build-verified, diagnostics-clean, anti-pattern-free, + test-passing, security-scanned, format-compliant, and diff-reviewed before + marking work as complete. Load this skill when: "verify", "check everything", + "run verification", "pre-PR check", "is this ready", "validate changes", + "build and test", "quality gate", "pipeline check", "ready to merge". +--- + +# Verification Loop + +## Core Principles + +1. **Build is the minimum bar** — Never mark a task complete without a green `dotnet build`. If it doesn't compile, nothing else matters. A broken build wastes every subsequent phase's time. + +2. 
**Automated checks before manual review** — Tools catch what humans miss. `get_diagnostics` finds nullability issues, `detect_antipatterns` catches `DateTime.Now` usage, and `dotnet test` proves behavior. Run all of these before eyeballing code. + +3. **Short-circuit on critical failures** — If the build fails, don't run tests. If tests fail, don't run formatting checks. Failing fast saves time and keeps the feedback loop tight. Fix the most fundamental issue first. + +4. **Structured reporting** — Every phase gets an explicit PASS, FAIL, or WARN status with details. No ambiguity. "It looks fine" is not a verification result. A table with statuses is. + +5. **Verification is iterative** — A single pass rarely produces all-green. Fix the first failure, re-run from that phase, repeat until clean. The loop is the point. + +## Patterns + +### 7-Phase Verification Pipeline + +Execute phases in order. Short-circuit on CRITICAL failures (Phase 1 and Phase 4). + +**Phase 1: Build (CRITICAL)** +``` +dotnet build --no-restore +``` +Status: PASS (0 errors) / FAIL (N errors) +Short-circuit: If FAIL, stop all subsequent phases. Fix build errors first. +Note: Capture warning count even on PASS — new warnings are tracked in Phase 2. + +**Phase 2: Diagnostics** +``` +MCP: get_diagnostics(scope: "file", path: each changed file) +MCP: get_diagnostics(scope: "project", path: changed project) +``` +Status: PASS (0 new warnings) / WARN (N new warnings) / FAIL (new errors) +Check: Compare against baseline — only flag NEW warnings introduced by the current changes. +Common findings: CS8600 (null reference), CS8602 (dereference of nullable), CS0219 (unused variable). 
+ +**Phase 3: Anti-pattern Scan** +``` +MCP: detect_antipatterns(file: each changed file) +``` +Status: PASS (0 anti-patterns) / WARN (N anti-patterns found) +Catches: async void, sync-over-async, `new HttpClient()`, `DateTime.Now`, broad catch, +logging string interpolation, missing CancellationToken, EF queries without AsNoTracking. + +**Phase 4: Tests (CRITICAL)** +``` +dotnet test --no-build +``` +Status: PASS (all green) / FAIL (N failures) +Short-circuit: If FAIL, stop remaining phases. Fix failing tests before proceeding. +Note: If no test project exists, status is SKIP with a recommendation to add tests. + +**Phase 5: Security Scan** +``` +dotnet list package --vulnerable --include-transitive +``` +Scan changed files for: hardcoded secrets, connection strings, API keys. +Check: OWASP patterns in new code (SQL injection, XSS, missing auth attributes). +Status: PASS / WARN (medium/low findings) / FAIL (critical/high vulnerabilities) + +**Phase 6: Format Compliance** +``` +dotnet format --verify-no-changes +``` +Status: PASS (no changes needed) / FAIL (formatting violations found) +Fix: Run `dotnet format` and include formatting changes in the commit. + +**Phase 7: Diff Review** +``` +git diff --stat +git diff +``` +Status: PASS (changes match intent) / WARN (unexpected files changed) +Check: +- No accidental files (`.vs/`, `bin/`, `obj/`, `.env`, secrets) +- No unrelated changes mixed in +- Commit scope matches the task description +- No debug code (`Console.WriteLine`, `#if DEBUG` blocks in production paths) + +### Structured Report Format + +After running all phases, produce a verification report: + +```markdown +## Verification Report + +| Phase | Status | Details | +|-------|--------|---------| +| 1. Build | PASS | 0 errors, 2 warnings (pre-existing) | +| 2. Diagnostics | WARN | CS8600 in OrderService.cs:47 — possible null reference | +| 3. Anti-patterns | PASS | 0 anti-patterns in changed files | +| 4. 
Tests | PASS | 42 passed, 0 failed, 0 skipped | +| 5. Security | PASS | No vulnerable packages, no secrets detected | +| 6. Format | PASS | No formatting violations | +| 7. Diff Review | PASS | 3 files changed, all match task scope | + +**Overall: PASS (with warnings)** + +### Action Items +- [ ] Fix CS8600 in OrderService.cs:47 — add null check or use null-forgiving operator with justification +``` + +### Fix-and-Retry Loop + +When a phase fails, follow this exact sequence: + +``` +1. IDENTIFY — Which phase failed? What's the specific error? +2. FIX — Make the minimal change to resolve the issue +3. RE-RUN — Start from the failed phase (not from Phase 1, unless the fix changed code) + Exception: If the fix involved code changes, re-run from Phase 1 (build) +4. REPEAT — Until all phases pass or you've identified an issue that needs user input + +EXAMPLE: +Phase 4 fails: OrderServiceTests.CreateOrder_ReturnsCreated fails with 404 + → Fix: Route was "/orders" but test expected "/api/orders" + → Re-run from Phase 1 (code changed) → Build PASS → Phase 4 PASS + → Continue Phase 5, 6, 7 +``` + +### Pre-PR Verification + +Before creating any pull request, run the full 7-phase pipeline. This is non-negotiable. + +``` +PRE-PR CHECKLIST: +1. All 7 phases PASS (warnings acceptable only if pre-existing) +2. No new warnings introduced +3. All new code has corresponding tests +4. Diff review confirms changes match the PR description +5. 
No TODO comments left unresolved (or tracked in issues) + +Only after all 7 phases pass: +→ Create the PR with the verification report as part of the PR description +``` + +### Quick Verification + +For minor changes (typo fix, config change, documentation), run a subset: + +``` +QUICK VERIFICATION (3 phases): +Phase 1: dotnet build +Phase 4: dotnet test +Phase 6: dotnet format --verify-no-changes + +Use when: +- Single-line bug fix with existing test coverage +- Configuration change +- Documentation-only change +- Dependency version bump (also add Phase 5 for security) +``` + +### Post-Refactor Verification + +After structural refactoring, focus on correctness: + +``` +POST-REFACTOR VERIFICATION (4 phases): +Phase 1: dotnet build — refactors often break compilation +Phase 2: get_diagnostics — refactors often introduce new warnings +Phase 3: detect_antipatterns — refactors can accidentally introduce anti-patterns +Phase 4: dotnet test — the ultimate refactor validation + +Skip Phase 5-7 unless the refactor touches security-sensitive code or public APIs. +``` + +## Anti-patterns + +### Skipping Verification Entirely + +``` +# BAD — "it compiles in my head" +"I've made the changes. The code looks correct. Let me create the PR." +# No build run, no tests, no diagnostics — hope-driven development + +# GOOD — verify before declaring done +"Let me run the verification pipeline before we create the PR." +→ Phase 1: Build PASS → Phase 2: Diagnostics PASS → ... → Phase 7: PASS +"All 7 phases passed. Creating the PR now." +``` + +### Running All Phases When Build Fails + +``` +# BAD — wasting time on downstream checks +Phase 1: Build FAIL (3 errors) +Phase 2: Running diagnostics anyway... +Phase 3: Running anti-pattern scan... +Phase 4: Running tests... (they'll fail because build failed) + +# GOOD — short-circuit and fix +Phase 1: Build FAIL (3 errors) +→ STOP. Fix the 3 build errors first. +→ Re-run from Phase 1. 
+``` + +### Ignoring Warnings + +``` +# BAD — "warnings are just suggestions" +Phase 2: 12 new CS8600 warnings introduced +"These are just warnings, not errors. Moving on." +# Three weeks later: NullReferenceException in production + +# GOOD — treat new warnings as failures +Phase 2: 12 new CS8600 warnings introduced +"12 new nullability warnings detected. Fixing before proceeding." +→ Add null checks or non-nullable assertions with justification +→ Re-run Phase 2: 0 new warnings. PASS. +``` + +### Manual-Only Verification + +``` +# BAD — reading code instead of running tools +"Let me read through OrderService.cs... looks good to me." +# Missed: DateTime.Now on line 23, async void on line 67, CS8600 on line 91 + +# GOOD — tools first, then human judgment +→ detect_antipatterns: Found DateTime.Now (line 23), async void (line 67) +→ get_diagnostics: CS8600 on line 91 +"Found 3 issues via automated analysis. Fixing all three before manual review." +``` + +### Cherry-Picking Phases + +``` +# BAD — only running the phases you think are relevant +"It's just a service change, I'll skip the security scan." +# The service change added a raw SQL query with string concatenation + +# GOOD — full pipeline for non-trivial changes +When in doubt, run all 7 phases. The cost of running extra phases is minutes. +The cost of missing a security issue is days of incident response. 
+``` + +## Decision Guide + +| Scenario | Phases | Notes | +|----------|--------|-------| +| Feature complete | All 7 | Full pipeline, no shortcuts | +| Bug fix (with test) | 1, 2, 4 | Build, diagnostics, tests | +| Bug fix (no test) | 1, 2, 3, 4 | Add a test, then verify | +| Formatting only | 6 | Format check is sufficient | +| Dependency update | 1, 4, 5 | Build, tests, security | +| Pre-PR | All 7 | Non-negotiable full pipeline | +| After refactor | 1, 2, 3, 4 | Focus on correctness | +| Config change | 1, 4 | Build and test | +| New endpoint added | All 7 | Full pipeline including security | +| Test-only changes | 1, 4 | Build and run the new tests | +| Performance optimization | 1, 2, 3, 4 | Correctness first, benchmark separately | +| CI failure investigation | Run the failing phase locally | Reproduce, fix, verify | diff --git a/.opencode/skills/vertical-slice/SKILL.md b/.opencode/skills/vertical-slice/SKILL.md new file mode 100644 index 00000000..edf223e5 --- /dev/null +++ b/.opencode/skills/vertical-slice/SKILL.md @@ -0,0 +1,297 @@ +--- +name: vertical-slice +description: > + Vertical Slice Architecture (VSA) for .NET applications — one of several + supported architectures in dotnet-claude-kit. Covers feature folders, endpoint + grouping, and handler patterns for Mediator, Wolverine, and raw handler classes. + Load this skill when the architecture-advisor recommends VSA, when working in + an existing VSA codebase, when adding features to a feature-folder project, + or when discussing vertical slice patterns, feature folders, or handler patterns. +--- + +# Vertical Slice Architecture (VSA) + +## Core Principles + +1. **Organize by feature, not by layer** — Each feature is a self-contained vertical slice containing its endpoint, handler, request/response types, and validation. No more jumping between Controllers/, Services/, Repositories/ folders. +2. **Minimize cross-feature coupling** — Features should not reference each other directly. 
Shared concerns go in a `Common/` or `Shared/` directory. +3. **One file per feature is fine** — A simple CRUD endpoint doesn't need 5 files spread across layers. Start with everything in one file, extract only when complexity demands it. +4. **The handler is the unit of work** — Each handler does one thing. No god-services with 20 methods. + +## Patterns + +### Feature Folder Structure + +``` +src/ + MyApp.Api/ + Features/ + Orders/ + CreateOrder.cs # Request, Handler, Response, Endpoint — all in one file + GetOrder.cs + ListOrders.cs + CancelOrder.cs + Shared/ + OrderMapper.cs # Shared within the Orders feature only + Products/ + CreateProduct.cs + GetProduct.cs + Common/ + Behaviors/ + ValidationBehavior.cs # Cross-cutting Mediator pipeline behavior + Persistence/ + AppDbContext.cs + Extensions/ + ServiceCollectionExtensions.cs + Program.cs +``` + +### Pattern A: Mediator Handlers (Recommended Default) + +Source-generated mediator — MIT licensed, no reflection, Native AOT compatible. Uses `IRequest` / `IRequestHandler` with pipeline behaviors. Near-identical API to MediatR but faster and free. Package: `Mediator.Abstractions` + `Mediator.SourceGenerator`. 
+
+```csharp
+// Features/Orders/CreateOrder.cs
+
+public static class CreateOrder
+{
+    public record Command(string CustomerId, List<OrderItemDto> Items) : IRequest<Result<OrderResponse>>;
+
+    public record OrderItemDto(string ProductId, int Quantity);
+
+    public record OrderResponse(Guid Id, decimal Total, DateTime CreatedAt);
+
+    public class Validator : AbstractValidator<Command>
+    {
+        public Validator()
+        {
+            RuleFor(x => x.CustomerId).NotEmpty();
+            RuleFor(x => x.Items).NotEmpty();
+            RuleForEach(x => x.Items).ChildRules(item =>
+            {
+                item.RuleFor(x => x.ProductId).NotEmpty();
+                item.RuleFor(x => x.Quantity).GreaterThan(0);
+            });
+        }
+    }
+
+    internal sealed class Handler(AppDbContext db, TimeProvider clock) : IRequestHandler<Command, Result<OrderResponse>>
+    {
+        public async ValueTask<Result<OrderResponse>> Handle(Command request, CancellationToken ct)
+        {
+            var order = Order.Create(request.CustomerId, request.Items, clock.GetUtcNow());
+            db.Orders.Add(order);
+            await db.SaveChangesAsync(ct);
+
+            return Result.Success(new OrderResponse(order.Id, order.Total, order.CreatedAt));
+        }
+    }
+}
+
+// Registration in Program.cs or module DI
+builder.Services.AddMediator();
+
+// Features/Orders/OrderEndpoints.cs — auto-discovered via IEndpointGroup
+public sealed class OrderEndpoints : IEndpointGroup
+{
+    public void Map(IEndpointRouteBuilder app)
+    {
+        var group = app.MapGroup("/api/orders").WithTags("Orders");
+
+        group.MapPost("/", async (CreateOrder.Command command, ISender sender, CancellationToken ct) =>
+        {
+            var result = await sender.Send(command, ct);
+            return result.IsSuccess
+                ? TypedResults.Created($"/api/orders/{result.Value.Id}", result.Value)
+                : result.ToProblemDetails();
+        })
+        .WithName("CreateOrder").Produces<OrderResponse>(201)
+        .ProducesValidationProblem()
+        .AddEndpointFilter<ValidationFilter<CreateOrder.Command>>();
+    }
+}
+```
+
+### Pattern B: Wolverine Handlers
+
+Convention-based — no interfaces to implement. Wolverine discovers handlers by method signature.
+
+```csharp
+// Features/Orders/CreateOrder.cs
+
+public static class CreateOrder
+{
+    public record Command(string CustomerId, List<OrderItemDto> Items);
+
+    public record OrderItemDto(string ProductId, int Quantity);
+
+    public record OrderResponse(Guid Id, decimal Total, DateTime CreatedAt);
+
+    // Wolverine discovers this by convention (static Handle method)
+    public static async Task<Result<OrderResponse>> Handle(
+        Command command,
+        AppDbContext db,
+        TimeProvider clock,
+        CancellationToken ct)
+    {
+        var order = Order.Create(command.CustomerId, command.Items, clock.GetUtcNow());
+        db.Orders.Add(order);
+        await db.SaveChangesAsync(ct);
+        return Result.Success(new OrderResponse(order.Id, order.Total, order.CreatedAt));
+    }
+}
+```
+
+### Pattern C: Raw Handler Classes (No Library)
+
+Direct handler classes with no external dependency. Good for small projects or teams that want full control.
+
+```csharp
+// Features/Orders/CreateOrder.cs
+
+public static class CreateOrder
+{
+    public record Command(string CustomerId, List<OrderItemDto> Items);
+
+    public record OrderItemDto(string ProductId, int Quantity);
+
+    public record OrderResponse(Guid Id, decimal Total, DateTime CreatedAt);
+
+    internal class Handler(AppDbContext db, TimeProvider clock)
+    {
+        public async Task<Result<OrderResponse>> ExecuteAsync(Command command, CancellationToken ct)
+        {
+            var order = Order.Create(command.CustomerId, command.Items, clock.GetUtcNow());
+            db.Orders.Add(order);
+            await db.SaveChangesAsync(ct);
+
+            return Result.Success(new OrderResponse(order.Id, order.Total, order.CreatedAt));
+        }
+    }
+}
+
+// Endpoint wiring — Result maps to HTTP response
+group.MapPost("/", async (CreateOrder.Command command, CreateOrder.Handler handler, CancellationToken ct) =>
+{
+    var result = await handler.ExecuteAsync(command, ct);
+    return result.IsSuccess
+        ?
TypedResults.Created($"/api/orders/{result.Value.Id}", result.Value)
+        : result.ToProblemDetails();
+});
+```
+
+### Adding Module Boundaries (Optional)
+
+For larger applications that grow beyond a single project, introduce module boundaries. Each module is a separate class library with its own features and DbContext.
+
+```
+src/
+  MyApp.Api/                 # Host — wires modules together
+    Program.cs
+    Modules/
+      ModuleExtensions.cs    # app.MapOrderModule(), app.MapCatalogModule()
+  MyApp.Orders/              # Module — own features, own DbContext
+    Features/
+      CreateOrder.cs
+    Persistence/
+      OrdersDbContext.cs
+    OrdersModule.cs          # IServiceCollection + IEndpointRouteBuilder extensions
+  MyApp.Catalog/             # Module
+    Features/
+      CreateProduct.cs
+    Persistence/
+      CatalogDbContext.cs
+    CatalogModule.cs
+```
+
+Modules communicate via:
+- **Integration events** (preferred) — async, decoupled via Wolverine or MassTransit
+- **Shared contracts** — a `MyApp.Contracts` project with DTOs/interfaces (use sparingly)
+
+### Shared Concerns
+
+Cross-cutting concerns live outside feature folders:
+
+```csharp
+// Common/Behaviors/ValidationBehavior.cs (Mediator pipeline)
+public sealed class ValidationBehavior<TRequest, TResponse>(IEnumerable<IValidator<TRequest>> validators)
+    : IPipelineBehavior<TRequest, TResponse>
+    where TRequest : IMessage
+{
+    public async ValueTask<TResponse> Handle(
+        TRequest request,
+        MessageHandlerDelegate<TRequest, TResponse> next,
+        CancellationToken ct)
+    {
+        var context = new ValidationContext<TRequest>(request);
+        var failures = validators
+            .Select(v => v.Validate(context))
+            .SelectMany(r => r.Errors)
+            .Where(f => f is not null)
+            .ToList();
+
+        if (failures.Count > 0)
+            throw new ValidationException(failures);
+
+        return await next(request, ct);
+    }
+}
+```
+
+## Anti-patterns
+
+### Don't Create Layered Abstractions Within a Slice
+
+```csharp
+// BAD — a feature folder with its own service layer and repository
+Features/
+  Orders/
+    CreateOrder.cs
+    IOrderService.cs       # unnecessary abstraction
+    OrderService.cs        # unnecessary abstraction
+    IOrderRepository.cs    # unnecessary
abstraction + OrderRepository.cs # unnecessary abstraction + +// GOOD — handler talks directly to DbContext +Features/ + Orders/ + CreateOrder.cs # handler uses AppDbContext directly +``` + +### Don't Cross-reference Features Directly + +```csharp +// BAD — CreateOrder directly calls GetProduct handler +var product = await _getProductHandler.Handle(new GetProduct.Query(productId)); + +// GOOD — query the database directly or use a shared read model +var product = await db.Products.FindAsync(productId, ct); +``` + +### Don't Put Everything in One God Feature File + +```csharp +// BAD — 500-line file with CRUD + business logic + mapping +public static class Orders +{ + // Create, Read, Update, Delete, Cancel, Refund, Export... +} + +// GOOD — one file per operation +Features/Orders/CreateOrder.cs +Features/Orders/GetOrder.cs +Features/Orders/CancelOrder.cs +``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| New project (default) | Pattern A — Mediator (source-generated, MIT, fast) | +| Need mediator + messaging in one lib | Pattern B — Wolverine (also handles events/queues) | +| Want full control, no dependencies | Pattern C — Raw handler classes | +| Existing MediatR codebase with license | Keep MediatR if licensed; otherwise migrate to Mediator (near-identical API) | +| Monolith growing complex | Add module boundaries, keep VSA within each module | +| Simple CRUD feature | Single file: request + handler + endpoint | +| Complex feature (saga, events) | Multiple files in feature folder, still colocated | +| Sharing logic between features | Extract to `Common/` — not to another feature | diff --git a/.opencode/skills/workflow-mastery/SKILL.md b/.opencode/skills/workflow-mastery/SKILL.md new file mode 100644 index 00000000..53d7d6da --- /dev/null +++ b/.opencode/skills/workflow-mastery/SKILL.md @@ -0,0 +1,257 @@ +--- +name: workflow-mastery +description: > + Claude Code workflow mastery for .NET developers. 
Covers parallel execution + with git worktrees, plan mode strategy, verification loops, auto-formatting + hooks, permission setup for dotnet CLI, prompting techniques, and subagent + patterns — all adapted for the .NET ecosystem. + Load this skill when setting up Claude Code for a .NET project, optimizing + workflows, running parallel sessions, or when the user mentions "productivity", + "workflow", "parallel", "worktree", "plan mode", "permissions", "hooks", + "10x", "setup Claude Code", or "speed up development". + Inspired by tips from Boris Cherny (creator of Claude Code) and the Anthropic team. +--- + +# Workflow Mastery for .NET + +## Core Principles + +1. **Parallel over sequential** — Run 3-5 Claude sessions simultaneously using git worktrees. Build a feature in one, fix a bug in another, run tests in a third. The single biggest productivity unlock. +2. **Plan then execute** — For any non-trivial task, start in plan mode, iterate until the plan is bulletproof, then switch to auto-accept. A good plan means Claude 1-shots the implementation. +3. **Verification closes the loop** — Give Claude a way to prove its work: `dotnet build`, `dotnet test`, `get_diagnostics` via MCP. This single practice 2-3x the quality of the output. +4. **Automate the repetitive** — If you do it more than once a day, make it a hook, a slash command, or a subagent. Pre-allow safe permissions. Eliminate friction. +5. **Compound your knowledge** — Every correction becomes a rule in `MEMORY.md` (see `self-correction-loop` skill). Every PR review adds a learning. Over time, Claude's mistake rate drops because your project's knowledge base grows. + +## Patterns + +### Parallel Sessions with Git Worktrees + +The biggest productivity multiplier. Each worktree gets its own Claude session, its own files, zero conflicts. 
+ +```bash +# Create worktrees for parallel work +git worktree add ../my-project-feature origin/main +git worktree add ../my-project-bugfix origin/main +git worktree add ../my-project-tests origin/main + +# Start Claude in each (separate terminal tabs) +cd ../my-project-feature && claude +cd ../my-project-bugfix && claude +cd ../my-project-tests && claude +``` + +**Practical .NET workflow:** + +| Worktree | Task | Claude Session | +|----------|------|---------------| +| `feature` | Build new endpoint + handler | Main development | +| `bugfix` | Fix the failing CI test | Autonomous bug fix | +| `tests` | Write integration tests for existing feature | Test generation | +| `analysis` | Query the Roslyn MCP, read logs, review architecture | Read-only research | + +**Tips:** +- Name your terminal tabs by task so you never lose track +- Use shell aliases (`alias zf='cd ../my-project-feature'`) for one-keystroke switching +- Enable terminal notifications so you know when a session needs input + +### Auto-Format Hook for .NET + +Catch formatting issues on every file write — eliminates the "CI failed on formatting" loop. + +```json +// .claude/settings.json +{ + "hooks": { + "PostToolUse": [ + { + "matcher": "Write|Edit", + "hooks": [ + { + "type": "command", + "command": "dotnet format --include \"$CLAUDE_FILE_PATH\" --no-restore 2>/dev/null || true" + } + ] + } + ] + } +} +``` + +Why `|| true`: The hook should never block Claude's workflow. If formatting fails (e.g., on a non-C# file), silently continue. + +### Pre-Allow Safe .NET Permissions + +Stop clicking "allow" for every `dotnet` command. Add these to `.claude/settings.json`: + +```json +{ + "permissions": { + "allow": [ + "Bash(dotnet build *)", + "Bash(dotnet test *)", + "Bash(dotnet run *)", + "Bash(dotnet ef *)", + "Bash(dotnet format *)", + "Bash(dotnet restore *)", + "Bash(dotnet pack *)", + "Bash(dotnet tool *)" + ] + } +} +``` + +Check this into git so the whole team gets frictionless workflows. 
+ +### Plan Mode Strategy + +For any task touching 3+ files or involving architecture decisions: + +``` +Step 1: Enter plan mode (Shift+Tab twice) +Step 2: Describe the task with full context +Step 3: Iterate on the plan — challenge assumptions, ask "what about edge cases?" +Step 4: Once the plan is solid, switch to normal mode +Step 5: Claude executes with auto-accept — often 1-shots the implementation +``` + +**Advanced pattern:** Have one Claude write the plan, then spin up a second Claude session to review it as a staff engineer: + +``` +"Review this plan as a staff .NET engineer. Challenge every assumption. +What could go wrong? What's missing? What would you do differently?" +``` + +**When things go sideways:** The moment implementation deviates from the plan, STOP. Don't push through. Switch back to plan mode, understand what changed, re-plan, then resume. + +### Verification Loop for .NET + +> For the full 7-phase verification pipeline (build, diagnostics, anti-patterns, tests, security, format, diff review) with structured PASS/FAIL reporting, see the **verification-loop** skill. + +Boris's #1 tip: "Give Claude a way to verify its work." The short version: always tell Claude to run `dotnet build`, `dotnet test`, `get_diagnostics`, and `dotnet format --verify-no-changes` before declaring done. The verification-loop skill has the complete pipeline with short-circuit rules and report templates. + +### Compounding Knowledge via Corrections + +For the full correction capture system — detection, generalization, categorized storage, and periodic audits — see the **`self-correction-loop`** skill. The short version: after every correction, capture a generalized rule in `MEMORY.md` so the same mistake never recurs. + +### Prompting Techniques for .NET + +**Challenge Claude's work:** +``` +"Grill me on these changes. Would this pass a staff .NET engineer's code review? 
+Check for: N+1 queries, missing CancellationToken, exposed domain entities, +missing validation, incorrect service lifetimes." +``` + +**Demand proof:** +``` +"Prove this works. Run the tests, show me the output. +Then diff the API response between main and this branch." +``` + +**After a mediocre fix:** +``` +"Knowing everything you know now, scrap this and implement the elegant solution. +No hacks, no workarounds." +``` + +**For EF Core migrations:** +``` +"Generate the migration, then show me the raw SQL it produces. +I want to verify the migration before applying it." +``` + +### Subagent Patterns for .NET + +> **Context-aware delegation:** For guidance on when to use subagents to manage token budgets effectively, see the **`context-discipline`** skill. + +Create reusable subagents in `.claude/agents/`: + +```markdown + +You are a .NET API verification agent. Your job: +1. Run `dotnet build` and fix any compilation errors +2. Run `dotnet test` and fix any test failures +3. Use `get_diagnostics` to check for warnings +4. Verify all endpoints return proper TypedResults +5. Check that no domain entities leak into API responses +Report: PASS with summary, or FAIL with specific issues. +``` + +```markdown + +You are a code simplification agent for .NET projects. +Review the recent changes and simplify: +- Replace verbose LINQ with simpler alternatives +- Use primary constructors where applicable +- Replace manual null checks with pattern matching +- Consolidate duplicated code +- Remove unnecessary using statements +Do not change behavior. Only simplify. +``` + +**Use them:** +``` +"Run the verify-api agent on my changes before I create the PR." +"Run code-simplifier on the files I just modified." 
+``` + +## Anti-patterns + +### Don't Skip Plan Mode for Complex Tasks + +``` +// BAD — dive straight into a multi-file refactor +"Refactor the Orders module to use DDD with aggregates and value objects" +*Claude modifies 15 files, misses half the invariants, tangles the migration* + +// GOOD — plan first, execute after +"Enter plan mode. I want to refactor the Orders module to use DDD. +Let's plan which files change, what the aggregate boundary is, +how value objects map to EF Core, and what the migration strategy is." +``` + +### Don't Work in a Single Session When You Could Parallelize + +``` +// BAD — sequential work in one session +1. Build feature (20 min) +2. Write tests (15 min) +3. Fix formatting (5 min) +4. Update docs (10 min) +Total: 50 minutes + +// GOOD — parallel worktrees +Worktree 1: Build feature (20 min) +Worktree 2: Write tests (15 min, started simultaneously) +Worktree 3: Update docs (10 min, started simultaneously) +Total: ~20 minutes (wall clock) +``` + +### Don't Accept the First Solution + +``` +// BAD — accept mediocre code +Claude: "Here's the implementation" *generic, works but not great* +You: "Looks good, ship it" + +// GOOD — push for quality +Claude: "Here's the implementation" +You: "Would a staff .NET engineer approve this? + What about the service lifetime? Is this N+1 safe? + Is there a more elegant way using C# 14 features?" 
+``` + +## Decision Guide + +| Scenario | Recommendation | +|----------|---------------| +| Task touches 3+ files | Plan mode first | +| Task is a simple bug fix | Just fix it, verify with `dotnet test` | +| Need to build + test + review | 3 parallel worktrees | +| CI keeps failing on format | Add PostToolUse format hook | +| Tired of permission prompts | Pre-allow `dotnet *` commands | +| Claude made a mistake | "Update CLAUDE.md so you don't make that mistake again" | +| Code feels hacky | "Knowing everything you know now, implement the elegant solution" | +| Want to verify architecture | Spin up a second session as staff reviewer | +| Repetitive PR workflow | Create a subagent (verify-api, code-simplifier) | +| Learning a new codebase | Use "Explanatory" output style via `/config` | diff --git a/.opencode/skills/wrap-up-ritual/SKILL.md b/.opencode/skills/wrap-up-ritual/SKILL.md new file mode 100644 index 00000000..062ff9b6 --- /dev/null +++ b/.opencode/skills/wrap-up-ritual/SKILL.md @@ -0,0 +1,203 @@ +--- +name: wrap-up-ritual +description: > + Structured session ending ritual that captures completed work, pending tasks, + and learnings before a session ends. Writes a handoff note to .claude/handoff.md + so the next session (or a different developer) can pick up exactly where this + session left off. Load this skill when the user signals they're wrapping up, + says "let's stop here", "that's all for now", "end of session", "wrap up", + "save progress", "handoff", or "I'm done for today". +--- + +# Wrap-Up Ritual + +## Core Principles + +1. **Sessions are ephemeral, knowledge is permanent** — When a session ends, context is lost. But learnings, decisions, and progress don't have to be. The wrap-up ritual bridges the gap between sessions by writing a handoff note. + +2. **Three captures every time** — Every session ending captures exactly three things: what was DONE, what is PENDING, and what was LEARNED. No exceptions. 
Skipping any of these creates gaps for the next session. + +3. **Handoff notes are written for a stranger** — Write the handoff as if the next person has zero context. Include file paths, decision rationale, and specific next steps. "Continue the refactor" is useless. "Refactor `src/Orders/CreateOrder.cs` to use the Result pattern — see the Catalog module for the established pattern" is actionable. + +4. **Consistent location, always overwritten** — The handoff file lives at `.claude/handoff.md`. Each session overwrites the previous one — there's only ever one active handoff. Old handoffs are not valuable; current state is. + +5. **Learnings flow to permanent memory** — The "learned" section of a wrap-up is a trigger for the `self-correction-loop` skill. Any correction or discovery worth remembering should be captured in `MEMORY.md` as a permanent rule, not just in the ephemeral handoff. + +## Patterns + +### Session Summary Template + +The handoff file follows a consistent structure: + +```markdown +# Session Handoff + +> Generated: 2025-07-15 | Branch: feature/order-validation + +## Completed +- [x] Added FluentValidation to CreateOrder command + - File: `src/Orders/Features/CreateOrder.cs` (lines 15-35) + - Validator checks: non-empty CustomerId, at least 1 item, positive quantities +- [x] Added integration test for validation + - File: `tests/Orders.Tests/Features/CreateOrderTests.cs` + - Tests: InvalidCustomerId_Returns400, EmptyItems_Returns400 +- [x] Fixed N+1 query in GetOrderDetails + - File: `src/Orders/Features/GetOrderDetails.cs` (line 28) + - Added `.Include(o => o.Items)` to the query + +## Pending +- [ ] Add validation to UpdateOrder command (same pattern as CreateOrder) + - Start from: `src/Orders/Features/UpdateOrder.cs` + - Reference: CreateOrder validator for the established pattern +- [ ] Run full test suite — last run had 2 unrelated failures in Catalog module + - Failures: `CatalogTests.GetProduct_NotFound` and 
`CatalogTests.ListProducts_Pagination` + - These appear pre-existing, not caused by today's changes + +## Learned +- FluentValidation validators must be registered in the module's DI setup + (added to `OrdersModule.cs` line 12) — easy to forget +- The N+1 in GetOrderDetails was not caught by existing tests because the test + fixture seeds only 1 item per order. Consider adding multi-item test data. + +## Context +- Working in the `feature/order-validation` branch +- All changes committed up to "Add CreateOrder validation + tests" +- No uncommitted changes +``` + +### Trigger Detection + +Recognize when the user is ending a session: + +``` +EXPLICIT SIGNALS: +- "Let's wrap up" +- "That's all for today" +- "I'm done" +- "Save progress" +- "Let's stop here" +- "End of session" +- "Handoff" +- "Pick this up tomorrow" + +IMPLICIT SIGNALS: +- User says "thanks" after a series of completed tasks +- User says "good enough for now" +- Long pause after completing a task followed by no new request + +RESPONSE: +When you detect a wrap-up signal, offer to write the handoff: +"Let me write a handoff note to .claude/handoff.md so the next session +can pick up exactly where we left off." +``` + +### Learning Extraction at Session End + +Before writing the handoff, review the session for learnings: + +``` +LEARNING EXTRACTION CHECKLIST: +1. Did the user correct anything? → Capture in MEMORY.md (via self-correction-loop) +2. Did we discover something non-obvious? → Add to "Learned" section +3. Did we make a decision with rationale? → Document the "why" in handoff +4. Did a tool/approach not work? → Note it so the next session doesn't retry +5. Did we find a pattern worth reusing? 
→ Note for potential skill/memory addition + +EXAMPLES OF GOOD LEARNINGS: +- "EF Core compiled queries don't work with Include() — use regular queries" +- "The payment gateway sandbox returns 500 on amounts > $10,000" +- "FluentValidation async validators need .MustAsync(), not .Must() with await" + +EXAMPLES OF BAD LEARNINGS: +- "We worked on the Orders module" (too vague) +- "Things went well" (not actionable) +- "Used TimeProvider" (already a known rule, not a learning) +``` + +## Anti-patterns + +### Abrupt Endings + +``` +// BAD — session ends with no capture +User: "Thanks, that's all!" +Claude: "You're welcome! Happy to help." +*Next session: "What were we working on? Let me read through the git log..."* + +// GOOD — session ends with a handoff +User: "Thanks, that's all!" +Claude: "Before we wrap up, let me write a handoff note to .claude/handoff.md + so the next session can pick up where we left off." +*Next session: reads handoff, immediately knows the state* +``` + +### Vague Handoffs + +``` +// BAD — handoff that doesn't help +## Completed +- Worked on Orders module + +## Pending +- More work on Orders + +// GOOD — specific, actionable handoff +## Completed +- [x] Added CreateOrder validation (src/Orders/Features/CreateOrder.cs:15-35) +- [x] Integration test for validation (tests/Orders.Tests/Features/CreateOrderTests.cs) + +## Pending +- [ ] Add same validation pattern to UpdateOrder (src/Orders/Features/UpdateOrder.cs) + - Reference CreateOrder validator for the pattern +``` + +### Accumulating Handoff Files + +``` +// BAD — creating timestamped handoff files +.claude/ +├── handoff-2025-07-13.md +├── handoff-2025-07-14.md +├── handoff-2025-07-15.md +*3 files, unclear which is current, stale data accumulating* + +// GOOD — single file, always overwritten +.claude/ +├── handoff.md ← always the current state +*Permanent learnings go to MEMORY.md, not handoff files* +``` + +### Skipping the Learning Extraction + +``` +// BAD — handoff without learnings 
+## Completed +- Fixed N+1 query +## Pending +- Nothing + +*The discovery that seeded test data only has 1 item per order (hiding N+1s) + is lost forever* + +// GOOD — extract and preserve the insight +## Learned +- Test fixture seeds only 1 item per order, which hides N+1 queries. + Consider adding multi-item test data to the default fixture. +→ Also added to MEMORY.md > Testing: "Seed test data with multiple + child entities to catch N+1 queries" +``` + +## Decision Guide + +| Scenario | Action | +|----------|--------| +| User says "wrap up" / "that's all" / "done" | Write handoff to `.claude/handoff.md` | +| Session completed multiple tasks | List each with file paths and line numbers | +| Session had user corrections | Extract to MEMORY.md AND note in handoff Learned section | +| Next session is likely a different person | Include Decisions Made table with rationale | +| Session had no pending work | Still write a handoff — document what was completed and learned | +| Previous handoff exists | Overwrite it — only the current state matters | +| Work was on a feature branch | Include branch name and last commit message in handoff | +| Session ended with failing tests | Document which tests fail and suspected cause in Pending | +| User doesn't want a handoff | Respect it — but suggest capturing learnings in MEMORY.md at minimum | +| Session was purely exploratory (no code changes) | Write a lighter handoff with findings and recommendations | diff --git a/Core/Resgrid.Model/Autofill.cs b/Core/Resgrid.Model/Autofill.cs index d3fa5805..a63d62dd 100644 --- a/Core/Resgrid.Model/Autofill.cs +++ b/Core/Resgrid.Model/Autofill.cs @@ -19,6 +19,7 @@ public class Autofill : IEntity public string AddedByUserId { get; set; } + [MaxLength(256)] public string Name { get; set; } public string Data { get; set; } diff --git a/Core/Resgrid.Model/Repositories/IShiftsRepository.cs b/Core/Resgrid.Model/Repositories/IShiftsRepository.cs index 90c9cc42..d66460ee 100644 --- 
a/Core/Resgrid.Model/Repositories/IShiftsRepository.cs +++ b/Core/Resgrid.Model/Repositories/IShiftsRepository.cs @@ -1,4 +1,5 @@ -using System.Collections.Generic; +using System; +using System.Collections.Generic; using System.Threading.Tasks; namespace Resgrid.Model.Repositories @@ -23,6 +24,14 @@ public interface IShiftsRepository: IRepository /// Task<IEnumerable<Shift>>. Task> GetAllShiftAndDaysAsync(); + /// + /// Gets upcoming shifts (with days in the next 2 days from the reference time) and their associated data asynchronous. + /// Filters at the database level to avoid loading the entire Shifts table. + /// + /// The reference time from which to look for upcoming shifts (e.g. caller-computed currentTime). + /// Task<IEnumerable<Shift>>. + Task> GetUpcomingShiftAndDaysAsync(DateTime referenceTime); + /// /// Gets the shift and days by department identifier asynchronous. /// diff --git a/Core/Resgrid.Services/ShiftsService.cs b/Core/Resgrid.Services/ShiftsService.cs index ad9a5efc..2b8d0c24 100644 --- a/Core/Resgrid.Services/ShiftsService.cs +++ b/Core/Resgrid.Services/ShiftsService.cs @@ -273,7 +273,7 @@ public async Task> GetShiftsStartingNextDayAsync(DateTime currentTim { var upcomingShifts = new List(); - var shifts = await _shiftsRepository.GetAllShiftAndDaysAsync(); + var shifts = await _shiftsRepository.GetUpcomingShiftAndDaysAsync(currentTime); if (shifts != null && shifts.Any()) { diff --git a/Providers/Resgrid.Providers.Weather/NwsWeatherAlertProvider.cs b/Providers/Resgrid.Providers.Weather/NwsWeatherAlertProvider.cs index b4a24bf9..458c293d 100644 --- a/Providers/Resgrid.Providers.Weather/NwsWeatherAlertProvider.cs +++ b/Providers/Resgrid.Providers.Weather/NwsWeatherAlertProvider.cs @@ -5,6 +5,7 @@ using System.Net.Http; using System.Net.Http.Headers; using System.Text.Json; +using System.Text.RegularExpressions; using System.Threading; using System.Threading.Tasks; using Resgrid.Model; @@ -17,6 +18,14 @@ public class NwsWeatherAlertProvider : 
IWeatherAlertProvider private static readonly HttpClient _httpClient = new HttpClient(); private const string DefaultBaseUrl = "https://api.weather.gov/alerts/active"; + /// + /// Regex matching valid NWS zone/county codes (e.g. MIC081, MIZ037, TXZ123). + /// State abbreviation + C (county) or Z (zone) + 3 digits. + /// + private static readonly Regex NwsZoneCodeRegex = new Regex( + @"^(A[KLMNRSZ]|C[AOT]|D[CE]|F[LM]|G[AMU]|I[ADLN]|K[SY]|L[ACEHMOS]|M[ADEHINOPST]|N[CDEHJMVY]|O[HKR]|P[AHKMRSWZ]|S[CDL]|T[NX]|UT|V[AIT]|W[AIVY]|[HR]I)[CZ]\d{3}$", + RegexOptions.Compiled | RegexOptions.IgnoreCase); + static NwsWeatherAlertProvider() { _httpClient.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/geo+json")); @@ -45,6 +54,14 @@ public async Task> FetchAlertsAsync(WeatherAlertSource source // the NWS API requires separate query parameters for each type. var stateCodes = zones.Where(z => z.Length == 2).ToArray(); var zoneCodes = zones.Where(z => z.Length > 2).ToArray(); + + // Validate zone codes before calling the NWS API to produce a clear error + // instead of a cryptic 400 Bad Request from the upstream API. + var validationError = GetZoneValidationError(source.AreaFilter); + if (validationError != null) + throw new HttpRequestException( + $"Invalid NWS zone code in area filter for department {source.DepartmentId}: {validationError}"); + var queryParams = new List(); if (stateCodes.Length > 0) queryParams.Add($"area={string.Join(",", stateCodes)}"); @@ -345,5 +362,67 @@ private static string[] ParseAreaFilter(string areaFilter) // Fall back to comma-separated string return trimmed.Split(',', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries); } + + /// + /// Returns true if the code is a valid NWS zone/county code (e.g. MIC081, MIZ037, TXZ123). + /// State abbreviation + C (county) or Z (zone) + 3 digits. 
+ /// + public static bool IsValidNwsZoneCode(string code) + { + if (string.IsNullOrWhiteSpace(code)) + return false; + + return NwsZoneCodeRegex.IsMatch(code.Trim()); + } + + /// + /// Validates that all non-state zone codes in the area filter are valid NWS zone codes. + /// Returns an error message describing the first invalid code, or null if all are valid. + /// 2-letter codes are assumed to be state abbreviations and are not validated as zones. + /// + public static string GetZoneValidationError(string areaFilter) + { + var zones = ParseAreaFilter(areaFilter); + foreach (var code in zones) + { + // 2-letter codes are state abbreviations (e.g. TX, MI) — skip + if (code.Length <= 2) + continue; + + if (!IsValidNwsZoneCode(code)) + { + var hint = GetCodeFormatHint(code); + return $"'{code}' is not a valid NWS zone code. " + + $"Expected format: state abbreviation + C or Z + 3 digits (e.g. MIC081, TXZ123). {hint}"; + } + } + + return null; + } + + /// + /// Attempts to identify what kind of code the user entered and provides a helpful hint. + /// + private static string GetCodeFormatHint(string code) + { + if (string.IsNullOrWhiteSpace(code)) + return ""; + + var upper = code.Trim().ToUpperInvariant(); + + // ICAO airport code (4 chars, starts with K for continental US) + if (upper.Length == 4 && upper.StartsWith("K") && upper[1] >= 'A' && upper[1] <= 'Z') + return $"'{code}' looks like an ICAO airport code. Use an NWS zone code instead (e.g. MIZ037 for Grand Rapids, MI area)."; + + // US ZIP code (5 digits) or FIPS county code (also 5 digits). + // Both are exactly 5 digits, so they cannot be distinguished by format + // alone; a second identical length/digit check for FIPS would be + // unreachable dead code. A single combined hint therefore covers both + // possibilities. + if (upper.Length == 5 && int.TryParse(upper, out _)) + return $"'{code}' looks like a ZIP or FIPS code. NWS uses zone codes, not ZIP or FIPS codes. 
NWS zone codes use the format MIC081 (state + C/Z + 3 digits)."; + + return "Find your NWS zone code at https://www.weather.gov/gis/"; + } } } diff --git a/Repositories/Resgrid.Repositories.DataRepository/Configs/SqlConfiguration.cs b/Repositories/Resgrid.Repositories.DataRepository/Configs/SqlConfiguration.cs index 59731b79..36bde069 100644 --- a/Repositories/Resgrid.Repositories.DataRepository/Configs/SqlConfiguration.cs +++ b/Repositories/Resgrid.Repositories.DataRepository/Configs/SqlConfiguration.cs @@ -279,6 +279,7 @@ protected SqlConfiguration() { } public string SelectShiftAndDaysByShiftIdQuery { get; set; } public string SelectShiftAndDaysQuery { get; set; } public string SelectShiftAndDaysJSONQuery { get; set; } + public string SelectUpcomingShiftAndDaysJSONQuery { get; set; } public string SelectShiftSignupByUserIdQuery { get; set; } public string SelectShiftSignupTradeByUserIdQuery { get; set; } public string SelectOpenShiftSignupTradesByUserIdQuery { get; set; } diff --git a/Repositories/Resgrid.Repositories.DataRepository/Queries/Shifts/SelectUpcomingShiftAndDaysQuery.cs b/Repositories/Resgrid.Repositories.DataRepository/Queries/Shifts/SelectUpcomingShiftAndDaysQuery.cs new file mode 100644 index 00000000..a02afb39 --- /dev/null +++ b/Repositories/Resgrid.Repositories.DataRepository/Queries/Shifts/SelectUpcomingShiftAndDaysQuery.cs @@ -0,0 +1,48 @@ +using Resgrid.Model; +using Resgrid.Model.Repositories.Queries.Contracts; +using Resgrid.Repositories.DataRepository.Configs; +using Resgrid.Repositories.DataRepository.Extensions; + +namespace Resgrid.Repositories.DataRepository.Queries.Shifts +{ + public class SelectUpcomingShiftAndDaysQuery : ISelectQuery + { + private readonly SqlConfiguration _sqlConfiguration; + public SelectUpcomingShiftAndDaysQuery(SqlConfiguration sqlConfiguration) + { + _sqlConfiguration = sqlConfiguration; + } + + public string GetQuery() + { + var query = _sqlConfiguration.SelectUpcomingShiftAndDaysJSONQuery + 
.ReplaceQueryParameters(_sqlConfiguration, _sqlConfiguration.SchemaName, + string.Empty, + _sqlConfiguration.ParameterNotation, + new string[] { + "%STARTDATE%", + "%ENDDATE%" + }, + new string[] { + "StartDate", + "EndDate" + }, + new string[] { + "%SHIFTSTABLE%", + "%SHIFTDAYSTABLE%" + }, + new string[] { + _sqlConfiguration.ShiftsTable, + _sqlConfiguration.ShiftDaysTable + } + ); + + return query; + } + + public string GetQuery<TEntity>() where TEntity : class, IEntity + { + throw new System.NotImplementedException(); + } + } +} diff --git a/Repositories/Resgrid.Repositories.DataRepository/Servers/PostgreSql/PostgreSqlConfiguration.cs b/Repositories/Resgrid.Repositories.DataRepository/Servers/PostgreSql/PostgreSqlConfiguration.cs index edcf6025..e2ba300b 100644 --- a/Repositories/Resgrid.Repositories.DataRepository/Servers/PostgreSql/PostgreSqlConfiguration.cs +++ b/Repositories/Resgrid.Repositories.DataRepository/Servers/PostgreSql/PostgreSqlConfiguration.cs @@ -57,7 +57,7 @@ public PostgreSqlConfiguration() SELECT %SCHEMA%.%ACTIONLOGSTABLE%.*, %SCHEMA%.%ASPNETUSERSTABLE%.* FROM %SCHEMA%.%ACTIONLOGSTABLE% INNER JOIN %SCHEMA%.%ASPNETUSERSTABLE% ON %SCHEMA%.%ASPNETUSERSTABLE%.Id = %SCHEMA%.%ACTIONLOGSTABLE%.UserId - UserId = %ID%"; + WHERE UserId = %USERID%"; SelectALogsByUserInDateRangQuery = @" SELECT %SCHEMA%.%ACTIONLOGSTABLE%.*, %SCHEMA%.%ASPNETUSERSTABLE%.* FROM %SCHEMA%.%ACTIONLOGSTABLE% @@ -833,8 +833,88 @@ from shiftsignups ss where ss.shiftid = sh.shiftid ) - from shifts sh - ) j"; + from shifts sh + ) j"; + // Filtered variant: only shifts with at least one ShiftDay in the next 2 days. + // Used by GetUpcomingShiftAndDaysAsync to avoid loading the entire Shifts table + // for the GetShiftsStartingNextDayAsync worker, which only needs near-term shifts. 
+ SelectUpcomingShiftAndDaysJSONQuery = @" + select json_agg(row_to_json(j)) as JsonResult + from ( + select *, ( + select row_to_json(d) from departments d where d.departmentid = sh.departmentid + ) department, ( + SELECT + jsonb_agg( + jsonb_build_object( + 'ShiftGroupId', sg.shiftgroupid, + 'ShiftId', sg.shiftid, + 'DepartmentGroupId', sg.departmentgroupid, + 'shift', (SELECT row_to_json(s1) from shifts s1 where s1.shiftid = sg.shiftid), + 'departmentgroup', (SELECT row_to_json(dg1) from departmentgroups dg1 WHERE dg1.departmentgroupid = sg.departmentgroupid), + 'roles', (SELECT json_agg(row_to_json(sgr1)) from shiftgrouproles sgr1 WHERE sgr1.shiftgroupid = sg.shiftgroupid) + ) + ) + from shiftgroups sg + where sg.shiftid = sh.shiftid + ) groups, ( + SELECT + jsonb_agg( + jsonb_build_object( + 'ShiftDayId', sd.shiftdayid, + 'ShiftId', sd.shiftid, + 'Day', sd.day, + 'shift', (SELECT row_to_json(s2) from shifts s2 where s2.shiftid = sd.shiftid) + ) + ) + from shiftdays sd + WHERE sd.shiftid = sh.shiftid + ) days, ( + SELECT + jsonb_agg( + jsonb_build_object( + 'ShiftPersonId', sp.shiftpersonid, + 'ShiftId', sp.shiftid, + 'UserId', sp.userid, + 'shift', (SELECT row_to_json(s3) from shifts s3 where s3.shiftid = sp.shiftid) + ) + ) + FROM shiftpersons sp + WHERE sp.shiftid = sh.shiftid + ) personnel, ( + SELECT + jsonb_agg( + jsonb_build_object( + 'ShiftAdminId', sa.shiftadminid, + 'ShiftId', sa.shiftid, + 'UserId', sa.userid, + 'shift', (SELECT row_to_json(s4) from shifts s4 where s4.shiftid = sa.shiftid) + ) + ) + FROM shiftadmins sa + WHERE sa.shiftid = sh.shiftid + ) admins, ( + SELECT + jsonb_agg( + jsonb_build_object( + 'ShiftSignupId', ss.shiftsignupid, + 'ShiftId', ss.shiftid, + 'DepartmentGroupId', ss.departmentgroupid, + 'UserId', ss.userid, + 'SignupTimestamp', ss.signuptimestamp, + 'ShiftDay', ss.shiftday, + 'Denied', ss.denied, + 'shift', (SELECT row_to_json(s5) from shifts s5 where s5.shiftid = ss.shiftid), + 'departmentgroup', (SELECT row_to_json(dg2) 
from departmentgroups dg2 WHERE dg2.departmentgroupid = ss.departmentgroupid) + ) + ) + from shiftsignups ss + where ss.shiftid = sh.shiftid + ) + + from shifts sh + where exists (select 1 from shiftdays sd where sd.shiftid = sh.shiftid and sd.day >= %STARTDATE% and sd.day < %ENDDATE%) + ) j"; SelectShiftSignupByUserIdQuery = "SELECT * FROM %SCHEMA%.%TABLENAME% WHERE UserId = %USERID%"; SelectShiftSignupTradeByUserIdQuery = @" SELECT %SCHEMA%.%SHIFTSIGNUPTRADESTABLE%.*, %SCHEMA%.%SHIFTSIGNUPTRADEUSERSTABLE%.* diff --git a/Repositories/Resgrid.Repositories.DataRepository/Servers/SqlServer/SqlServerConfiguration.cs b/Repositories/Resgrid.Repositories.DataRepository/Servers/SqlServer/SqlServerConfiguration.cs index c93c8150..c7aaf620 100644 --- a/Repositories/Resgrid.Repositories.DataRepository/Servers/SqlServer/SqlServerConfiguration.cs +++ b/Repositories/Resgrid.Repositories.DataRepository/Servers/SqlServer/SqlServerConfiguration.cs @@ -55,7 +55,7 @@ public SqlServerConfiguration() SELECT %SCHEMA%.%ACTIONLOGSTABLE%.*, %SCHEMA%.%ASPNETUSERSTABLE%.* FROM %SCHEMA%.%ACTIONLOGSTABLE% INNER JOIN %SCHEMA%.%ASPNETUSERSTABLE% ON %SCHEMA%.%ASPNETUSERSTABLE%.Id = %SCHEMA%.%ACTIONLOGSTABLE%.UserId - [UserId] = %ID%"; + WHERE [UserId] = %USERID%"; SelectALogsByUserInDateRangQuery = @" SELECT %SCHEMA%.%ACTIONLOGSTABLE%.*, %SCHEMA%.%ASPNETUSERSTABLE%.* FROM %SCHEMA%.%ACTIONLOGSTABLE% @@ -821,6 +821,75 @@ FROM [dbo].[ShiftSignups] ss FROM [dbo].[Shifts] sh FOR JSON PATH) AS 'JsonResult'"; + // Filtered variant: only shifts with at least one ShiftDay in the next 2 days. + // Used by GetUpcomingShiftAndDaysAsync to avoid loading the entire Shifts table + // for the GetShiftsStartingNextDayAsync worker, which only needs near-term shifts. 
+ SelectUpcomingShiftAndDaysJSONQuery = @" + SELECT (SELECT *, + JSON_QUERY((SELECT * + FROM [dbo].[Departments] + WHERE [dbo].[Departments].DepartmentId = sh.DepartmentId + FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)) AS 'Department', + (SELECT *, + JSON_QUERY((SELECT * + FROM [dbo].[Shifts] + WHERE [dbo].[Shifts].ShiftId = sh.ShiftId + FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)) AS 'Shift', + JSON_QUERY((SELECT * + FROM [dbo].[DepartmentGroups] + WHERE [dbo].[DepartmentGroups].DepartmentGroupId = sg.DepartmentGroupId + FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)) AS 'DepartmentGroup', + (SELECT * + FROM [dbo].[ShiftGroupRoles] + WHERE [dbo].[ShiftGroupRoles].ShiftGroupId = sg.ShiftGroupId + FOR JSON PATH) AS 'Roles', + (SELECT * + FROM [dbo].[ShiftGroupAssignments] + WHERE [dbo].[ShiftGroupAssignments].ShiftGroupId = sg.ShiftGroupId + FOR JSON PATH) AS 'Assignments' + FROM [dbo].[ShiftGroups] sg + WHERE sg.ShiftId = sh.ShiftId + FOR JSON PATH) AS 'Groups', + (SELECT *, + JSON_QUERY((SELECT * + FROM [dbo].[Shifts] + WHERE [dbo].[Shifts].ShiftId = sh.ShiftId + FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)) AS 'Shift' + FROM [dbo].[ShiftDays] sd + WHERE sd.ShiftId = sh.ShiftId + FOR JSON PATH) AS 'Days', + (SELECT *, + JSON_QUERY((SELECT * + FROM [dbo].[Shifts] + WHERE [dbo].[Shifts].ShiftId = sh.ShiftId + FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)) AS 'Shift' + FROM [dbo].[ShiftPersons] sp + WHERE sp.ShiftId = sh.ShiftId + FOR JSON PATH) AS 'Personnel', + (SELECT *, + JSON_QUERY((SELECT * + FROM [dbo].[Shifts] + WHERE [dbo].[Shifts].ShiftId = sh.ShiftId + FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)) AS 'Shift' + FROM [dbo].[ShiftAdmins] sa + WHERE sa.ShiftId = sh.ShiftId + FOR JSON PATH) AS 'Admins', + (SELECT *, + JSON_QUERY((SELECT * + FROM [dbo].[Shifts] + WHERE [dbo].[Shifts].ShiftId = sh.ShiftId + FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)) AS 'Shift', + JSON_QUERY((SELECT * + FROM [dbo].[DepartmentGroups] + WHERE [dbo].[DepartmentGroups].DepartmentGroupId = ss.DepartmentGroupId + FOR JSON PATH, 
WITHOUT_ARRAY_WRAPPER)) AS 'Group' + FROM [dbo].[ShiftSignups] ss + WHERE ss.ShiftId = sh.ShiftId + FOR JSON PATH) AS 'Signups' + + FROM [dbo].[Shifts] sh + WHERE EXISTS (SELECT 1 FROM [dbo].[ShiftDays] sd WHERE sd.[ShiftId] = sh.[ShiftId] AND sd.[Day] >= %STARTDATE% AND sd.[Day] < %ENDDATE%) + FOR JSON PATH) AS 'JsonResult'"; SelectShiftSignupByUserIdQuery = "SELECT * FROM %SCHEMA%.%TABLENAME% WHERE [UserId] = %USERID%"; SelectShiftSignupTradeByUserIdQuery = @" SELECT %SCHEMA%.%SHIFTSIGNUPTRADESTABLE%.*, %SCHEMA%.%SHIFTSIGNUPTRADEUSERSTABLE%.* diff --git a/Repositories/Resgrid.Repositories.DataRepository/ShiftsRepository.cs b/Repositories/Resgrid.Repositories.DataRepository/ShiftsRepository.cs index 902342e4..74eeaa62 100644 --- a/Repositories/Resgrid.Repositories.DataRepository/ShiftsRepository.cs +++ b/Repositories/Resgrid.Repositories.DataRepository/ShiftsRepository.cs @@ -198,6 +198,61 @@ public async Task> GetAllShiftAndDaysAsync() } } + public async Task> GetUpcomingShiftAndDaysAsync(DateTime referenceTime) + { + try + { + var selectFunction = new Func>>(async x => + { + var dynamicParameters = new DynamicParametersExtension(); + dynamicParameters.Add("StartDate", referenceTime.Date); + dynamicParameters.Add("EndDate", referenceTime.Date.AddDays(2)); + + if (DataConfig.DatabaseType == DatabaseTypes.SqlServer) + dynamicParameters.Add("JsonResult", null, DbType.String, ParameterDirection.Output, int.MaxValue); + + var query = _queryFactory.GetQuery(); + + var result = await x.QueryAsync(sql: query, + param: dynamicParameters, + transaction: _unitOfWork.Transaction); + + if (result != null) + { + var singleResult = result.FirstOrDefault(); + if (singleResult != null) + { + return JsonConvert.DeserializeObject>(singleResult); + } + } + + return null; + }); + + DbConnection conn = null; + if (_unitOfWork?.Connection == null) + { + using (conn = _connectionProvider.Create()) + { + await conn.OpenAsync(); + + return await selectFunction(conn); + } + } + else + { + 
conn = _unitOfWork.CreateOrGetConnection(); + return await selectFunction(conn); + } + } + catch (Exception ex) + { + Logging.LogException(ex); + + return null; + } + } + private static Func ShiftDayMapping(Dictionary dictionary) { return new Func((shift, shiftDay) => diff --git a/Web/Resgrid.Web.Services/Controllers/v4/WeatherAlertsController.cs b/Web/Resgrid.Web.Services/Controllers/v4/WeatherAlertsController.cs index dafd8c61..f25a100e 100644 --- a/Web/Resgrid.Web.Services/Controllers/v4/WeatherAlertsController.cs +++ b/Web/Resgrid.Web.Services/Controllers/v4/WeatherAlertsController.cs @@ -5,6 +5,7 @@ using Resgrid.Model.Helpers; using Resgrid.Model.Services; using Resgrid.Providers.Claims; +using Resgrid.Providers.Weather; using Resgrid.Web.Services.Helpers; using Resgrid.Web.Services.Models.v4; using Resgrid.Web.Services.Models.v4.WeatherAlerts; @@ -193,6 +194,18 @@ public async Task> SaveSource([FromBo source.PollIntervalMinutes = Math.Max(input.PollIntervalMinutes, 15); source.Active = input.Active; + // Validate NWS zone codes at save time so users get immediate feedback + // instead of a cryptic 400 from the NWS API during polling. + if (input.SourceType == 0 && !string.IsNullOrWhiteSpace(source.AreaFilter)) + { + var zoneError = NwsWeatherAlertProvider.GetZoneValidationError(source.AreaFilter); + if (zoneError != null) + { + ModelState.AddModelError("AreaFilter", zoneError); + return ValidationProblem(ModelState); + } + } + await _weatherAlertService.SaveSourceAsync(source); var department = await _departmentsService.GetDepartmentByIdAsync(DepartmentId, false); diff --git a/Web/Resgrid.Web.Services/Resgrid.Web.Services.xml b/Web/Resgrid.Web.Services/Resgrid.Web.Services.xml index f2a9b747..c3fe583a 100644 --- a/Web/Resgrid.Web.Services/Resgrid.Web.Services.xml +++ b/Web/Resgrid.Web.Services/Resgrid.Web.Services.xml @@ -3573,52 +3573,6 @@ Is the user a group admin - - - UserId (GUID/UUID) of the User to set. 
This field will be ignored if the input is used on a - function that is setting status for the current user. - - - - - The state/staffing level of the user to set for the user. - - - - - Note for the staffing level - - - - - The result object for a state/staffing level request. - - - - - The UserId GUID/UUID for the user state/staffing level being return - - - - - The full name of the user for the state/staffing level being returned - - - - - The current staffing level (state) type for the user - - - - - The timestamp of the last state/staffing level. This is converted UTC to the departments, or users, TimeZone. - - - - - Staffing note for the User's staffing - - Input data to add a staffing schedule in the Resgrid system @@ -3724,6 +3678,52 @@ Note for this staffing schedule + + + UserId (GUID/UUID) of the User to set. This field will be ignored if the input is used on a + function that is setting status for the current user. + + + + + The state/staffing level of the user to set for the user. + + + + + Note for the staffing level + + + + + The result object for a state/staffing level request. + + + + + The UserId GUID/UUID for the user state/staffing level being return + + + + + The full name of the user for the state/staffing level being returned + + + + + The current staffing level (state) type for the user + + + + + The timestamp of the last state/staffing level. This is converted UTC to the departments, or users, TimeZone. 
+ + + + + Staffing note for the User's staffing + + A resrouce in the system this could be a user or unit @@ -7536,379 +7536,209 @@ Identifier of the new npte - + - The result of getting all personnel filters for the system + A GPS location for a point in time of a specificed person - + - The Id value of the filter + PersonId of the person that the location is for - + - The type of the filter + The timestamp of the location in UTC - + - The filters name + GPS Latitude of the Person - + - Result containing all the data required to populate the New Call form + GPS Longitude of the Person - + - Response Data + GPS Latitude\Longitude Accuracy of the Person - + - Result that contains all the options available to filter personnel against compatible Resgrid APIs + GPS Altitude of the Person - + - Response Data + GPS Altitude Accuracy of the Person - + - Result containing all the data required to populate the New Call form + GPS Speed of the Person - + - Response Data + GPS Heading of the Person - + - Information about a User + A unit location in the Resgrid system - + - The UserId GUID/UUID for the user + Response Data - + - DepartmentId of the deparment the user belongs to + The information about a specific unit's location - + - Department specificed ID number for this user + Id of the Person - + - The Users First Name + The Timestamp for the location in UTC - + - The Users Last Name + GPS Latitude of the Person - + - The Users Email Address + GPS Longitude of the Person - + - The Users Mobile Telephone Number + GPS Latitude\Longitude Accuracy of the Person - + - GroupId the user is assigned to (0 for no group) + GPS Altitude of the Person - + - Name of the group the user is assigned to + GPS Altitude Accuracy of the Person - + - Enumeration/List of roles the user currently holds + GPS Speed of the Person - + - The current action/status type for the user + GPS Heading of the Person - + - The current action/status string for the user + The result of getting the current 
staffing for a user - + - The current action/status color hex string for the user + Response Data - + - The timestamp of the last action. This is converted UTC to the departments, or users, TimeZone. + Information about a User staffing - + - The current action/status destination id for the user + The UserId GUID/UUID for the user status being return - + - The current action/status destination name for the user + DepartmentId of the deparment the user belongs to - + - The current staffing level (state) type for the user + The current staffing type for the user - + - The current staffing level (state) string for the user + The timestamp of the last staffing. This is converted UTC version of the timestamp. - + - The current staffing level (state) color hex string for the user + The timestamp of the last staffing. This is converted UTC to the departments, or users, TimeZone. - + - The timestamp of the last state/staffing level. This is converted UTC to the departments, or users, TimeZone. + Note for this staffing - + - Users last known location + Saves (sets) and Personnel Staffing in the system, for a single user - + - Sorting weight for the user + UnitId of the apparatus that the state is being set for - + - User Defined Field values for this personnel record + The UnitStateType of the Unit - + - A GPS location for a point in time of a specificed person + The timestamp of the status event in UTC - + - PersonId of the person that the location is for + The timestamp of the status event in the local time of the device - + - The timestamp of the location in UTC + User provided note for this event - + - GPS Latitude of the Person + The event id used for queuing on mobile applications - + - GPS Longitude of the Person + Depicts a result after saving a person status - + - GPS Latitude\Longitude Accuracy of the Person + Response Data - + - GPS Altitude of the Person - - - - - GPS Altitude Accuracy of the Person - - - - - GPS Speed of the Person - - - - - GPS Heading of the 
Person - - - - - A unit location in the Resgrid system - - - - - Response Data - - - - - The information about a specific unit's location - - - - - Id of the Person - - - - - The Timestamp for the location in UTC - - - - - GPS Latitude of the Person - - - - - GPS Longitude of the Person - - - - - GPS Latitude\Longitude Accuracy of the Person - - - - - GPS Altitude of the Person - - - - - GPS Altitude Accuracy of the Person - - - - - GPS Speed of the Person - - - - - GPS Heading of the Person - - - - - The result of getting the current staffing for a user - - - - - Response Data - - - - - Information about a User staffing - - - - - The UserId GUID/UUID for the user status being return - - - - - DepartmentId of the deparment the user belongs to - - - - - The current staffing type for the user - - - - - The timestamp of the last staffing. This is converted UTC version of the timestamp. - - - - - The timestamp of the last staffing. This is converted UTC to the departments, or users, TimeZone. 
- - - - - Note for this staffing - - - - - Saves (sets) and Personnel Staffing in the system, for a single user - - - - - UnitId of the apparatus that the state is being set for - - - - - The UnitStateType of the Unit - - - - - The timestamp of the status event in UTC - - - - - The timestamp of the status event in the local time of the device - - - - - User provided note for this event - - - - - The event id used for queuing on mobile applications - - - - - Depicts a result after saving a person status - - - - - Response Data - - - - - Saves (sets) and Personnel Status in the system, for a single user + Saves (sets) and Personnel Status in the system, for a single user @@ -8209,112 +8039,282 @@ Response Data - + - Result containing all the data required to populate the New Call form + The result of getting all personnel filters for the system - + - Response Data + The Id value of the filter - + - Details of a protocol + The type of the filter - + - Protocol id + The filters name - + - Department id + Result containing all the data required to populate the New Call form - + - Name of the Protocol + Response Data - + - Protocol code + Result that contains all the options available to filter personnel against compatible Resgrid APIs - + - This this protocol disabled + Response Data - + - Protocol description + Result containing all the data required to populate the New Call form - + - Text of the protocol + Response Data - + - UTC date and time when the Protocol was created + Information about a User - + - UserId of the user who created the protocol + The UserId GUID/UUID for the user - + - UTC timestamp of when the Protocol was updated + DepartmentId of the deparment the user belongs to - + - Minimum triggering Weight of the Protocol + Department specificed ID number for this user - + - UserId that last updated the Protocol + The Users First Name - + - Triggers used to activate this Protocol + The Users Last Name - + - Attachments for this Protocol + The Users Email 
Address - + - Questions used to determine if this Protocol needs to be used or not + The Users Mobile Telephone Number - + - State type + GroupId the user is assigned to (0 for no group) - + - Result containing all the data required to populate the New Call form + Name of the group the user is assigned to - + - Response Data + Enumeration/List of roles the user currently holds - + + + The current action/status type for the user + + + + + The current action/status string for the user + + + + + The current action/status color hex string for the user + + + + + The timestamp of the last action. This is converted UTC to the departments, or users, TimeZone. + + + + + The current action/status destination id for the user + + + + + The current action/status destination name for the user + + + + + The current staffing level (state) type for the user + + + + + The current staffing level (state) string for the user + + + + + The current staffing level (state) color hex string for the user + + + + + The timestamp of the last state/staffing level. This is converted UTC to the departments, or users, TimeZone. 
+ + + + + Users last known location + + + + + Sorting weight for the user + + + + + User Defined Field values for this personnel record + + + + + Result containing all the data required to populate the New Call form + + + + + Response Data + + + + + Details of a protocol + + + + + Protocol id + + + + + Department id + + + + + Name of the Protocol + + + + + Protocol code + + + + + This this protocol disabled + + + + + Protocol description + + + + + Text of the protocol + + + + + UTC date and time when the Protocol was created + + + + + UserId of the user who created the protocol + + + + + UTC timestamp of when the Protocol was updated + + + + + Minimum triggering Weight of the Protocol + + + + + UserId that last updated the Protocol + + + + + Triggers used to activate this Protocol + + + + + Attachments for this Protocol + + + + + Questions used to determine if this Protocol needs to be used or not + + + + + State type + + + + + Result containing all the data required to populate the New Call form + + + + + Response Data + + + A role in the Resgrid system @@ -9508,545 +9508,545 @@ Default constructor - + - Result that contains all the options available to filter units against compatible Resgrid APIs + Depicts a result after saving a unit status - + Response Data - + - A unit in the Resgrid system + Object inputs for setting a users Status/Action. If this object is used in an operation that sets + a status for the current user the UserId value in this object will be ignored. - + - Response Data + UnitId of the apparatus that the state is being set for - + - The information about a specific unit + The UnitStateType of the Unit - + - Id of the Unit + The Call/Station the unit is responding to - + - The Id of the department the unit is under + Destination type for RespondingTo (Station = 1, Call = 2, POI = 3). 
- + - Name of the Unit + The timestamp of the status event in UTC - + - Department assigned type for the unit + The timestamp of the status event in the local time of the device - + - Department assigned type id for the unit + User provided note for this event - + - Custom Statuses Set Id + GPS Latitude of the Unit - + - Station Id of the station housing the unit (0 means no station) + GPS Longitude of the Unit - + - Name of the station the unit is under + GPS Latitude\Longitude Accuracy of the Unit - + - Vehicle Identification Number for the unit + GPS Altitude of the Unit - + - Plate Number for the Unit + GPS Altitude Accuracy of the Unit - + - Is the unit 4-Wheel drive + GPS Speed of the Unit - + - Does the unit require a special permit to drive + GPS Heading of the Unit - + - Id number of the units current destionation (0 means no destination) + The event id used for queuing on mobile applications - + - The current status/state of the Unit + The accountability roles filed for this event - + - The Timestamp of the status + Role filled by a User on a Unit for an event - + - The units current Latitude + Id of the locally stored event - + - The units current Longitude + Local Event Id - + - Current user provide status note + UserId of the user filling the role - + - User Defined Field values for this unit + RoleId of the role being filled - + - Unit role information for roles on a unit + The name of the Role - + - Unit Role Id + Depicts a unit status in the Resgrid system. 
- + - User Id of the user in the role (could be null) + Response Data - + - Name of the Role + Depicts a unit's status - + - Name of the user in the role (could be null) + Unit Id - + - Multiple Unit infos Result + Units Name - + - Response Data + The Type of the Unit - + - Default constructor + Units current Status (State) - + - The information about a specific unit + CSS for status (for display) - + - Id of the Unit + CSS Style for status (for display) - + - The Id of the department the unit is under + Timestamp of this Unit State - + - Name of the Unit + Timestamp in Utc of this Unit State - + - Department assigned type for the unit + Destination Id (Station or Call) - + - Department assigned type id for the unit + Destination type (Station, Call, or POI). - + - Custom Statuses Set Id + Name of the Desination (Call or Station) - + - Station Id of the station housing the unit (0 means no station) + Destination address. - + - Name of the station the unit is under + Localized display label for the destination type (e.g. "Station", "Call", "POI"). Not + suitable for programmatic branching; use as the + machine-readable discriminator instead. 
- + - Vehicle Identification Number for the unit + Note for the State - + - Plate Number for the Unit + Latitude - + - Is the unit 4-Wheel drive + Longitude - + - Does the unit require a special permit to drive + Name of the Group the Unit is in - + - Id number of the units current destination (0 means no destination) + Id of the Group the Unit is in - + - Name of the units current destination (0 means no destination) + Unit statuses (states) - + - The current status/state of the Unit + Response Data - + - The current status/state of the Unit as a name + Default constructor - + - The current status/state of the Unit color + Result that contains all the options available to filter units against compatible Resgrid APIs - + - The Timestamp of the status + Response Data - + - The Timestamp of the status in UTC/GMT + A unit in the Resgrid system - + - The units current Latitude + Response Data - + - The units current Longitude + The information about a specific unit - + - Current user provide status note + Id of the Unit - + - Units Roles + The Id of the department the unit is under - + - Multiple Units Result + Name of the Unit - + - Response Data + Department assigned type for the unit - + - Default constructor + Department assigned type id for the unit - + - Depicts a result after saving a unit status + Custom Statuses Set Id - + - Response Data + Station Id of the station housing the unit (0 means no station) - + - Object inputs for setting a users Status/Action. If this object is used in an operation that sets - a status for the current user the UserId value in this object will be ignored. + Name of the station the unit is under - + - UnitId of the apparatus that the state is being set for + Vehicle Identification Number for the unit - + - The UnitStateType of the Unit + Plate Number for the Unit - + - The Call/Station the unit is responding to + Is the unit 4-Wheel drive - + - Destination type for RespondingTo (Station = 1, Call = 2, POI = 3). 
+ Does the unit require a special permit to drive - + - The timestamp of the status event in UTC + Id number of the units current destionation (0 means no destination) - + - The timestamp of the status event in the local time of the device + The current status/state of the Unit - + - User provided note for this event + The Timestamp of the status - + - GPS Latitude of the Unit + The units current Latitude - + - GPS Longitude of the Unit + The units current Longitude - + - GPS Latitude\Longitude Accuracy of the Unit + Current user provide status note - + - GPS Altitude of the Unit + User Defined Field values for this unit - + - GPS Altitude Accuracy of the Unit + Unit role information for roles on a unit - + - GPS Speed of the Unit + Unit Role Id - + - GPS Heading of the Unit + User Id of the user in the role (could be null) - + - The event id used for queuing on mobile applications + Name of the Role - + - The accountability roles filed for this event + Name of the user in the role (could be null) - + - Role filled by a User on a Unit for an event + Multiple Unit infos Result - + - Id of the locally stored event + Response Data - + - Local Event Id + Default constructor - + - UserId of the user filling the role + The information about a specific unit - + - RoleId of the role being filled + Id of the Unit - + - The name of the Role + The Id of the department the unit is under - + - Depicts a unit status in the Resgrid system. 
+ Name of the Unit - + - Response Data + Department assigned type for the unit - + - Depicts a unit's status + Department assigned type id for the unit - + - Unit Id + Custom Statuses Set Id - + - Units Name + Station Id of the station housing the unit (0 means no station) - + - The Type of the Unit + Name of the station the unit is under - + - Units current Status (State) + Vehicle Identification Number for the unit - + - CSS for status (for display) + Plate Number for the Unit - + - CSS Style for status (for display) + Is the unit 4-Wheel drive - + - Timestamp of this Unit State + Does the unit require a special permit to drive - + - Timestamp in Utc of this Unit State + Id number of the units current destination (0 means no destination) - + - Destination Id (Station or Call) + Name of the units current destination (0 means no destination) - + - Destination type (Station, Call, or POI). + The current status/state of the Unit - + - Name of the Desination (Call or Station) + The current status/state of the Unit as a name - + - Destination address. + The current status/state of the Unit color - + - Localized display label for the destination type (e.g. "Station", "Call", "POI"). Not - suitable for programmatic branching; use as the - machine-readable discriminator instead. 
+ The Timestamp of the status - + - Note for the State + The Timestamp of the status in UTC/GMT - + - Latitude + The units current Latitude - + - Longitude + The units current Longitude - + - Name of the Group the Unit is in + Current user provide status note - + - Id of the Group the Unit is in + Units Roles - + - Unit statuses (states) + Multiple Units Result - + Response Data - + Default constructor diff --git a/Web/Resgrid.Web/Areas/User/Controllers/CustomStatusesController.cs b/Web/Resgrid.Web/Areas/User/Controllers/CustomStatusesController.cs index 4121e01a..c3ce6368 100644 --- a/Web/Resgrid.Web/Areas/User/Controllers/CustomStatusesController.cs +++ b/Web/Resgrid.Web/Areas/User/Controllers/CustomStatusesController.cs @@ -186,7 +186,15 @@ public async Task EditDetail(int stateDetailId) var model = new EditDetailView(); model.Detail = await _customStateService.GetCustomDetailByIdAsync(stateDetailId); + + if (model.Detail == null) + return RedirectToAction("Index"); + model.Detail.CustomState = await _customStateService.GetCustomSateByIdAsync(model.Detail.CustomStateId); + + if (model.Detail.CustomState == null) + return RedirectToAction("Index"); + model.DetailTypes = model.DetailType.ToSelectList(); model.NoteTypes = model.NoteType.ToSelectList(); model.BaseTypes = model.BaseType.ToSelectList(); @@ -205,13 +213,19 @@ public async Task EditDetail(int stateDetailId) [Authorize(Policy = ResgridResources.CustomStates_Update)] public async Task EditDetail(EditDetailView model, CancellationToken cancellationToken) { + if (model.Detail == null) + return RedirectToAction("Index"); + if (!await _authorizationService.CanUserModifyCustomStateDetailAsync(UserId, model.Detail.CustomStateDetailId)) return Unauthorized(); + model.Detail.CustomState = await _customStateService.GetCustomSateByIdAsync(model.Detail.CustomStateId); + if (model.Detail.CustomState == null) + return RedirectToAction("Index"); + model.DetailTypes = model.DetailType.ToSelectList(); model.NoteTypes = 
model.NoteType.ToSelectList(); model.BaseTypes = model.BaseType.ToSelectList(); - model.Detail.CustomState = await _customStateService.GetCustomSateByIdAsync(model.Detail.CustomStateId); if (ModelState.IsValid) { diff --git a/Web/Resgrid.Web/Areas/User/Controllers/DispatchController.cs b/Web/Resgrid.Web/Areas/User/Controllers/DispatchController.cs index 89e0173f..f191451f 100644 --- a/Web/Resgrid.Web/Areas/User/Controllers/DispatchController.cs +++ b/Web/Resgrid.Web/Areas/User/Controllers/DispatchController.cs @@ -591,6 +591,11 @@ public async Task UpdateCall(UpdateCallView model, IFormCollectio return Unauthorized(); model = await FillUpdateCallView(model); + + // Populate navigation properties (References, Contacts, etc.) so the view can + // render safely if we return it on validation failure. + model.Call = await _callsService.PopulateCallData(model.Call, true, true, true, true, true, true, true, true, true); + var destinationPoi = await GetValidatedDestinationPoiAsync(model.Call?.DestinationPoiId); if (model.Call?.DestinationPoiId.HasValue == true && model.Call.DestinationPoiId.Value > 0 && destinationPoi == null) diff --git a/Web/Resgrid.Web/Areas/User/Models/Templates/NewCallNoteModel.cs b/Web/Resgrid.Web/Areas/User/Models/Templates/NewCallNoteModel.cs index 57fe27af..4ce348ef 100644 --- a/Web/Resgrid.Web/Areas/User/Models/Templates/NewCallNoteModel.cs +++ b/Web/Resgrid.Web/Areas/User/Models/Templates/NewCallNoteModel.cs @@ -9,6 +9,7 @@ public class NewCallNoteModel public int Sort { get; set; } [Required] + [MaxLength(256)] public string Name { get; set; } [Required] diff --git a/Web/Resgrid.Web/Areas/User/Views/Dispatch/UpdateCall.cshtml b/Web/Resgrid.Web/Areas/User/Views/Dispatch/UpdateCall.cshtml index ebbb5be7..5361e6b4 100644 --- a/Web/Resgrid.Web/Areas/User/Views/Dispatch/UpdateCall.cshtml +++ b/Web/Resgrid.Web/Areas/User/Views/Dispatch/UpdateCall.cshtml @@ -236,8 +236,10 @@ - @foreach (var callReference in Model.Call.References) + @if 
(Model.Call.References != null) { + foreach (var callReference in Model.Call.References) + { var linkedCallStateText = DispatchDisplayHelper.GetLocalizedCallState(callReference.TargetCall.State, localizer, commonLocalizer); @@ -253,6 +255,7 @@ + } }