diff --git a/CHANGELOG.md b/CHANGELOG.md
index b9570e1..fd901f6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,11 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## Unreleased
+
+- Fixed Gemini CLI integration https://github.com/codeaholicguy/ai-devkit/issues/3
+- Added test for TemplateManager.ts
+
 ## [0.4.0] - 2025-10-31
 
 ### Added
diff --git a/src/__tests__/lib/EnvironmentSelector.test.ts b/src/__tests__/lib/EnvironmentSelector.test.ts
index c5989e3..172515b 100644
--- a/src/__tests__/lib/EnvironmentSelector.test.ts
+++ b/src/__tests__/lib/EnvironmentSelector.test.ts
@@ -135,8 +135,8 @@ describe('EnvironmentSelector', () => {
       selector.displaySelectionSummary(['cursor', 'claude']);
 
       expect(consoleSpy).toHaveBeenCalledWith('\nSelected environments:');
-      expect(consoleSpy).toHaveBeenCalledWith(' [OK] Cursor');
-      expect(consoleSpy).toHaveBeenCalledWith(' [OK] Claude Code');
+      expect(consoleSpy).toHaveBeenCalledWith(' Cursor');
+      expect(consoleSpy).toHaveBeenCalledWith(' Claude Code');
       expect(consoleSpy).toHaveBeenCalledWith('');
     });
 
@@ -144,7 +144,7 @@
       selector.displaySelectionSummary(['cursor']);
 
       expect(consoleSpy).toHaveBeenCalledWith('\nSelected environments:');
-      expect(consoleSpy).toHaveBeenCalledWith(' [OK] Cursor');
+      expect(consoleSpy).toHaveBeenCalledWith(' Cursor');
       expect(consoleSpy).toHaveBeenCalledWith('');
     });
   });
diff --git a/src/__tests__/lib/PhaseSelector.test.ts b/src/__tests__/lib/PhaseSelector.test.ts
index 2c844e1..acc1232 100644
--- a/src/__tests__/lib/PhaseSelector.test.ts
+++ b/src/__tests__/lib/PhaseSelector.test.ts
@@ -82,8 +82,8 @@ describe('PhaseSelector', () => {
       selector.displaySelectionSummary(['requirements', 'design']);
 
       expect(consoleSpy).toHaveBeenCalledWith('\nSelected phases:');
-      expect(consoleSpy).toHaveBeenCalledWith(' [OK] Requirements & Problem Understanding');
-      expect(consoleSpy).toHaveBeenCalledWith(' [OK] System Design & Architecture');
+      expect(consoleSpy).toHaveBeenCalledWith(' Requirements & Problem Understanding');
+      expect(consoleSpy).toHaveBeenCalledWith(' System Design & Architecture');
       expect(consoleSpy).toHaveBeenCalledWith('');
     });
 
@@ -91,7 +91,7 @@
       selector.displaySelectionSummary(['requirements']);
 
       expect(consoleSpy).toHaveBeenCalledWith('\nSelected phases:');
-      expect(consoleSpy).toHaveBeenCalledWith(' [OK] Requirements & Problem Understanding');
+      expect(consoleSpy).toHaveBeenCalledWith(' Requirements & Problem Understanding');
       expect(consoleSpy).toHaveBeenCalledWith('');
     });
   });
diff --git a/src/__tests__/lib/TemplateManager.test.ts b/src/__tests__/lib/TemplateManager.test.ts
new file mode 100644
index 0000000..01310fe
--- /dev/null
+++ b/src/__tests__/lib/TemplateManager.test.ts
@@ -0,0 +1,221 @@
+import * as fs from 'fs-extra';
+import * as path from 'path';
+import { TemplateManager } from '../../lib/TemplateManager';
+import { EnvironmentDefinition } from '../../types';
+
+jest.mock('fs-extra');
+
+describe('TemplateManager', () => {
+  let templateManager: TemplateManager;
+  let mockFs: jest.Mocked<typeof fs>;
+
+  beforeEach(() => {
+    mockFs = fs as jest.Mocked<typeof fs>;
+    templateManager = new TemplateManager('/test/target');
+
+    jest.clearAllMocks();
+  });
+
+  afterEach(() => {
+    jest.restoreAllMocks();
+  });
+
+  describe('setupSingleEnvironment', () => {
+    it('should copy context file when it exists', async () => {
+      const env: EnvironmentDefinition = {
+        code: 'test-env',
+        name: 'Test Environment',
+        contextFileName: '.test-context.md',
+        commandPath: '.test',
+        isCustomCommandPath: false
+      };
+
+      (mockFs.pathExists as any)
+        .mockResolvedValueOnce(true)
+        .mockResolvedValueOnce(true);
+
+      (mockFs.readdir as any).mockResolvedValue(['command1.md', 'command2.toml']);
+
+      const result = await (templateManager as any).setupSingleEnvironment(env);
+
+      expect(mockFs.copy).toHaveBeenCalledWith(
+        path.join(templateManager['templatesDir'], 'env', 'base.md'),
+        path.join(templateManager['targetDir'], env.contextFileName)
+      );
+      expect(result).toContain(path.join(templateManager['targetDir'], env.contextFileName));
+    });
+
+    it('should warn when context file does not exist', async () => {
+      const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
+
+      const env: EnvironmentDefinition = {
+        code: 'test-env',
+        name: 'Test Environment',
+        contextFileName: '.test-context.md',
+        commandPath: '.test',
+        isCustomCommandPath: false
+      };
+
+      (mockFs.pathExists as any)
+        .mockResolvedValueOnce(false)
+        .mockResolvedValueOnce(true);
+
+      (mockFs.readdir as any).mockResolvedValue(['command1.md']);
+
+      const result = await (templateManager as any).setupSingleEnvironment(env);
+
+      expect(consoleWarnSpy).toHaveBeenCalledWith(
+        expect.stringContaining('Warning: Context file not found')
+      );
+      expect(result).toEqual([path.join(templateManager['targetDir'], env.commandPath, 'command1.md')]);
+
+      consoleWarnSpy.mockRestore();
+    });
+
+    it('should copy commands when isCustomCommandPath is false', async () => {
+      const env: EnvironmentDefinition = {
+        code: 'test-env',
+        name: 'Test Environment',
+        contextFileName: '.test-context.md',
+        commandPath: '.test',
+        isCustomCommandPath: false
+      };
+
+      const mockCommandFiles = ['command1.md', 'command2.toml', 'command3.md'];
+
+      (mockFs.pathExists as any)
+        .mockResolvedValueOnce(true) // context file exists
+        .mockResolvedValueOnce(true); // commands directory exists
+
+      (mockFs.readdir as any).mockResolvedValue(mockCommandFiles);
+
+      const result = await (templateManager as any).setupSingleEnvironment(env);
+
+      expect(mockFs.ensureDir).toHaveBeenCalledWith(
+        path.join(templateManager['targetDir'], env.commandPath)
+      );
+
+      // Should only copy .md files (not .toml files)
+      expect(mockFs.copy).toHaveBeenCalledWith(
+        path.join(templateManager['templatesDir'], 'commands', 'command1.md'),
+        path.join(templateManager['targetDir'], env.commandPath, 'command1.md')
+      );
+      expect(mockFs.copy).toHaveBeenCalledWith(
+        path.join(templateManager['templatesDir'], 'commands', 'command3.md'),
+        path.join(templateManager['targetDir'], env.commandPath, 'command3.md')
+      );
+
+      expect(result).toContain(path.join(templateManager['targetDir'], env.commandPath, 'command1.md'));
+      expect(result).toContain(path.join(templateManager['targetDir'], env.commandPath, 'command3.md'));
+    });
+
+    it('should skip commands when isCustomCommandPath is true', async () => {
+      const env: EnvironmentDefinition = {
+        code: 'test-env',
+        name: 'Test Environment',
+        contextFileName: '.test-context.md',
+        commandPath: '.test',
+        isCustomCommandPath: true
+      };
+
+      (mockFs.pathExists as any).mockResolvedValueOnce(true);
+
+      const result = await (templateManager as any).setupSingleEnvironment(env);
+
+      expect(mockFs.ensureDir).not.toHaveBeenCalled();
+      expect(mockFs.copy).toHaveBeenCalledTimes(1);
+      expect(result).toContain(path.join(templateManager['targetDir'], env.contextFileName));
+    });
+
+    it('should handle cursor environment with special files', async () => {
+      const env: EnvironmentDefinition = {
+        code: 'cursor',
+        name: 'Cursor',
+        contextFileName: '.cursor.md',
+        commandPath: '.cursor',
+        isCustomCommandPath: false
+      };
+
+      const mockRuleFiles = ['rule1.md', 'rule2.toml'];
+
+      (mockFs.pathExists as any)
+        .mockResolvedValueOnce(true)
+        .mockResolvedValueOnce(true)
+        .mockResolvedValueOnce(true);
+
+      (mockFs.readdir as any)
+        .mockResolvedValueOnce([])
+        .mockResolvedValueOnce(mockRuleFiles);
+      const result = await (templateManager as any).setupSingleEnvironment(env);
+
+      expect(mockFs.ensureDir).toHaveBeenCalledWith(
+        path.join(templateManager['targetDir'], '.cursor', 'rules')
+      );
+      expect(mockFs.copy).toHaveBeenCalledWith(
+        path.join(templateManager['templatesDir'], 'env', 'cursor', 'rules'),
+        path.join(templateManager['targetDir'], '.cursor', 'rules')
+      );
+
+      expect(result).toContain(path.join(templateManager['targetDir'], '.cursor', 'rules', 'rule1.md'));
+      expect(result).toContain(path.join(templateManager['targetDir'], '.cursor', 'rules', 'rule2.toml'));
+    });
+
+    it('should handle gemini environment with toml files', async () => {
+      const env: EnvironmentDefinition = {
+        code: 'gemini',
+        name: 'Gemini',
+        contextFileName: '.gemini.md',
+        commandPath: '.gemini',
+        isCustomCommandPath: false
+      };
+
+      const mockCommandFiles = ['command1.md', 'command2.toml', 'command3.toml'];
+
+      (mockFs.pathExists as any)
+        .mockResolvedValueOnce(true)
+        .mockResolvedValueOnce(true)
+        .mockResolvedValueOnce(true); // gemini commands directory exists
+
+      (mockFs.readdir as any).mockResolvedValue(mockCommandFiles);
+
+      const result = await (templateManager as any).setupSingleEnvironment(env);
+
+      expect(mockFs.ensureDir).toHaveBeenCalledWith(
+        path.join(templateManager['targetDir'], '.gemini', 'commands')
+      );
+
+      expect(mockFs.copy).toHaveBeenCalledWith(
+        path.join(templateManager['templatesDir'], 'commands', 'command2.toml'),
+        path.join(templateManager['targetDir'], '.gemini', 'commands', 'command2.toml')
+      );
+      expect(mockFs.copy).toHaveBeenCalledWith(
+        path.join(templateManager['templatesDir'], 'commands', 'command3.toml'),
+        path.join(templateManager['targetDir'], '.gemini', 'commands', 'command3.toml')
+      );
+
+      expect(result).toContain(path.join(templateManager['targetDir'], '.gemini', 'commands', 'command2.toml'));
+      expect(result).toContain(path.join(templateManager['targetDir'], '.gemini', 'commands', 'command3.toml'));
+    });
+
+    it('should handle errors and rethrow them', async () => {
+      const consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation();
+
+      const env: EnvironmentDefinition = {
+        code: 'test-env',
+        name: 'Test Environment',
+        contextFileName: '.test-context.md',
+        commandPath: '.test',
+        isCustomCommandPath: false
+      };
+
+      const testError = new Error('Test error');
+      (mockFs.pathExists as any).mockRejectedValue(testError);
+
+      await expect((templateManager as any).setupSingleEnvironment(env)).rejects.toThrow('Test error');
+
+      expect(consoleErrorSpy).toHaveBeenCalledWith(
+        'Error setting up environment Test Environment:',
+        testError
+      );
+
+      consoleErrorSpy.mockRestore();
+    });
+  });
+});
diff --git a/src/lib/EnvironmentSelector.ts b/src/lib/EnvironmentSelector.ts
index ed53303..1beb658 100644
--- a/src/lib/EnvironmentSelector.ts
+++ b/src/lib/EnvironmentSelector.ts
@@ -58,7 +58,7 @@ export class EnvironmentSelector {
     console.log('\nSelected environments:');
     selected.forEach(envId => {
-      console.log(` [OK] ${getEnvironmentDisplayName(envId)}`);
+      console.log(` ${getEnvironmentDisplayName(envId)}`);
     });
     console.log('');
   }
 }
diff --git a/src/lib/PhaseSelector.ts b/src/lib/PhaseSelector.ts
index fb5dfa9..c7141d2 100644
--- a/src/lib/PhaseSelector.ts
+++ b/src/lib/PhaseSelector.ts
@@ -49,7 +49,7 @@ export class PhaseSelector {
     console.log('\nSelected phases:');
     selected.forEach(phase => {
-      console.log(` [OK] ${PHASE_DISPLAY_NAMES[phase]}`);
+      console.log(` ${PHASE_DISPLAY_NAMES[phase]}`);
     });
     console.log('');
   }
 }
diff --git a/src/lib/TemplateManager.ts b/src/lib/TemplateManager.ts
index 08e5247..49ee3d3 100644
--- a/src/lib/TemplateManager.ts
+++ b/src/lib/TemplateManager.ts
@@ -1,21 +1,21 @@
-import * as fs from 'fs-extra';
-import * as path from 'path';
-import { Phase, EnvironmentCode, EnvironmentDefinition } from '../types';
-import { getEnvironment } from '../util/env';
+import * as fs from "fs-extra";
+import * as path from "path";
+import { Phase, EnvironmentCode, EnvironmentDefinition } from "../types";
+import { getEnvironment } from "../util/env";
 
 export class TemplateManager {
   private templatesDir: string;
   private targetDir: string;
 
   constructor(targetDir: string = process.cwd()) {
-    this.templatesDir = path.join(__dirname, '../../templates');
+    this.templatesDir = path.join(__dirname, "../../templates");
     this.targetDir = targetDir;
   }
 
   async copyPhaseTemplate(phase: Phase): Promise<string> {
-    const sourceFile = path.join(this.templatesDir, 'phases', `${phase}.md`);
-    const targetDir = path.join(this.targetDir, 'docs', 'ai', phase);
-    const targetFile = path.join(targetDir, 'README.md');
+    const sourceFile = path.join(this.templatesDir, "phases", `${phase}.md`);
+    const targetDir = path.join(this.targetDir, "docs", "ai", phase);
+    const targetFile = path.join(targetDir, "README.md");
 
     await fs.ensureDir(targetDir);
     await fs.copy(sourceFile, targetFile);
@@ -23,13 +23,20 @@
     return targetFile;
   }
 
   async fileExists(phase: Phase): Promise<boolean> {
-    const targetFile = path.join(this.targetDir, 'docs', 'ai', phase, 'README.md');
+    const targetFile = path.join(
+      this.targetDir,
+      "docs",
+      "ai",
+      phase,
+      "README.md"
+    );
     return fs.pathExists(targetFile);
   }
 
-  async setupMultipleEnvironments(environmentIds: EnvironmentCode[]): Promise<string[]> {
+  async setupMultipleEnvironments(
+    environmentIds: EnvironmentCode[]
+  ): Promise<string[]> {
     const copiedFiles: string[] = [];
 
     for (const envId of environmentIds) {
@@ -67,11 +74,13 @@
     return contextFileExists || commandDirExists;
   }
 
-  private async setupSingleEnvironment(env: EnvironmentDefinition): Promise<string[]> {
+  private async setupSingleEnvironment(
+    env: EnvironmentDefinition
+  ): Promise<string[]> {
     const copiedFiles: string[] = [];
 
     try {
-      const contextSource = path.join(this.templatesDir, 'env', 'base.md');
+      const contextSource = path.join(this.templatesDir, "env", "base.md");
       const contextTarget = path.join(this.targetDir, env.contextFileName);
 
       if (await fs.pathExists(contextSource)) {
@@ -81,25 +90,20 @@
         console.warn(`Warning: Context file not found: ${contextSource}`);
       }
 
-      const commandsSourceDir = path.join(this.templatesDir, 'commands');
-      const commandsTargetDir = path.join(this.targetDir, env.commandPath);
-
-      if (await fs.pathExists(commandsSourceDir)) {
-        await fs.ensureDir(commandsTargetDir);
-        await fs.copy(commandsSourceDir, commandsTargetDir);
-
-        const commandFiles = await fs.readdir(commandsTargetDir);
-        commandFiles.forEach(file => {
-          copiedFiles.push(path.join(commandsTargetDir, file));
-        });
-      } else {
-        console.warn(`Warning: Commands directory not found: ${commandsSourceDir}`);
+      if (!env.isCustomCommandPath) {
+        await this.copyCommands(env, copiedFiles);
       }
 
-      if (env.code === 'cursor') {
-        await this.copyCursorSpecificFiles(copiedFiles);
+      switch (env.code) {
+        case "cursor":
+          await this.copyCursorSpecificFiles(copiedFiles);
+          break;
+        case "gemini":
+          await this.copyGeminiSpecificFiles(copiedFiles);
+          break;
+        default:
+          break;
       }
-
     } catch (error) {
       console.error(`Error setting up environment ${env.name}:`, error);
       throw error;
@@ -108,19 +112,72 @@
     return copiedFiles;
   }
 
+  private async copyCommands(
+    env: EnvironmentDefinition,
+    copiedFiles: string[]
+  ): Promise<void> {
+    const commandsSourceDir = path.join(this.templatesDir, "commands");
+    const commandsTargetDir = path.join(this.targetDir, env.commandPath);
+
+    if (await fs.pathExists(commandsSourceDir)) {
+      await fs.ensureDir(commandsTargetDir);
+
+      const commandFiles = await fs.readdir(commandsSourceDir);
+      await Promise.all(
+        commandFiles
+          .filter((file: string) => file.endsWith(".md"))
+          .map(async (file: string) => {
+            await fs.copy(
+              path.join(commandsSourceDir, file),
+              path.join(commandsTargetDir, file)
+            );
+            copiedFiles.push(path.join(commandsTargetDir, file));
+          })
+      );
+    } else {
+      console.warn(
+        `Warning: Commands directory not found: ${commandsSourceDir}`
+      );
+    }
+  }
+
   private async copyCursorSpecificFiles(copiedFiles: string[]): Promise<void> {
-    const rulesSourceDir = path.join(this.templatesDir, 'env', 'cursor', 'rules');
-    const rulesTargetDir = path.join(this.targetDir, '.cursor', 'rules');
+    const rulesSourceDir = path.join(
+      this.templatesDir,
+      "env",
+      "cursor",
+      "rules"
+    );
+    const rulesTargetDir = path.join(this.targetDir, ".cursor", "rules");
 
     if (await fs.pathExists(rulesSourceDir)) {
       await fs.ensureDir(rulesTargetDir);
       await fs.copy(rulesSourceDir, rulesTargetDir);
 
       const ruleFiles = await fs.readdir(rulesSourceDir);
-      ruleFiles.forEach(file => {
+      ruleFiles.forEach((file) => {
        copiedFiles.push(path.join(rulesTargetDir, file));
       });
     }
   }
 
-}
+  private async copyGeminiSpecificFiles(copiedFiles: string[]): Promise<void> {
+    const commandFiles = await fs.readdir(
+      path.join(this.templatesDir, "commands")
+    );
+    const commandTargetDir = path.join(this.targetDir, ".gemini", "commands");
+
+    await fs.ensureDir(commandTargetDir);
+    await Promise.all(
+      commandFiles
+        .filter((file: string) => file.endsWith(".toml"))
+        .map(async (file: string) => {
+          await fs.copy(
+            path.join(this.templatesDir, "commands", file),
+            path.join(commandTargetDir, file)
+          );
+          copiedFiles.push(path.join(commandTargetDir, file));
+        })
+    );
+  }
+}
diff --git a/src/types.ts b/src/types.ts
index c682ba5..5ac001d 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -13,6 +13,7 @@ export interface EnvironmentDefinition {
   contextFileName: string;
   commandPath: string;
   description?: string;
+  isCustomCommandPath?: boolean;
 }
 
 export type EnvironmentCode = 'cursor' | 'claude' | 'github' | 'gemini' | 'codex' | 'windsurf' | 'kilocode' | 'amp' | 'opencode' | 'roo';
diff --git a/src/util/env.ts b/src/util/env.ts
index 3b63cfb..6264fe7 100644
--- a/src/util/env.ts
+++ b/src/util/env.ts
@@ -24,6 +24,7 @@ export const ENVIRONMENT_DEFINITIONS: Record<EnvironmentCode, EnvironmentDefinition>
.md
+```
+- Parse sections that represent task lists (look for headings + checkboxes `[ ]`, `[x]`), as in the example below.
+- Build an ordered queue of tasks grouped by section (e.g., Foundation, Core Features, Testing).
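+For example (task names below are illustrative), a parsed planning section might look like:
+```
+## Core Features
+- [x] Task: Implement auth service
+- [ ] Task: Add unit tests for the auth service
+```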
+
+## Step 3: Present Task Queue
+Show an overview:
+```
+### Task Queue:
+1. [status] Section • Task title
+2. ...
+```
+Status legend: `todo`, `in-progress`, `done`, `blocked` (based on checkbox/notes if present).
+
+## Step 4: Interactive Task Execution
+For each task in order:
+1. Display the section/context, full bullet text, and any existing notes.
+2. Suggest relevant docs to reference (requirements/design/implementation).
+3. Ask: "Plan for this task?" Offer to outline sub-steps using the design doc.
+4. Prompt to mark status (`done`, `in-progress`, `blocked`, `skipped`) and capture short notes/next steps.
+5. Encourage code/document edits inside Cursor; offer commands/snippets when useful.
+6. If blocked, record blocker info and move the task to the end or into a "Blocked" list.
+
+## Step 5: Update Planning Doc
+After each status change, generate a Markdown snippet the user can paste back into the planning doc, e.g.:
+```
+- [x] Task: Implement auth service (Notes: finished POST /auth/login, tests added)
+```
+Remind the user to keep the source doc updated.
+
+## Step 6: Check for Newly Discovered Work
+After each section, ask if new tasks were discovered. If yes, capture them in a "New Work" list with status `todo` and include them in the summary.
+
+## Step 7: Session Summary
+Produce a summary:
+```
+### Execution Summary
+- Completed: (list)
+- In Progress: (list + owners/next steps)
+- Blocked: (list + blockers)
+- Skipped / Deferred: (list + rationale)
+- New Tasks: (list)
+```
+
+## Step 8: Next Actions
+Remind the user to:
+- Update `docs/ai/planning/feature-{name}.md` with the new statuses
+- Sync related docs (requirements/design/implementation/testing) if decisions changed
+- Run `/check-implementation` to validate changes against design docs
+- Run `/writing-test` to produce unit/integration tests targeting 100% coverage
+- Run `/update-planning` to reconcile the planning doc with the latest status
+- Run `/code-review` when ready for final review
+- Run test suites relevant to completed tasks
+
+---
+Let me know when you're ready to start executing the plan. Provide the feature
+name and planning doc first.'''
diff --git a/templates/commands/new-requirement.toml b/templates/commands/new-requirement.toml
new file mode 100644
index 0000000..50fa6d5
--- /dev/null
+++ b/templates/commands/new-requirement.toml
@@ -0,0 +1,129 @@
+description='''Add new feature/requirement documentation and guide me through
+the development workflow from requirements to testing.'''
+prompt='''I want to add a new feature/requirement. Please guide me through the complete development workflow:
+
+## Step 1: Capture Requirement
+First, ask me:
+- What is the feature name? (e.g., "user-authentication", "payment-integration")
+- What problem does it solve?
+- Who will use it?
+- What are the key user stories?
+
+## Step 2: Create Feature Documentation Structure
+Once I provide the requirement, create the following files (copy the existing template content so sections/frontmatter match exactly):
+- Start from `docs/ai/requirements/README.md` → save as `docs/ai/requirements/feature-{name}.md`
+- Start from `docs/ai/design/README.md` → save as `docs/ai/design/feature-{name}.md`
+- Start from `docs/ai/planning/README.md` → save as `docs/ai/planning/feature-{name}.md`
+- Start from `docs/ai/implementation/README.md` → save as `docs/ai/implementation/feature-{name}.md`
+- Start from `docs/ai/testing/README.md` → save as `docs/ai/testing/feature-{name}.md`
+
+Ensure the YAML frontmatter and section headings remain identical to the templates before filling in feature-specific content.
+
+## Step 3: Requirements Phase
+Help me fill out `docs/ai/requirements/feature-{name}.md`:
+- Clarify the problem statement
+- Define goals and non-goals
+- Write detailed user stories
+- Establish success criteria
+- Identify constraints and assumptions
+- List open questions
+
+## Step 4: Design Phase
+Guide me through `docs/ai/design/feature-{name}.md`:
+- Propose system architecture changes needed
+- Define data models/schema changes
+- Design API endpoints or interfaces
+- Identify components to create/modify
+- Document key design decisions
+- Note security and performance considerations
+
+## Step 5: Planning Phase
+Help me break down work in `docs/ai/planning/feature-{name}.md`:
+- Create task breakdown with subtasks
+- Identify dependencies (on other features, APIs, etc.)
+- Estimate effort for each task
+- Suggest implementation order
+- Identify risks and mitigation strategies
+
+## Step 6: Documentation Review (Chained Commands)
+Once the docs above are drafted, run the following commands to tighten them up:
+- `/review-requirements` to validate the requirements doc for completeness and clarity
+- `/review-design` to ensure the design doc aligns with requirements and highlights key decisions
+
+(If you are using Claude Code, reference the `review-requirements` and `review-design` commands instead.)
+
+## Step 7: Implementation Phase (Deferred)
+This command focuses on documentation only. Actual implementation happens later via `/execute-plan`.
+For each task in the plan:
+1. Review the task requirements and design
+2. Ask me to confirm I'm starting this task
+3. Guide implementation with reference to design docs
+4. Suggest code structure and patterns
+5. Help with error handling and edge cases
+6. Update `docs/ai/implementation/feature-{name}.md` with notes
+
+## Step 8: Testing Phase
+Guide testing in `docs/ai/testing/feature-{name}.md`:
+- Draft unit test cases with `/writing-test`
+- Draft integration test scenarios with `/writing-test`
+- Recommend manual testing steps
+- Help write test code
+- Verify all success criteria are testable
+
+## Step 9: Local Testing & Verification
+Guide me through:
+1. Running all tests locally
+2. Manual testing checklist
+3. Reviewing against requirements
+4. Checking design compliance
+5. Preparing for code review (diff summary, list of files, design references)
+
+## Step 10: Local Code Review (Optional but recommended)
+Before pushing, ask me to run `/code-review` with the modified file list and relevant docs.
+
+## Step 11: Implementation Execution Reminder
+When ready to implement, run `/execute-plan` to work through the planning doc tasks interactively. That command will orchestrate implementation, testing, and follow-up documentation.
+
+## Step 12: Create Merge/Pull Request
+Provide the MR/PR description:
+```markdown
+## Feature: [Feature Name]
+
+### Summary
+[Brief description of what this feature does]
+
+### Requirements
+- Documented in: `docs/ai/requirements/feature-{name}.md`
+- Related to: [issue/ticket number if applicable]
+
+### Changes
+- [List key changes]
+- [List new files/components]
+- [List modified files]
+
+### Design
+- Architecture: [Link to design doc section]
+- Key decisions: [Brief summary]
+
+### Testing
+- Unit tests: [coverage/status]
+- Integration tests: [status]
+- Manual testing: Completed
+- Test documentation: `docs/ai/testing/feature-{name}.md`
+
+### Checklist
+- [ ] Code follows project standards
+- [ ] All tests pass
+- [ ] Documentation updated
+- [ ] No breaking changes (or documented if any)
+- [ ] Ready for review
+```
+
+Then provide the appropriate command:
+- **GitHub**: `gh pr create --title "feat: [feature-name]" --body-file pr-description.md`
+- **GitLab**: `glab mr create --title "feat: [feature-name]" --description "$(cat mr-description.md)"`
+
+---
+
+**Let's start! Tell me about the feature you want to build.**
+'''
diff --git a/templates/commands/review-design.toml b/templates/commands/review-design.toml
new file mode 100644
index 0000000..10defef
--- /dev/null
+++ b/templates/commands/review-design.toml
@@ -0,0 +1,13 @@
+description='''Review the design documentation for a feature to ensure
+completeness and accuracy.'''
+prompt='''Review the design documentation in `docs/ai/design/feature-{name}.md` (and the project-level README if relevant). Summarize:
+- Architecture overview (ensure the mermaid diagram is present and accurate)
+- Key components and their responsibilities
+- Technology choices and rationale
+- Data models and relationships
+- API/interface contracts (inputs, outputs, auth)
+- Major design decisions and trade-offs
+- Non-functional requirements that must be preserved
+
+Highlight any inconsistencies, missing sections, or diagrams that need updates.
+'''
diff --git a/templates/commands/review-requirements.toml b/templates/commands/review-requirements.toml
new file mode 100644
index 0000000..1c1e7c1
--- /dev/null
+++ b/templates/commands/review-requirements.toml
@@ -0,0 +1,11 @@
+description='''Review the requirements documentation for a feature to ensure
+completeness and alignment with project standards.'''
+prompt='''Please review `docs/ai/requirements/feature-{name}.md` and the project-level template `docs/ai/requirements/README.md` to ensure structure and content alignment. Summarize:
+- Core problem statement and affected users
+- Goals, non-goals, and success criteria
+- Primary user stories & critical flows
+- Constraints, assumptions, open questions
+- Any missing sections or deviations from the template
+
+Identify gaps or contradictions and suggest clarifications.
+'''
diff --git a/templates/commands/update-planning.toml b/templates/commands/update-planning.toml
new file mode 100644
index 0000000..6bb3952
--- /dev/null
+++ b/templates/commands/update-planning.toml
@@ -0,0 +1,63 @@
+description='''Assist in updating planning documentation to reflect current
+implementation progress for a feature.'''
+prompt='''# Planning Update Assistant
+
+Please help me reconcile the current implementation progress with our planning documentation.
+
+## Step 1: Gather Context
+Ask me for:
+- Feature/branch name and brief status
+- Tasks completed since last update
+- Any new tasks discovered
+- Current blockers or risks
+- Relevant planning docs (e.g., `docs/ai/planning/feature-{name}.md`)
+
+## Step 2: Review Planning Doc
+If a planning doc exists:
+- Summarize existing milestones and task breakdowns
+- Note expected sequencing and dependencies
+- Identify outstanding tasks in the plan
+
+## Step 3: Reconcile Progress
+For each planned task:
+- Mark status (done / in progress / blocked / not started)
+- Note actual work completed vs. planned scope
+- Record blockers or changes in approach
+- Identify tasks that were skipped or added
+
+## Step 4: Update Task List
+Help me produce an updated checklist such as:
+```
+### Current Status: [Feature Name]
+
+#### Done
+- [x] Task A - short note on completion or link to commit/pr
+- [x] Task B
+
+#### In Progress
+- [ ] Task C - waiting on [dependency]
+
+#### Blocked
+- [ ] Task D - blocked by [issue/owner]
+
+#### Newly Discovered Work
+- [ ] Task E - reason discovered
+- [ ] Task F - due by [date]
+```
+
+## Step 5: Next Steps & Priorities
+- Suggest the next 2-3 actionable tasks
+- Highlight risky areas needing attention
+- Recommend coordination (design changes, stakeholder sync, etc.)
+- List documentation updates needed
+
+## Step 6: Summary for Planning Doc
+Prepare a summary paragraph to copy into the planning doc, covering:
+- Current state and progress
+- Major risks/blockers
+- Upcoming focus items
+- Any changes to scope or timeline
+
+---
+Let me know when you're ready to begin the planning update.
+'''
diff --git a/templates/commands/writing-test.toml b/templates/commands/writing-test.toml
new file mode 100644
index 0000000..ee5a06b
--- /dev/null
+++ b/templates/commands/writing-test.toml
@@ -0,0 +1,46 @@
+description='''Add tests for a new feature'''
+prompt='''Review `docs/ai/testing/feature-{name}.md` and ensure it mirrors the base template before writing tests.
+
+## Step 1: Gather Context
+Ask me for:
+- Feature name and branch
+- Summary of what changed (link to design & requirements docs)
+- Target environment (backend, frontend, full-stack)
+- Existing automated test suites (unit, integration, E2E)
+- Any flaky or slow tests to avoid
+
+## Step 2: Analyze Testing Template
+- Identify required sections from `docs/ai/testing/feature-{name}.md` (unit, integration, manual verification, coverage targets)
+- Confirm success criteria and edge cases from requirements & design docs
+- Note any mocks/stubs or fixtures already available
+
+## Step 3: Unit Tests (Aim for 100% coverage)
+For each module/function:
+1. List behavior scenarios (happy path, edge cases, error handling)
+2. Generate concrete test cases with assertions and inputs
+3. Reference existing utilities/mocks to accelerate implementation
+4. Provide pseudocode or actual test snippets
+5. Highlight potential missing branches preventing full coverage
+
+## Step 4: Integration Tests
+1. Identify critical flows that span multiple components/services
+2. Define setup/teardown steps (databases, APIs, queues)
+3. Outline test cases validating interaction boundaries, data contracts, and failure modes
+4. Suggest instrumentation/logging to debug failures
+
+## Step 5: Coverage Strategy
+- Recommend tooling commands (e.g., `npm run test -- --coverage`)
+- Call out files/functions that still need coverage and why
+- Suggest additional tests if coverage <100%
+
+## Step 6: Manual & Exploratory Testing
+- Propose a manual test checklist covering UX, accessibility, and error handling
+- Identify exploratory scenarios or chaos/failure-injection tests if relevant
+
+## Step 7: Update Documentation & TODOs
+- Summarize which tests were added or are still missing
+- Update `docs/ai/testing/feature-{name}.md` sections with links to test files and results
+- Flag follow-up tasks for deferred tests (with owners/dates)
+
+Let me know when you have the latest code changes ready; we'll write tests together until we hit 100% coverage.
+'''