diff --git a/pkg/cli/audit_agent_example_test.go b/pkg/cli/audit_agent_example_test.go new file mode 100644 index 000000000..ac44d1d61 --- /dev/null +++ b/pkg/cli/audit_agent_example_test.go @@ -0,0 +1,444 @@ +package cli + +import ( + "bytes" + "encoding/json" + "io" + "os" + "strings" + "testing" + "time" + + "github.com/githubnext/gh-aw/pkg/testutil" + "github.com/githubnext/gh-aw/pkg/workflow" +) + +// TestAgentFriendlyOutputExample demonstrates the new agent-friendly output format +func TestAgentFriendlyOutputExample(t *testing.T) { + // Create a realistic workflow run scenario + run := WorkflowRun{ + DatabaseID: 987654, + WorkflowName: "weekly-research", + Status: "completed", + Conclusion: "success", + CreatedAt: time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC), + StartedAt: time.Date(2024, 1, 15, 10, 1, 0, 0, time.UTC), + UpdatedAt: time.Date(2024, 1, 15, 10, 15, 30, 0, time.UTC), + Duration: 14*time.Minute + 30*time.Second, + Event: "schedule", + HeadBranch: "main", + URL: "https://github.com/org/repo/actions/runs/987654", + TokenUsage: 45000, + EstimatedCost: 0.18, + Turns: 12, + ErrorCount: 0, + WarningCount: 2, + LogsPath: testutil.TempDir(t, "test-*"), + } + + metrics := LogMetrics{ + TokenUsage: 45000, + EstimatedCost: 0.18, + Turns: 12, + Errors: []workflow.LogError{ + { + File: "agent.log", + Line: 125, + Type: "warning", + Message: "Rate limit approaching for GitHub API", + }, + { + File: "agent.log", + Line: 230, + Type: "warning", + Message: "Large output size may impact performance", + }, + }, + ToolCalls: []workflow.ToolCallInfo{ + { + Name: "github_search_repositories", + CallCount: 8, + MaxInputSize: 512, + MaxOutputSize: 4096, + MaxDuration: 3 * time.Second, + }, + { + Name: "web_search", + CallCount: 5, + MaxInputSize: 256, + MaxOutputSize: 2048, + MaxDuration: 2 * time.Second, + }, + { + Name: "bash_echo", + CallCount: 3, + MaxInputSize: 128, + MaxOutputSize: 256, + MaxDuration: 500 * time.Millisecond, + }, + }, + } + + firewallAnalysis := &FirewallAnalysis{ + DomainBuckets: DomainBuckets{ + AllowedDomains: []string{ + "api.github.com:443", + "search.brave.com:443", + "npmjs.org:443", + }, + DeniedDomains: []string{ + "tracking.example.com:443", + }, + }, + TotalRequests: 42, + AllowedRequests: 40, + DeniedRequests: 2, + RequestsByDomain: map[string]DomainRequestStats{ + "api.github.com:443": {Allowed: 25, Denied: 0}, + "search.brave.com:443": {Allowed: 10, Denied: 0}, + "npmjs.org:443": {Allowed: 5, Denied: 0}, + "tracking.example.com:443": {Allowed: 0, Denied: 2}, + }, + } + + processedRun := ProcessedRun{ + Run: run, + FirewallAnalysis: firewallAnalysis, + MissingTools: []MissingToolReport{}, + MCPFailures: []MCPFailureReport{}, + JobDetails: []JobInfoWithDuration{ + { + JobInfo: JobInfo{ + Name: "research", + Status: "completed", + Conclusion: "success", + StartedAt: run.StartedAt, + CompletedAt: run.UpdatedAt, + }, + Duration: run.Duration, + }, + }, + } + + // Build audit data + auditData := buildAuditData(processedRun, metrics) + + // Test JSON output + t.Run("JSON Output", func(t *testing.T) { + jsonBytes, err := json.MarshalIndent(auditData, "", " ") + if err != nil { + t.Fatalf("Failed to marshal JSON: %v", err) + } + + // Verify key sections exist + jsonStr := string(jsonBytes) + if !strings.Contains(jsonStr, `"key_findings"`) { + t.Error("JSON missing key_findings") + } + if !strings.Contains(jsonStr, `"recommendations"`) { + t.Error("JSON missing recommendations") + } + if !strings.Contains(jsonStr, `"performance_metrics"`) { + t.Error("JSON missing 
performance_metrics") + } + + // Print sample JSON for documentation + t.Logf("Sample JSON Output:\n%s", string(jsonBytes)) + }) + + // Test console output + t.Run("Console Output", func(t *testing.T) { + // Capture console output + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + renderConsole(auditData, run.LogsPath) + + w.Close() + var buf bytes.Buffer + io.Copy(&buf, r) + os.Stdout = oldStdout + + output := buf.String() + + // Verify key sections + expectedSections := []string{ + "# Workflow Run Audit Report", + "## Overview", + "## Key Findings", + "## Recommendations", + "## Performance Metrics", + "## Metrics", + "## Jobs", + "## Firewall Analysis", + "## Tool Usage", + "## Errors and Warnings", + } + + for _, section := range expectedSections { + if !strings.Contains(output, section) { + t.Errorf("Console output missing section: %s", section) + } + } + + // Verify emojis and visual indicators + if !strings.Contains(output, "✅") { + t.Error("Console output should contain success indicator ✅") + } + + // Print sample console output for documentation + t.Logf("Sample Console Output:\n%s", output) + }) + + // Verify key findings quality + t.Run("Key Findings Quality", func(t *testing.T) { + if len(auditData.KeyFindings) == 0 { + t.Error("Expected key findings to be generated") + } + + // Should have findings for high token usage and many turns + hasPerformanceFinding := false + for _, finding := range auditData.KeyFindings { + if finding.Category == "performance" { + hasPerformanceFinding = true + } + // All findings should have impact + if finding.Impact == "" && finding.Severity != "info" { + t.Errorf("Finding '%s' missing impact", finding.Title) + } + } + + if !hasPerformanceFinding { + t.Error("Expected performance finding for high token usage") + } + }) + + // Verify recommendations quality + t.Run("Recommendations Quality", func(t *testing.T) { + if len(auditData.Recommendations) == 0 { + t.Error("Expected recommendations to be generated") + } + + for _, rec := range auditData.Recommendations { + // All recommendations should have action, reason, and priority + if rec.Action == "" { + t.Error("Recommendation missing action") + } + if rec.Reason == "" { + t.Error("Recommendation missing reason") + } + if rec.Priority == "" { + t.Error("Recommendation missing priority") + } + } + }) + + // Verify performance metrics + t.Run("Performance Metrics Quality", func(t *testing.T) { + if auditData.PerformanceMetrics == nil { + t.Fatal("Expected performance metrics to be generated") + } + + pm := auditData.PerformanceMetrics + + if pm.TokensPerMinute <= 0 { + t.Error("Expected tokens per minute to be calculated") + } + + if pm.CostEfficiency == "" { + t.Error("Expected cost efficiency to be set") + } + + if pm.MostUsedTool == "" { + t.Error("Expected most used tool to be identified") + } + + if pm.NetworkRequests != 42 { + t.Errorf("Expected 42 network requests, got %d", pm.NetworkRequests) + } + + // Verify cost efficiency calculation + // Cost: $0.18, Duration: 14.5 minutes = $0.0124/min → "good" + if pm.CostEfficiency != "good" { + t.Errorf("Expected 'good' cost efficiency, got '%s'", pm.CostEfficiency) + } + }) +} + +// TestAgentFriendlyOutputFailureScenario tests output for a failed workflow +func TestAgentFriendlyOutputFailureScenario(t *testing.T) { + // Create a failed workflow scenario + run := WorkflowRun{ + DatabaseID: 111222, + WorkflowName: "ci-build", + Status: "completed", + Conclusion: "failure", + CreatedAt: time.Date(2024, 1, 15, 12, 0, 0, 0, time.UTC), + 
Duration: 3*time.Minute + 45*time.Second, + Event: "push", + HeadBranch: "feature-branch", + URL: "https://github.com/org/repo/actions/runs/111222", + TokenUsage: 8000, + EstimatedCost: 0.03, + Turns: 4, + ErrorCount: 3, + WarningCount: 1, + LogsPath: testutil.TempDir(t, "test-*"), + } + + metrics := LogMetrics{ + TokenUsage: 8000, + EstimatedCost: 0.03, + Turns: 4, + Errors: []workflow.LogError{ + { + File: "build.log", + Line: 15, + Type: "error", + Message: "Connection timeout while fetching dependencies", + }, + { + File: "build.log", + Line: 42, + Type: "error", + Message: "Build process terminated unexpectedly", + }, + { + File: "build.log", + Line: 50, + Type: "error", + Message: "Failed to publish artifacts", + }, + { + File: "build.log", + Line: 30, + Type: "warning", + Message: "Deprecated API usage detected", + }, + }, + } + + processedRun := ProcessedRun{ + Run: run, + MCPFailures: []MCPFailureReport{ + { + ServerName: "build-tools", + Status: "connection_failed", + }, + }, + JobDetails: []JobInfoWithDuration{ + { + JobInfo: JobInfo{ + Name: "build", + Status: "completed", + Conclusion: "failure", + }, + Duration: run.Duration, + }, + }, + } + + // Build audit data + auditData := buildAuditData(processedRun, metrics) + + // Test failure analysis + t.Run("Failure Analysis", func(t *testing.T) { + if auditData.FailureAnalysis == nil { + t.Fatal("Expected failure analysis for failed workflow") + } + + fa := auditData.FailureAnalysis + + if fa.PrimaryFailure != "failure" { + t.Errorf("Expected primary failure 'failure', got '%s'", fa.PrimaryFailure) + } + + if len(fa.FailedJobs) == 0 { + t.Error("Expected failed jobs to be listed") + } + + if fa.RootCause == "" { + t.Error("Expected root cause to be identified") + } + + // Should identify timeout as root cause + if !strings.Contains(fa.RootCause, "timeout") && !strings.Contains(fa.RootCause, "MCP server") { + t.Errorf("Expected timeout or MCP failure as root cause, got: %s", fa.RootCause) + } + }) + + // Test key findings for failure + t.Run("Failure Findings", func(t *testing.T) { + if len(auditData.KeyFindings) == 0 { + t.Error("Expected key findings for failed workflow") + } + + // Should have critical failure finding + hasCritical := false + hasMCPFailure := false + for _, finding := range auditData.KeyFindings { + if finding.Severity == "critical" && strings.Contains(finding.Title, "Failed") { + hasCritical = true + } + if finding.Category == "tooling" && strings.Contains(finding.Description, "MCP") { + hasMCPFailure = true + } + } + + if !hasCritical { + t.Error("Expected critical failure finding") + } + if !hasMCPFailure { + t.Error("Expected MCP failure finding") + } + }) + + // Test recommendations for failure + t.Run("Failure Recommendations", func(t *testing.T) { + if len(auditData.Recommendations) == 0 { + t.Error("Expected recommendations for failed workflow") + } + + // Should have high priority recommendations + hasHighPriority := false + for _, rec := range auditData.Recommendations { + if rec.Priority == "high" { + hasHighPriority = true + // High priority recommendations should mention review or fix + if !strings.Contains(strings.ToLower(rec.Action), "review") && + !strings.Contains(strings.ToLower(rec.Action), "fix") { + t.Errorf("High priority recommendation should mention review or fix: %s", rec.Action) + } + } + } + + if !hasHighPriority { + t.Error("Expected high priority recommendations for failure") + } + }) + + // Test JSON output for failure + t.Run("JSON Output for Failure", func(t *testing.T) { + 
jsonBytes, err := json.MarshalIndent(auditData, "", " ") + if err != nil { + t.Fatalf("Failed to marshal JSON: %v", err) + } + + jsonStr := string(jsonBytes) + + // Verify failure analysis is included + if !strings.Contains(jsonStr, `"failure_analysis"`) { + t.Error("JSON missing failure_analysis for failed workflow") + } + if !strings.Contains(jsonStr, `"primary_failure"`) { + t.Error("JSON missing primary_failure field") + } + if !strings.Contains(jsonStr, `"root_cause"`) { + t.Error("JSON missing root_cause field") + } + + // Print for documentation + t.Logf("Failure Scenario JSON Output:\n%s", string(jsonBytes)) + }) +} diff --git a/pkg/cli/audit_agent_output_test.go b/pkg/cli/audit_agent_output_test.go new file mode 100644 index 000000000..c0fd2a5b1 --- /dev/null +++ b/pkg/cli/audit_agent_output_test.go @@ -0,0 +1,568 @@ +package cli + +import ( + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/githubnext/gh-aw/pkg/workflow" +) + +// TestKeyFindingsGeneration verifies key findings are generated correctly +func TestKeyFindingsGeneration(t *testing.T) { + tests := []struct { + name string + run WorkflowRun + metrics MetricsData + errors []ErrorInfo + warnings []ErrorInfo + mcpFailures []MCPFailureReport + missingTools []MissingToolReport + expectedCount int + hasFailure bool + hasCost bool + hasTooling bool + }{ + { + name: "Failed workflow with errors", + run: WorkflowRun{ + DatabaseID: 123, + WorkflowName: "Test", + Conclusion: "failure", + Duration: 5 * time.Minute, + }, + metrics: MetricsData{ + ErrorCount: 3, + TokenUsage: 1000, + }, + errors: []ErrorInfo{ + {Type: "error", Message: "Test error 1"}, + {Type: "error", Message: "Test error 2"}, + {Type: "error", Message: "Test error 3"}, + }, + expectedCount: 1, // only failure finding (3 errors doesn't trigger "multiple errors") + hasFailure: true, + }, + { + name: "High cost workflow", + run: WorkflowRun{ + DatabaseID: 124, + WorkflowName: "Expensive", + Conclusion: "success", + Duration: 10 * time.Minute, + }, + metrics: MetricsData{ + EstimatedCost: 1.5, + TokenUsage: 100000, + }, + expectedCount: 3, // high cost + high tokens + success + hasCost: true, + }, + { + name: "MCP failures", + run: WorkflowRun{ + DatabaseID: 125, + WorkflowName: "MCP Test", + Conclusion: "failure", + }, + mcpFailures: []MCPFailureReport{ + {ServerName: "test-server", Status: "failed"}, + }, + expectedCount: 2, // failure + mcp failure + hasTooling: true, + }, + { + name: "Missing tools", + run: WorkflowRun{ + DatabaseID: 126, + WorkflowName: "Tool Test", + Conclusion: "success", + }, + missingTools: []MissingToolReport{ + {Tool: "missing_tool_1"}, + {Tool: "missing_tool_2"}, + }, + expectedCount: 2, // missing tools + success + hasTooling: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + processedRun := ProcessedRun{ + Run: tt.run, + MCPFailures: tt.mcpFailures, + MissingTools: tt.missingTools, + } + + findings := generateFindings(processedRun, tt.metrics, tt.errors, tt.warnings) + + if len(findings) < tt.expectedCount { + t.Errorf("Expected at least %d findings, got %d", tt.expectedCount, len(findings)) + } + + // Verify expected categories + if tt.hasFailure { + found := false + for _, finding := range findings { + if finding.Category == "error" && strings.Contains(finding.Title, "Failed") { + found = true + if finding.Severity != "critical" { + t.Errorf("Expected critical severity for failure, got %s", finding.Severity) + } + break + } + } + if !found { + t.Error("Expected failure finding 
but didn't find one") + } + } + + if tt.hasCost { + found := false + for _, finding := range findings { + if finding.Category == "cost" { + found = true + break + } + } + if !found { + t.Error("Expected cost finding but didn't find one") + } + } + + if tt.hasTooling { + found := false + for _, finding := range findings { + if finding.Category == "tooling" { + found = true + break + } + } + if !found { + t.Error("Expected tooling finding but didn't find one") + } + } + }) + } +} + +// TestRecommendationsGeneration verifies recommendations are generated correctly +func TestRecommendationsGeneration(t *testing.T) { + tests := []struct { + name string + run WorkflowRun + metrics MetricsData + findings []Finding + mcpFailures []MCPFailureReport + missingTools []MissingToolReport + expectedMinCount int + hasHighPriority bool + }{ + { + name: "Critical failure", + run: WorkflowRun{ + Conclusion: "failure", + }, + findings: []Finding{ + {Severity: "critical", Category: "error"}, + }, + expectedMinCount: 1, + hasHighPriority: true, + }, + { + name: "High cost with many turns", + run: WorkflowRun{ + Conclusion: "success", + }, + metrics: MetricsData{ + EstimatedCost: 1.0, + Turns: 15, + }, + findings: []Finding{ + {Severity: "high", Category: "cost", Title: "High Cost"}, + {Severity: "medium", Category: "performance", Title: "Many Iterations"}, + }, + expectedMinCount: 2, + }, + { + name: "Missing tools", + run: WorkflowRun{ + Conclusion: "success", + }, + missingTools: []MissingToolReport{ + {Tool: "required_tool", Reason: "Not configured"}, + }, + expectedMinCount: 1, + }, + { + name: "MCP failures", + run: WorkflowRun{ + Conclusion: "failure", + }, + mcpFailures: []MCPFailureReport{ + {ServerName: "critical-server", Status: "failed"}, + }, + expectedMinCount: 2, // MCP failure + general failure + hasHighPriority: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + processedRun := ProcessedRun{ + Run: tt.run, + MCPFailures: tt.mcpFailures, + MissingTools: tt.missingTools, + } + + recommendations := generateRecommendations(processedRun, tt.metrics, tt.findings) + + if len(recommendations) < tt.expectedMinCount { + t.Errorf("Expected at least %d recommendations, got %d", tt.expectedMinCount, len(recommendations)) + } + + if tt.hasHighPriority { + found := false + for _, rec := range recommendations { + if rec.Priority == "high" { + found = true + break + } + } + if !found { + t.Error("Expected high priority recommendation but didn't find one") + } + } + + // Verify all recommendations have required fields + for _, rec := range recommendations { + if rec.Action == "" { + t.Error("Recommendation missing action") + } + if rec.Reason == "" { + t.Error("Recommendation missing reason") + } + if rec.Priority == "" { + t.Error("Recommendation missing priority") + } + } + }) + } +} + +// TestFailureAnalysisGeneration verifies failure analysis is generated correctly +func TestFailureAnalysisGeneration(t *testing.T) { + tests := []struct { + name string + run WorkflowRun + errors []ErrorInfo + jobDetails []JobInfoWithDuration + mcpFailures []MCPFailureReport + expectedRootCause string + expectedFailedJobs int + }{ + { + name: "Simple failure with error", + run: WorkflowRun{ + Conclusion: "failure", + }, + errors: []ErrorInfo{ + {Message: "Connection timeout occurred"}, + }, + expectedRootCause: "Operation timeout", + }, + { + name: "Permission denied error", + run: WorkflowRun{ + Conclusion: "failure", + }, + errors: []ErrorInfo{ + {Message: "Permission denied for resource"}, 
+ }, + expectedRootCause: "Permission denied", + }, + { + name: "MCP server failure", + run: WorkflowRun{ + Conclusion: "failure", + }, + mcpFailures: []MCPFailureReport{ + {ServerName: "github-mcp", Status: "failed"}, + }, + expectedRootCause: "MCP server failure: github-mcp", + }, + { + name: "Failed jobs", + run: WorkflowRun{ + Conclusion: "failure", + }, + jobDetails: []JobInfoWithDuration{ + {JobInfo: JobInfo{Name: "test-job", Conclusion: "failure"}}, + {JobInfo: JobInfo{Name: "build-job", Conclusion: "success"}}, + }, + expectedFailedJobs: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + processedRun := ProcessedRun{ + Run: tt.run, + MCPFailures: tt.mcpFailures, + JobDetails: tt.jobDetails, + } + + analysis := generateFailureAnalysis(processedRun, tt.errors) + + if analysis == nil { + t.Fatal("Expected failure analysis but got nil") + } + + if analysis.PrimaryFailure != tt.run.Conclusion { + t.Errorf("Expected primary failure %s, got %s", tt.run.Conclusion, analysis.PrimaryFailure) + } + + if tt.expectedRootCause != "" { + if analysis.RootCause != tt.expectedRootCause { + t.Errorf("Expected root cause '%s', got '%s'", tt.expectedRootCause, analysis.RootCause) + } + } + + if tt.expectedFailedJobs > 0 { + if len(analysis.FailedJobs) != tt.expectedFailedJobs { + t.Errorf("Expected %d failed jobs, got %d", tt.expectedFailedJobs, len(analysis.FailedJobs)) + } + } + }) + } +} + +// TestPerformanceMetricsGeneration verifies performance metrics are calculated correctly +func TestPerformanceMetricsGeneration(t *testing.T) { + tests := []struct { + name string + run WorkflowRun + metrics MetricsData + toolUsage []ToolUsageInfo + firewallAnalysis *FirewallAnalysis + expectedCostEfficiency string + expectTokensPerMin bool + expectMostUsedTool bool + expectNetworkRequests bool + }{ + { + name: "Excellent cost efficiency", + run: WorkflowRun{ + Duration: 10 * time.Minute, + }, + metrics: MetricsData{ + EstimatedCost: 0.05, + TokenUsage: 5000, + }, + expectedCostEfficiency: "excellent", + expectTokensPerMin: true, + }, + { + name: "Poor cost efficiency", + run: WorkflowRun{ + Duration: 5 * time.Minute, + }, + metrics: MetricsData{ + EstimatedCost: 1.0, + TokenUsage: 10000, + }, + expectedCostEfficiency: "poor", + expectTokensPerMin: true, + }, + { + name: "With tool usage", + run: WorkflowRun{ + Duration: 5 * time.Minute, + }, + toolUsage: []ToolUsageInfo{ + {Name: "bash", CallCount: 10, MaxDuration: "2s"}, + {Name: "github_get_issue", CallCount: 5, MaxDuration: "1s"}, + }, + expectMostUsedTool: true, + }, + { + name: "With firewall analysis", + run: WorkflowRun{ + Duration: 5 * time.Minute, + }, + firewallAnalysis: &FirewallAnalysis{ + TotalRequests: 25, + }, + expectNetworkRequests: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + processedRun := ProcessedRun{ + Run: tt.run, + FirewallAnalysis: tt.firewallAnalysis, + } + + pm := generatePerformanceMetrics(processedRun, tt.metrics, tt.toolUsage) + + if pm == nil { + t.Fatal("Expected performance metrics but got nil") + } + + if tt.expectedCostEfficiency != "" { + if pm.CostEfficiency != tt.expectedCostEfficiency { + t.Errorf("Expected cost efficiency '%s', got '%s'", tt.expectedCostEfficiency, pm.CostEfficiency) + } + } + + if tt.expectTokensPerMin { + if pm.TokensPerMinute <= 0 { + t.Error("Expected positive tokens per minute") + } + } + + if tt.expectMostUsedTool { + if pm.MostUsedTool == "" { + t.Error("Expected most used tool to be set") + } + } + + if 
tt.expectNetworkRequests { + if pm.NetworkRequests <= 0 { + t.Error("Expected network requests count") + } + } + }) + } +} + +// TestAuditDataJSONStructure verifies the JSON structure includes all new fields +func TestAuditDataJSONStructure(t *testing.T) { + // Create comprehensive audit data + run := WorkflowRun{ + DatabaseID: 123456, + WorkflowName: "Test Workflow", + Status: "completed", + Conclusion: "failure", + CreatedAt: time.Now(), + Event: "push", + HeadBranch: "main", + URL: "https://github.com/org/repo/actions/runs/123456", + TokenUsage: 5000, + EstimatedCost: 0.5, + Turns: 8, + ErrorCount: 2, + WarningCount: 1, + Duration: 5 * time.Minute, + } + + metrics := LogMetrics{ + TokenUsage: 5000, + EstimatedCost: 0.5, + Turns: 8, + Errors: []workflow.LogError{ + {Type: "error", Message: "Test error", File: "test.log", Line: 10}, + {Type: "warning", Message: "Test warning", File: "test.log", Line: 20}, + }, + ToolCalls: []workflow.ToolCallInfo{ + {Name: "bash", CallCount: 5, MaxDuration: 2 * time.Second}, + }, + } + + processedRun := ProcessedRun{ + Run: run, + MissingTools: []MissingToolReport{ + {Tool: "missing_tool", Reason: "Not configured"}, + }, + MCPFailures: []MCPFailureReport{ + {ServerName: "test-server", Status: "failed"}, + }, + JobDetails: []JobInfoWithDuration{ + {JobInfo: JobInfo{Name: "test", Conclusion: "failure"}}, + }, + } + + // Build audit data + auditData := buildAuditData(processedRun, metrics) + + // Marshal to JSON + jsonBytes, err := json.MarshalIndent(auditData, "", " ") + if err != nil { + t.Fatalf("Failed to marshal audit data to JSON: %v", err) + } + + jsonStr := string(jsonBytes) + + // Verify all new fields are present + expectedFields := []string{ + "key_findings", + "recommendations", + "failure_analysis", + "performance_metrics", + "overview", + "metrics", + "jobs", + "downloaded_files", + "missing_tools", + "mcp_failures", + "errors", + "warnings", + "tool_usage", + } + + for _, field := range expectedFields { + if !strings.Contains(jsonStr, fmt.Sprintf(`"%s"`, field)) { + t.Errorf("JSON output missing expected field: %s", field) + } + } + + // Verify key findings structure + if !strings.Contains(jsonStr, `"category"`) { + t.Error("Key findings missing category field") + } + if !strings.Contains(jsonStr, `"severity"`) { + t.Error("Key findings missing severity field") + } + + // Verify recommendations structure + if !strings.Contains(jsonStr, `"priority"`) { + t.Error("Recommendations missing priority field") + } + if !strings.Contains(jsonStr, `"action"`) { + t.Error("Recommendations missing action field") + } + + // Verify failure analysis structure + if !strings.Contains(jsonStr, `"primary_failure"`) { + t.Error("Failure analysis missing primary_failure field") + } + + // Verify performance metrics structure + if !strings.Contains(jsonStr, `"cost_efficiency"`) { + t.Error("Performance metrics missing cost_efficiency field") + } + + // Parse back to verify structure + var parsed AuditData + if err := json.Unmarshal(jsonBytes, &parsed); err != nil { + t.Fatalf("Failed to parse JSON back to AuditData: %v", err) + } + + // Verify parsed data has expected content + if len(parsed.KeyFindings) == 0 { + t.Error("Expected key findings but got none") + } + if len(parsed.Recommendations) == 0 { + t.Error("Expected recommendations but got none") + } + if parsed.FailureAnalysis == nil { + t.Error("Expected failure analysis but got nil") + } + if parsed.PerformanceMetrics == nil { + t.Error("Expected performance metrics but got nil") + } +} diff --git 
a/pkg/cli/audit_report.go b/pkg/cli/audit_report.go index a5bbcf708..5b346725d 100644 --- a/pkg/cli/audit_report.go +++ b/pkg/cli/audit_report.go @@ -18,17 +18,55 @@ var auditReportLog = logger.New("cli:audit_report") // AuditData represents the complete structured audit data for a workflow run type AuditData struct { - Overview OverviewData `json:"overview"` - Metrics MetricsData `json:"metrics"` - Jobs []JobData `json:"jobs,omitempty"` - DownloadedFiles []FileInfo `json:"downloaded_files"` - MissingTools []MissingToolReport `json:"missing_tools,omitempty"` - Noops []NoopReport `json:"noops,omitempty"` - MCPFailures []MCPFailureReport `json:"mcp_failures,omitempty"` - FirewallAnalysis *FirewallAnalysis `json:"firewall_analysis,omitempty"` - Errors []ErrorInfo `json:"errors,omitempty"` - Warnings []ErrorInfo `json:"warnings,omitempty"` - ToolUsage []ToolUsageInfo `json:"tool_usage,omitempty"` + Overview OverviewData `json:"overview"` + Metrics MetricsData `json:"metrics"` + KeyFindings []Finding `json:"key_findings,omitempty"` + Recommendations []Recommendation `json:"recommendations,omitempty"` + FailureAnalysis *FailureAnalysis `json:"failure_analysis,omitempty"` + PerformanceMetrics *PerformanceMetrics `json:"performance_metrics,omitempty"` + Jobs []JobData `json:"jobs,omitempty"` + DownloadedFiles []FileInfo `json:"downloaded_files"` + MissingTools []MissingToolReport `json:"missing_tools,omitempty"` + Noops []NoopReport `json:"noops,omitempty"` + MCPFailures []MCPFailureReport `json:"mcp_failures,omitempty"` + FirewallAnalysis *FirewallAnalysis `json:"firewall_analysis,omitempty"` + Errors []ErrorInfo `json:"errors,omitempty"` + Warnings []ErrorInfo `json:"warnings,omitempty"` + ToolUsage []ToolUsageInfo `json:"tool_usage,omitempty"` +} + +// Finding represents a key insight discovered during audit +type Finding struct { + Category string `json:"category"` // e.g., "error", "performance", "cost", "tooling" + Severity string `json:"severity"` // "critical", "high", "medium", "low", "info" + Title string `json:"title"` // Brief title + Description string `json:"description"` // Detailed description + Impact string `json:"impact,omitempty"` // What impact this has +} + +// Recommendation represents an actionable suggestion +type Recommendation struct { + Priority string `json:"priority"` // "high", "medium", "low" + Action string `json:"action"` // What to do + Reason string `json:"reason"` // Why to do it + Example string `json:"example,omitempty"` // Example of how to implement +} + +// FailureAnalysis provides structured analysis for failed workflows +type FailureAnalysis struct { + PrimaryFailure string `json:"primary_failure"` // Main reason for failure + FailedJobs []string `json:"failed_jobs"` // List of failed job names + ErrorSummary string `json:"error_summary"` // Summary of errors + RootCause string `json:"root_cause,omitempty"` // Identified root cause if determinable +} + +// PerformanceMetrics provides aggregated performance statistics +type PerformanceMetrics struct { + TokensPerMinute float64 `json:"tokens_per_minute,omitempty"` + CostEfficiency string `json:"cost_efficiency,omitempty"` // e.g., "good", "poor" + AvgToolDuration string `json:"avg_tool_duration,omitempty"` + MostUsedTool string `json:"most_used_tool,omitempty"` + NetworkRequests int `json:"network_requests,omitempty"` } // OverviewData contains basic information about the workflow run @@ -199,23 +237,42 @@ func buildAuditData(processedRun ProcessedRun, metrics LogMetrics) AuditData { toolUsage = 
append(toolUsage, *info) } + // Generate key findings + findings := generateFindings(processedRun, metricsData, errors, warnings) + + // Generate recommendations + recommendations := generateRecommendations(processedRun, metricsData, findings) + + // Generate failure analysis if workflow failed + var failureAnalysis *FailureAnalysis + if run.Conclusion == "failure" || run.Conclusion == "timed_out" || run.Conclusion == "cancelled" { + failureAnalysis = generateFailureAnalysis(processedRun, errors) + } + + // Generate performance metrics + performanceMetrics := generatePerformanceMetrics(processedRun, metricsData, toolUsage) + if auditReportLog.Enabled() { - auditReportLog.Printf("Built audit data: %d jobs, %d errors, %d warnings, %d tool types", - len(jobs), len(errors), len(warnings), len(toolUsage)) + auditReportLog.Printf("Built audit data: %d jobs, %d errors, %d warnings, %d tool types, %d findings, %d recommendations", + len(jobs), len(errors), len(warnings), len(toolUsage), len(findings), len(recommendations)) } return AuditData{ - Overview: overview, - Metrics: metricsData, - Jobs: jobs, - DownloadedFiles: downloadedFiles, - MissingTools: processedRun.MissingTools, - Noops: processedRun.Noops, - MCPFailures: processedRun.MCPFailures, - FirewallAnalysis: processedRun.FirewallAnalysis, - Errors: errors, - Warnings: warnings, - ToolUsage: toolUsage, + Overview: overview, + Metrics: metricsData, + KeyFindings: findings, + Recommendations: recommendations, + FailureAnalysis: failureAnalysis, + PerformanceMetrics: performanceMetrics, + Jobs: jobs, + DownloadedFiles: downloadedFiles, + MissingTools: processedRun.MissingTools, + Noops: processedRun.Noops, + MCPFailures: processedRun.MCPFailures, + FirewallAnalysis: processedRun.FirewallAnalysis, + Errors: errors, + Warnings: warnings, + ToolUsage: toolUsage, } } @@ -349,6 +406,34 @@ func renderConsole(data AuditData, logsPath string) { fmt.Println() renderOverview(data.Overview) + // Key Findings Section - NEW + if len(data.KeyFindings) > 0 { + fmt.Println(console.FormatInfoMessage("## Key Findings")) + fmt.Println() + renderKeyFindings(data.KeyFindings) + } + + // Recommendations Section - NEW + if len(data.Recommendations) > 0 { + fmt.Println(console.FormatInfoMessage("## Recommendations")) + fmt.Println() + renderRecommendations(data.Recommendations) + } + + // Failure Analysis Section - NEW + if data.FailureAnalysis != nil { + fmt.Println(console.FormatInfoMessage("## Failure Analysis")) + fmt.Println() + renderFailureAnalysis(data.FailureAnalysis) + } + + // Performance Metrics Section - NEW + if data.PerformanceMetrics != nil { + fmt.Println(console.FormatInfoMessage("## Performance Metrics")) + fmt.Println() + renderPerformanceMetrics(data.PerformanceMetrics) + } + // Metrics Section - use new rendering system fmt.Println(console.FormatInfoMessage("## Metrics")) fmt.Println() @@ -582,3 +667,535 @@ func truncateString(s string, maxLen int) string { } return s[:maxLen-3] + "..." 
} + +// generateFindings analyzes the workflow run and generates key findings +func generateFindings(processedRun ProcessedRun, metrics MetricsData, errors []ErrorInfo, warnings []ErrorInfo) []Finding { + var findings []Finding + run := processedRun.Run + + // Failure findings + if run.Conclusion == "failure" { + findings = append(findings, Finding{ + Category: "error", + Severity: "critical", + Title: "Workflow Failed", + Description: fmt.Sprintf("Workflow '%s' failed with %d error(s)", run.WorkflowName, metrics.ErrorCount), + Impact: "Workflow did not complete successfully and may need intervention", + }) + } + + if run.Conclusion == "timed_out" { + findings = append(findings, Finding{ + Category: "performance", + Severity: "high", + Title: "Workflow Timeout", + Description: "Workflow exceeded time limit and was terminated", + Impact: "Tasks may be incomplete, consider optimizing workflow or increasing timeout", + }) + } + + // Cost findings + if metrics.EstimatedCost > 1.0 { + findings = append(findings, Finding{ + Category: "cost", + Severity: "high", + Title: "High Cost Detected", + Description: fmt.Sprintf("Estimated cost of $%.2f exceeds typical threshold", metrics.EstimatedCost), + Impact: "Review token usage and consider optimization opportunities", + }) + } else if metrics.EstimatedCost > 0.5 { + findings = append(findings, Finding{ + Category: "cost", + Severity: "medium", + Title: "Moderate Cost", + Description: fmt.Sprintf("Estimated cost of $%.2f is moderate", metrics.EstimatedCost), + Impact: "Monitor costs if this workflow runs frequently", + }) + } + + // Token usage findings + if metrics.TokenUsage > 50000 { + findings = append(findings, Finding{ + Category: "performance", + Severity: "medium", + Title: "High Token Usage", + Description: fmt.Sprintf("Used %s tokens", console.FormatNumber(metrics.TokenUsage)), + Impact: "High token usage may indicate verbose outputs or inefficient prompts", + }) + } + + // Turn count findings + if metrics.Turns > 10 { + findings = append(findings, Finding{ + Category: "performance", + Severity: "medium", + Title: "Many Iterations", + Description: fmt.Sprintf("Workflow took %d turns to complete", metrics.Turns), + Impact: "Many turns may indicate task complexity or unclear instructions", + }) + } + + // Error findings + if len(errors) > 5 { + findings = append(findings, Finding{ + Category: "error", + Severity: "high", + Title: "Multiple Errors", + Description: fmt.Sprintf("Encountered %d errors during execution", len(errors)), + Impact: "Multiple errors may indicate systemic issues requiring attention", + }) + } + + // MCP failure findings + if len(processedRun.MCPFailures) > 0 { + serverNames := make([]string, len(processedRun.MCPFailures)) + for i, failure := range processedRun.MCPFailures { + serverNames[i] = failure.ServerName + } + findings = append(findings, Finding{ + Category: "tooling", + Severity: "high", + Title: "MCP Server Failures", + Description: fmt.Sprintf("Failed MCP servers: %s", strings.Join(serverNames, ", ")), + Impact: "Missing tools may limit workflow capabilities", + }) + } + + // Missing tool findings + if len(processedRun.MissingTools) > 0 { + toolNames := make([]string, 0, min(3, len(processedRun.MissingTools))) + for i := 0; i < len(processedRun.MissingTools) && i < 3; i++ { + toolNames = append(toolNames, processedRun.MissingTools[i].Tool) + } + desc := fmt.Sprintf("Missing tools: %s", strings.Join(toolNames, ", ")) + if len(processedRun.MissingTools) > 3 { + desc += fmt.Sprintf(" (and %d more)", 
len(processedRun.MissingTools)-3) + } + findings = append(findings, Finding{ + Category: "tooling", + Severity: "medium", + Title: "Tools Not Available", + Description: desc, + Impact: "Agent requested tools that were not configured or available", + }) + } + + // Firewall findings + if processedRun.FirewallAnalysis != nil && processedRun.FirewallAnalysis.DeniedRequests > 0 { + findings = append(findings, Finding{ + Category: "network", + Severity: "medium", + Title: "Blocked Network Requests", + Description: fmt.Sprintf("%d network requests were blocked by firewall", processedRun.FirewallAnalysis.DeniedRequests), + Impact: "Blocked requests may indicate missing network permissions or unexpected behavior", + }) + } + + // Success findings + if run.Conclusion == "success" && len(errors) == 0 { + findings = append(findings, Finding{ + Category: "success", + Severity: "info", + Title: "Workflow Completed Successfully", + Description: fmt.Sprintf("Completed in %d turns with no errors", metrics.Turns), + Impact: "No action needed", + }) + } + + return findings +} + +// generateRecommendations creates actionable recommendations based on findings +func generateRecommendations(processedRun ProcessedRun, metrics MetricsData, findings []Finding) []Recommendation { + var recommendations []Recommendation + run := processedRun.Run + + // Check for high-severity findings + hasCriticalFindings := false + hasHighCostFindings := false + hasManyTurns := false + for _, finding := range findings { + if finding.Severity == "critical" { + hasCriticalFindings = true + } + if finding.Category == "cost" && (finding.Severity == "high" || finding.Severity == "medium") { + hasHighCostFindings = true + } + if finding.Category == "performance" && strings.Contains(finding.Title, "Iterations") { + hasManyTurns = true + } + } + + // Recommendations for failures + if run.Conclusion == "failure" || hasCriticalFindings { + recommendations = append(recommendations, Recommendation{ + Priority: "high", + Action: "Review error logs to identify root cause of failure", + Reason: "Understanding failure causes helps prevent recurrence", + Example: "Check the Errors section below for specific error messages and file locations", + }) + } + + // Recommendations for cost optimization + if hasHighCostFindings { + recommendations = append(recommendations, Recommendation{ + Priority: "medium", + Action: "Optimize prompt size and reduce verbose outputs", + Reason: "High token usage increases costs and may slow execution", + Example: "Use concise prompts, limit output verbosity, and consider caching repeated data", + }) + } + + // Recommendations for many turns + if hasManyTurns { + recommendations = append(recommendations, Recommendation{ + Priority: "medium", + Action: "Clarify workflow instructions or break into smaller tasks", + Reason: "Many iterations may indicate unclear objectives or overly complex tasks", + Example: "Split complex workflows into discrete steps with clear success criteria", + }) + } + + // Recommendations for missing tools + if len(processedRun.MissingTools) > 0 { + recommendations = append(recommendations, Recommendation{ + Priority: "medium", + Action: "Add missing tools to workflow configuration", + Reason: "Missing tools limit agent capabilities and may cause failures", + Example: fmt.Sprintf("Add tools configuration for: %s", processedRun.MissingTools[0].Tool), + }) + } + + // Recommendations for MCP failures + if len(processedRun.MCPFailures) > 0 { + recommendations = append(recommendations, Recommendation{ + 
Priority: "high",
+			Action: "Fix MCP server configuration or dependencies",
+			Reason: "MCP server failures prevent agent from accessing required tools",
+			Example: "Check server logs and verify MCP server is properly configured and accessible",
+		})
+	}
+
+	// Recommendations for firewall blocks
+	if processedRun.FirewallAnalysis != nil && processedRun.FirewallAnalysis.DeniedRequests > 10 {
+		recommendations = append(recommendations, Recommendation{
+			Priority: "medium",
+			Action: "Review network access configuration",
+			Reason: "Many blocked requests suggest missing network permissions",
+			Example: "Add allowed domains to network configuration or review firewall rules",
+		})
+	}
+
+	// General best practices
+	if len(recommendations) == 0 && run.Conclusion == "success" {
+		recommendations = append(recommendations, Recommendation{
+			Priority: "low",
+			Action: "Monitor workflow performance over time",
+			Reason: "Tracking metrics helps identify trends and optimization opportunities",
+			Example: "Run 'gh aw logs' periodically to review cost and performance trends",
+		})
+	}
+
+	return recommendations
+}
+
+// generateFailureAnalysis creates structured analysis for failed workflows
+func generateFailureAnalysis(processedRun ProcessedRun, errors []ErrorInfo) *FailureAnalysis {
+	run := processedRun.Run
+
+	// Determine primary failure reason
+	primaryFailure := run.Conclusion
+	if primaryFailure == "" {
+		primaryFailure = "unknown"
+	}
+
+	// Collect failed job names
+	var failedJobs []string
+	for _, job := range processedRun.JobDetails {
+		if job.Conclusion == "failure" || job.Conclusion == "timed_out" || job.Conclusion == "cancelled" {
+			failedJobs = append(failedJobs, job.Name)
+		}
+	}
+
+	// Generate error summary
+	errorSummary := "No specific errors identified"
+	if len(errors) > 0 {
+		if len(errors) == 1 {
+			errorSummary = errors[0].Message
+		} else {
+			errorSummary = fmt.Sprintf("%d errors: %s (and %d more)", len(errors), errors[0].Message, len(errors)-1)
+		}
+	}
+
+	// Attempt to identify root cause
+	rootCause := ""
+	if len(processedRun.MCPFailures) > 0 {
+		rootCause = fmt.Sprintf("MCP server failure: %s", processedRun.MCPFailures[0].ServerName)
+	} else if len(errors) > 0 {
+		// Look for common error patterns
+		firstError := errors[0].Message
+		if strings.Contains(strings.ToLower(firstError), "timeout") {
+			rootCause = "Operation timeout"
+		} else if strings.Contains(strings.ToLower(firstError), "permission") {
+			rootCause = "Permission denied"
+		} else if strings.Contains(strings.ToLower(firstError), "not found") {
+			rootCause = "Resource not found"
+		} else if strings.Contains(strings.ToLower(firstError), "authentication") {
+			rootCause = "Authentication failure"
+		}
+	}
+
+	return &FailureAnalysis{
+		PrimaryFailure: primaryFailure,
+		FailedJobs: failedJobs,
+		ErrorSummary: errorSummary,
+		RootCause: rootCause,
+	}
+}
+
+// generatePerformanceMetrics calculates aggregated performance statistics
+func generatePerformanceMetrics(processedRun ProcessedRun, metrics MetricsData, toolUsage []ToolUsageInfo) *PerformanceMetrics {
+	run := processedRun.Run
+	pm := &PerformanceMetrics{}
+
+	// Calculate tokens per minute
+	if run.Duration > 0 && metrics.TokenUsage > 0 {
+		minutes := run.Duration.Minutes()
+		if minutes > 0 {
+			pm.TokensPerMinute = float64(metrics.TokenUsage) / minutes
+		}
+	}
+
+	// Determine cost efficiency
+	if metrics.EstimatedCost > 0 && run.Duration > 0 {
+		costPerMinute := metrics.EstimatedCost / run.Duration.Minutes()
+		if costPerMinute < 0.01 {
+			pm.CostEfficiency = "excellent"
+		} else if costPerMinute < 0.05 {
+			pm.CostEfficiency = "good"
+		} else if costPerMinute < 0.10 {
+			pm.CostEfficiency = "moderate"
+		} else {
+			pm.CostEfficiency = "poor"
+		}
+	}
+
+	// Find most used tool
+	if len(toolUsage) > 0 {
+		mostUsed := toolUsage[0]
+		for i := 1; i < len(toolUsage); i++ {
+			if toolUsage[i].CallCount > mostUsed.CallCount {
+				mostUsed = toolUsage[i]
+			}
+		}
+		pm.MostUsedTool = fmt.Sprintf("%s (%d calls)", mostUsed.Name, mostUsed.CallCount)
+	}
+
+	// Calculate average tool duration
+	if len(toolUsage) > 0 {
+		totalDuration := time.Duration(0)
+		count := 0
+		for _, tool := range toolUsage {
+			if tool.MaxDuration != "" {
+				// Try to parse duration string using time.ParseDuration
+				if d, err := time.ParseDuration(tool.MaxDuration); err == nil {
+					totalDuration += d
+					count++
+				}
+			}
+		}
+		if count > 0 {
+			avgDuration := totalDuration / time.Duration(count)
+			pm.AvgToolDuration = timeutil.FormatDuration(avgDuration)
+		}
+	}
+
+	// Network request count from firewall
+	if processedRun.FirewallAnalysis != nil {
+		pm.NetworkRequests = processedRun.FirewallAnalysis.TotalRequests
+	}
+
+	return pm
+}
+
+// renderKeyFindings renders key findings with colored severity indicators
+func renderKeyFindings(findings []Finding) {
+	// Group findings by severity for better presentation
+	critical := []Finding{}
+	high := []Finding{}
+	medium := []Finding{}
+	low := []Finding{}
+	info := []Finding{}
+
+	for _, finding := range findings {
+		switch finding.Severity {
+		case "critical":
+			critical = append(critical, finding)
+		case "high":
+			high = append(high, finding)
+		case "medium":
+			medium = append(medium, finding)
+		case "low":
+			low = append(low, finding)
+		default:
+			info = append(info, finding)
+		}
+	}
+
+	// Render critical findings first
+	for _, finding := range critical {
+		fmt.Printf(" 🔴 %s [%s]\n", console.FormatErrorMessage(finding.Title), finding.Category)
+		fmt.Printf(" %s\n", finding.Description)
+		if finding.Impact != "" {
+			fmt.Printf(" Impact: %s\n", finding.Impact)
+		}
+		fmt.Println()
+	}
+
+	// Then high severity
+	for _, finding := range high {
+		fmt.Printf(" 🟠 %s [%s]\n", console.FormatWarningMessage(finding.Title), finding.Category)
+		fmt.Printf(" %s\n", finding.Description)
+		if finding.Impact != "" {
+			fmt.Printf(" Impact: %s\n", finding.Impact)
+		}
+		fmt.Println()
+	}
+
+	// Medium severity
+	for _, finding := range medium {
+		fmt.Printf(" 🟡 %s [%s]\n", finding.Title, finding.Category)
+		fmt.Printf(" %s\n", finding.Description)
+		if finding.Impact != "" {
+			fmt.Printf(" Impact: %s\n", finding.Impact)
+		}
+		fmt.Println()
+	}
+
+	// Low severity
+	for _, finding := range low {
+		fmt.Printf(" ℹ️ %s [%s]\n", finding.Title, finding.Category)
+		fmt.Printf(" %s\n", finding.Description)
+		if finding.Impact != "" {
+			fmt.Printf(" Impact: %s\n", finding.Impact)
+		}
+		fmt.Println()
+	}
+
+	// Info findings
+	for _, finding := range info {
+		fmt.Printf(" ✅ %s [%s]\n", console.FormatSuccessMessage(finding.Title), finding.Category)
+		fmt.Printf(" %s\n", finding.Description)
+		if finding.Impact != "" {
+			fmt.Printf(" Impact: %s\n", finding.Impact)
+		}
+		fmt.Println()
+	}
+}
+
+// renderRecommendations renders actionable recommendations
+func renderRecommendations(recommendations []Recommendation) {
+	// Group by priority
+	high := []Recommendation{}
+	medium := []Recommendation{}
+	low := []Recommendation{}
+
+	for _, rec := range recommendations {
+		switch rec.Priority {
+		case "high":
+			high = append(high, rec)
+		case "medium":
+			medium = append(medium, rec)
+		default:
+			low = append(low, rec)
+		}
+	}
+
+	// Render high priority first
+	for i, rec := range high {
+		fmt.Printf(" %d. [HIGH] %s\n", i+1, console.FormatWarningMessage(rec.Action))
+		fmt.Printf(" Reason: %s\n", rec.Reason)
+		if rec.Example != "" {
+			fmt.Printf(" Example: %s\n", rec.Example)
+		}
+		fmt.Println()
+	}
+
+	// Medium priority
+	startIdx := len(high) + 1
+	for i, rec := range medium {
+		fmt.Printf(" %d. [MEDIUM] %s\n", startIdx+i, rec.Action)
+		fmt.Printf(" Reason: %s\n", rec.Reason)
+		if rec.Example != "" {
+			fmt.Printf(" Example: %s\n", rec.Example)
+		}
+		fmt.Println()
+	}
+
+	// Low priority
+	startIdx += len(medium)
+	for i, rec := range low {
+		fmt.Printf(" %d. [LOW] %s\n", startIdx+i, rec.Action)
+		fmt.Printf(" Reason: %s\n", rec.Reason)
+		if rec.Example != "" {
+			fmt.Printf(" Example: %s\n", rec.Example)
+		}
+		fmt.Println()
+	}
+}
+
+// renderFailureAnalysis renders failure analysis information
+func renderFailureAnalysis(analysis *FailureAnalysis) {
+	fmt.Printf(" Primary Failure: %s\n", console.FormatErrorMessage(analysis.PrimaryFailure))
+	fmt.Println()
+
+	if len(analysis.FailedJobs) > 0 {
+		fmt.Printf(" Failed Jobs:\n")
+		for _, job := range analysis.FailedJobs {
+			fmt.Printf(" â€ĸ %s\n", job)
+		}
+		fmt.Println()
+	}
+
+	fmt.Printf(" Error Summary: %s\n", analysis.ErrorSummary)
+	fmt.Println()
+
+	if analysis.RootCause != "" {
+		fmt.Printf(" Identified Root Cause: %s\n", console.FormatWarningMessage(analysis.RootCause))
+		fmt.Println()
+	}
+}
+
+// renderPerformanceMetrics renders performance metrics
+func renderPerformanceMetrics(metrics *PerformanceMetrics) {
+	if metrics.TokensPerMinute > 0 {
+		fmt.Printf(" Tokens per Minute: %.1f\n", metrics.TokensPerMinute)
+	}
+
+	if metrics.CostEfficiency != "" {
+		efficiencyDisplay := metrics.CostEfficiency
+		switch metrics.CostEfficiency {
+		case "excellent", "good":
+			efficiencyDisplay = console.FormatSuccessMessage(metrics.CostEfficiency)
+		case "moderate":
+			efficiencyDisplay = console.FormatWarningMessage(metrics.CostEfficiency)
+		case "poor":
+			efficiencyDisplay = console.FormatErrorMessage(metrics.CostEfficiency)
+		}
+		fmt.Printf(" Cost Efficiency: %s\n", efficiencyDisplay)
+	}
+
+	if metrics.AvgToolDuration != "" {
+		fmt.Printf(" Average Tool Duration: %s\n", metrics.AvgToolDuration)
+	}
+
+	if metrics.MostUsedTool != "" {
+		fmt.Printf(" Most Used Tool: %s\n", metrics.MostUsedTool)
+	}
+
+	if metrics.NetworkRequests > 0 {
+		fmt.Printf(" Network Requests: %d\n", metrics.NetworkRequests)
+	}
+
+	fmt.Println()
+}
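Reviewer note (not part of the patch): below is a minimal, standalone sketch of how an agent or script might consume the agent-friendly JSON that buildAuditData now produces. The field names mirror the json tags added to AuditData, Finding, and Recommendation above; the program itself, which reads a report from stdin and surfaces only high-severity findings and high-priority recommendations, is purely illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// finding and recommendation mirror the json tags declared on Finding and
// Recommendation in pkg/cli/audit_report.go; only the fields used here are kept.
type finding struct {
	Category string `json:"category"`
	Severity string `json:"severity"`
	Title    string `json:"title"`
}

type recommendation struct {
	Priority string `json:"priority"`
	Action   string `json:"action"`
	Reason   string `json:"reason"`
}

// auditData is a partial view of the AuditData JSON emitted by the audit report.
type auditData struct {
	KeyFindings     []finding        `json:"key_findings"`
	Recommendations []recommendation `json:"recommendations"`
}

func main() {
	// Decode an audit report piped in on stdin (hypothetical invocation).
	var data auditData
	if err := json.NewDecoder(os.Stdin).Decode(&data); err != nil {
		fmt.Fprintln(os.Stderr, "decode audit JSON:", err)
		os.Exit(1)
	}

	// Surface only critical/high findings, the same items the console renderer prints first.
	for _, f := range data.KeyFindings {
		if f.Severity == "critical" || f.Severity == "high" {
			fmt.Printf("[%s/%s] %s\n", f.Severity, f.Category, f.Title)
		}
	}

	// Turn high-priority recommendations into follow-up items.
	for _, r := range data.Recommendations {
		if r.Priority == "high" {
			fmt.Printf("TODO: %s (%s)\n", r.Action, r.Reason)
		}
	}
}

Filtering on severity and priority this way mirrors the ordering the console renderer uses (critical and high first), so machine consumers and human readers see the same items emphasized.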