-
Notifications
You must be signed in to change notification settings - Fork 389
Add 50 benchmarks for performance-critical CLI operations with CI integration #3778
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from all commits
Commits
Show all changes
5 commits
Select commit
Hold shift + click to select a range
d9f51b5
Initial plan
Copilot a14bca7
Add 50+ new benchmarks for performance-critical code paths
Copilot e2a76d8
Add benchmark make targets and update documentation
Copilot deb7235
Add bench job to CI workflow
Copilot 535712d
Optimize benchmark suite to run under 1 minute
Copilot File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -51,6 +51,9 @@ coverage.html | |
| coverage/ | ||
| logs/ | ||
|
|
||
| # Benchmark results | ||
| bench_results.txt | ||
|
|
||
| node_modules/ | ||
| gh-aw-test/ | ||
|
|
||
|
|
||
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,231 @@ | ||
| package cli | ||
|
|
||
| import ( | ||
| "testing" | ||
|
|
||
| "github.com/githubnext/gh-aw/pkg/workflow" | ||
| ) | ||
|
|
||
// Sample log content for benchmarking.
//
// These constants hold canned log excerpts in the formats the respective
// engines are benchmarked against below, so the parser benchmarks run on
// realistic input without any file I/O.
const (
	// sampleClaudeLog: JSON-lines style entries (one JSON array per line),
	// mimicking Claude engine output including usage and result records.
	sampleClaudeLog = `[{"type":"session_created","timestamp":"2024-01-15T10:00:00.000Z"}]
[{"type":"message","timestamp":"2024-01-15T10:00:01.000Z","message":"Starting analysis"}]
[{"type":"tool_use","timestamp":"2024-01-15T10:00:02.000Z","tool":"github.get_issue"}]
[{"type":"tool_result","timestamp":"2024-01-15T10:00:03.000Z"}]
[{"type":"usage","timestamp":"2024-01-15T10:00:04.000Z","input_tokens":1000,"output_tokens":500}]
[{"type":"message","timestamp":"2024-01-15T10:00:05.000Z","message":"Analysis complete"}]
[{"type":"result","timestamp":"2024-01-15T10:00:06.000Z","total_input_tokens":1000,"total_output_tokens":500,"cost":0.015}]`

	// sampleCopilotLog: timestamped plain-text lines with [LEVEL] markers.
	sampleCopilotLog = `2024-01-15T10:00:00.123Z [INFO] Copilot started
2024-01-15T10:00:01.456Z [INFO] Processing request
2024-01-15T10:00:02.789Z [DEBUG] Tool call: github.get_issue
2024-01-15T10:00:03.012Z [DEBUG] Tool result received
2024-01-15T10:00:04.345Z [INFO] Token usage: 1500 total
2024-01-15T10:00:05.678Z [ERROR] Minor issue detected
2024-01-15T10:00:06.901Z [INFO] Request completed`

	// sampleCodexLog: Codex-style tool/exec trace lines.
	sampleCodexLog = `] tool github.search_issues(...)
tool result: [{"id": 123, "title": "Issue 1"}]
] exec ls -la in /tmp
exec result: total 8
] tool github.get_issue(...)
tool result: {"id": 123, "body": "Issue content"}
] success in 2.5s`

	// Larger inputs (5x concatenation of the samples) for the *_Large benchmarks.
	largeClaudeLog = sampleClaudeLog + "\n" + sampleClaudeLog + "\n" + sampleClaudeLog + "\n" + sampleClaudeLog + "\n" + sampleClaudeLog

	largeCopilotLog = sampleCopilotLog + "\n" + sampleCopilotLog + "\n" + sampleCopilotLog + "\n" + sampleCopilotLog + "\n" + sampleCopilotLog
)
|
|
||
| // BenchmarkParseClaudeLog benchmarks Claude log parsing | ||
| func BenchmarkParseClaudeLog(b *testing.B) { | ||
| engine := &workflow.ClaudeEngine{} | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| _ = engine.ParseLogMetrics(sampleClaudeLog, false) | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkParseClaudeLog_Large benchmarks parsing large Claude log file | ||
| func BenchmarkParseClaudeLog_Large(b *testing.B) { | ||
| engine := &workflow.ClaudeEngine{} | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| _ = engine.ParseLogMetrics(largeClaudeLog, false) | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkParseCopilotLog benchmarks Copilot log parsing | ||
| func BenchmarkParseCopilotLog(b *testing.B) { | ||
| engine := &workflow.CopilotEngine{} | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| _ = engine.ParseLogMetrics(sampleCopilotLog, false) | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkParseCopilotLog_Large benchmarks parsing large Copilot log file | ||
| func BenchmarkParseCopilotLog_Large(b *testing.B) { | ||
| engine := &workflow.CopilotEngine{} | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| _ = engine.ParseLogMetrics(largeCopilotLog, false) | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkParseCodexLog benchmarks Codex log parsing | ||
| func BenchmarkParseCodexLog(b *testing.B) { | ||
| engine := &workflow.CodexEngine{} | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| _ = engine.ParseLogMetrics(sampleCodexLog, false) | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkParseCodexLog_WithErrors benchmarks Codex log parsing with errors | ||
| func BenchmarkParseCodexLog_WithErrors(b *testing.B) { | ||
| logWithErrors := sampleCodexLog + ` | ||
| ] error: connection timeout | ||
| ] warning: retry attempt | ||
| ] error: max retries exceeded | ||
| ] tool github.get_repository(...) | ||
| ] success in 1.2s` | ||
|
|
||
| engine := &workflow.CodexEngine{} | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| _ = engine.ParseLogMetrics(logWithErrors, false) | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkAggregateWorkflowStats benchmarks log aggregation across multiple runs | ||
| func BenchmarkAggregateWorkflowStats(b *testing.B) { | ||
| // Create sample workflow runs | ||
| runs := []WorkflowRun{ | ||
| { | ||
| DatabaseID: 12345, | ||
| WorkflowName: "test-workflow-1", | ||
| Status: "completed", | ||
| Conclusion: "success", | ||
| TokenUsage: 1500, | ||
| EstimatedCost: 0.015, | ||
| Turns: 3, | ||
| ErrorCount: 0, | ||
| WarningCount: 1, | ||
| }, | ||
| { | ||
| DatabaseID: 12346, | ||
| WorkflowName: "test-workflow-2", | ||
| Status: "completed", | ||
| Conclusion: "failure", | ||
| TokenUsage: 2500, | ||
| EstimatedCost: 0.025, | ||
| Turns: 5, | ||
| ErrorCount: 2, | ||
| WarningCount: 3, | ||
| }, | ||
| { | ||
| DatabaseID: 12347, | ||
| WorkflowName: "test-workflow-1", | ||
| Status: "completed", | ||
| Conclusion: "success", | ||
| TokenUsage: 1800, | ||
| EstimatedCost: 0.018, | ||
| Turns: 4, | ||
| ErrorCount: 0, | ||
| WarningCount: 0, | ||
| }, | ||
| } | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| // Simulate aggregation logic | ||
| totalTokens := 0 | ||
| totalCost := 0.0 | ||
| totalTurns := 0 | ||
| totalErrors := 0 | ||
| totalWarnings := 0 | ||
|
|
||
| for _, run := range runs { | ||
| totalTokens += run.TokenUsage | ||
| totalCost += run.EstimatedCost | ||
| totalTurns += run.Turns | ||
| totalErrors += run.ErrorCount | ||
| totalWarnings += run.WarningCount | ||
| } | ||
|
|
||
| _ = totalTokens | ||
| _ = totalCost | ||
| _ = totalTurns | ||
| _ = totalErrors | ||
| _ = totalWarnings | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkAggregateWorkflowStats_Large benchmarks aggregation with many runs | ||
| func BenchmarkAggregateWorkflowStats_Large(b *testing.B) { | ||
| // Create 100 sample workflow runs | ||
| runs := make([]WorkflowRun, 100) | ||
| for i := 0; i < 100; i++ { | ||
| runs[i] = WorkflowRun{ | ||
| DatabaseID: int64(12345 + i), | ||
| WorkflowName: "test-workflow", | ||
| Status: "completed", | ||
| Conclusion: "success", | ||
| TokenUsage: 1500 + i*10, | ||
| EstimatedCost: 0.015 + float64(i)*0.001, | ||
| Turns: 3 + i%5, | ||
| ErrorCount: i % 3, | ||
| WarningCount: i % 2, | ||
| } | ||
| } | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| totalTokens := 0 | ||
| totalCost := 0.0 | ||
| totalTurns := 0 | ||
| totalErrors := 0 | ||
| totalWarnings := 0 | ||
|
|
||
| for _, run := range runs { | ||
| totalTokens += run.TokenUsage | ||
| totalCost += run.EstimatedCost | ||
| totalTurns += run.Turns | ||
| totalErrors += run.ErrorCount | ||
| totalWarnings += run.WarningCount | ||
| } | ||
|
|
||
| _ = totalTokens | ||
| _ = totalCost | ||
| _ = totalTurns | ||
| _ = totalErrors | ||
| _ = totalWarnings | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkExtractJSONMetrics benchmarks JSON metrics extraction | ||
| func BenchmarkExtractJSONMetrics(b *testing.B) { | ||
| jsonLine := `{"type":"usage","input_tokens":1000,"output_tokens":500,"cost":0.015}` | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| _ = workflow.ExtractJSONMetrics(jsonLine, false) | ||
| } | ||
| } | ||
|
|
||
| // BenchmarkExtractJSONMetrics_Complex benchmarks complex JSON metrics extraction | ||
| func BenchmarkExtractJSONMetrics_Complex(b *testing.B) { | ||
| jsonLine := `{"type":"result","total_input_tokens":5000,"total_output_tokens":2500,"cost":0.075,"metadata":{"tool_calls":["github.get_issue","github.add_comment"],"duration_ms":1500}}` | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| _ = workflow.ExtractJSONMetrics(jsonLine, false) | ||
| } | ||
| } | ||
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The BenchmarkAggregateWorkflowStats and BenchmarkAggregateWorkflowStats_Large benchmarks simulate aggregation logic inline instead of calling actual aggregation functions from the codebase. This creates a maintainability issue.
Recommendation: Replace the inline aggregation code with calls to actual aggregation functions from the codebase. For example, if there is a function like AggregateWorkflowStats() or similar in the CLI package, the benchmark should call that function instead of manually summing values.