diff --git a/backend/controllers/github.go b/backend/controllers/github.go index fe35ad39a..6accb0c34 100644 --- a/backend/controllers/github.go +++ b/backend/controllers/github.go @@ -2,6 +2,11 @@ package controllers import ( "context" + "log/slog" + "net/http" + "reflect" + "strconv" + "github.com/diggerhq/digger/backend/ci_backends" "github.com/diggerhq/digger/backend/logging" "github.com/diggerhq/digger/backend/middleware" @@ -9,10 +14,6 @@ import ( "github.com/diggerhq/digger/backend/utils" "github.com/gin-gonic/gin" "github.com/google/go-github/v61/github" - "log/slog" - "net/http" - "reflect" - "strconv" ) type IssueCommentHook func(gh utils.GithubClientProvider, payload *github.IssueCommentEvent, ciBackendProvider ci_backends.CiBackendProvider) error @@ -114,6 +115,31 @@ func (d DiggerController) GithubAppWebHook(c *gin.Context) { handlePullRequestEvent(gh, event, d.CiBackendProvider, appId64) }(c.Request.Context()) + case *github.CheckRunEvent: + slog.Info("Processing CheckRunEvent", + "action", event.GetAction(), + "checkRunID", event.GetCheckRun().GetID(), + ) + + // Only care about button clicks: + if event.GetAction() != "requested_action" { + // e.g. "created", "completed", etc. – ignore for now + return + } + + ra := event.GetRequestedAction() + if ra == nil { + slog.Warn("requested_action is nil in CheckRunEvent") + return + } + + identifier := ra.Identifier + // run it as a goroutine to avoid timeouts + go func(ctx context.Context) { + defer logging.InheritRequestLogger(ctx)() + handleCheckRunActionEvent(gh, identifier, event, d.CiBackendProvider, appId64) + }(c.Request.Context()) + slog.Info("Processing CheckRun requested_action", "identifier", identifier) default: slog.Debug("Unhandled event type", "eventType", reflect.TypeOf(event)) } diff --git a/backend/controllers/github_comment.go b/backend/controllers/github_comment.go index 288fafa42..e57f087df 100644 --- a/backend/controllers/github_comment.go +++ b/backend/controllers/github_comment.go @@ -492,14 +492,16 @@ func handleIssueCommentEvent(gh utils.GithubClientProvider, payload *github.Issu "command", *diggerCommand, ) // This one is for aggregate reporting - err = utils.SetPRStatusForJobs(ghService, issueNumber, jobs) + //err = utils.SetPRCommitStatusForJobs(ghService, issueNumber, jobs) + _, _, err = utils.SetPRCheckForJobs(ghService, issueNumber, jobs, *commitSha, repoName, repoOwner) return nil } // If we reach here then we have created a comment that would have led to more events segment.Track(*org, repoOwner, vcsActorID, "github", "issue_digger_comment", map[string]string{"comment": commentBody}) - err = utils.SetPRStatusForJobs(ghService, issueNumber, jobs) + //err = utils.SetPRCommitStatusForJobs(ghService, issueNumber, jobs) + batchCheckRunData, jobCheckRunDataMap, err := utils.SetPRCheckForJobs(ghService, issueNumber, jobs, *commitSha, repoName, repoOwner) if err != nil { slog.Error("Error setting status for PR", "issueNumber", issueNumber, @@ -551,34 +553,19 @@ func handleIssueCommentEvent(gh utils.GithubClientProvider, payload *github.Issu slog.Debug("Created AI summary comment", "commentId", aiSummaryCommentId) } + + reporterType := "lazy" + if config.Reporting.CommentsEnabled == false { + reporterType = "noop" + } + slog.Info("Converting jobs to Digger jobs", "issueNumber", issueNumber, "command", *diggerCommand, "jobCount", len(impactedProjectsJobMap), ) - batchId, _, err := utils.ConvertJobsToDiggerJobs( - *diggerCommand, - "github", - orgId, - impactedProjectsJobMap, - impactedProjectsMap, - 
projectsGraph, - installationId, - *prSourceBranch, - issueNumber, - repoOwner, - repoName, - repoFullName, - *commitSha, - reporterCommentId, - diggerYmlStr, - 0, - aiSummaryCommentId, - config.ReportTerraformOutputs, - coverAllImpactedProjects, - nil, - ) + batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, reporterType, "github", orgId, impactedProjectsJobMap, impactedProjectsMap, projectsGraph, installationId, *prSourceBranch, issueNumber, repoOwner, repoName, repoFullName, *commitSha, &reporterCommentId, diggerYmlStr, 0, aiSummaryCommentId, config.ReportTerraformOutputs, coverAllImpactedProjects, nil, batchCheckRunData, jobCheckRunDataMap) if err != nil { slog.Error("Error converting jobs to Digger jobs", "issueNumber", issueNumber, @@ -593,6 +580,16 @@ func handleIssueCommentEvent(gh utils.GithubClientProvider, payload *github.Issu "batchId", batchId, ) + batch, err := models.DB.GetDiggerBatch(batchId) + if err != nil { + slog.Error("Error getting Digger batch", + "batchId", batchId, + "error", err, + ) + commentReporterManager.UpdateComment(fmt.Sprintf(":x: Could not retrieve created batch: %v", err)) + return fmt.Errorf("error getting digger batch") + } + if config.CommentRenderMode == digger_config.CommentRenderModeGroupByModule && (*diggerCommand == scheduler.DiggerCommandPlan || *diggerCommand == scheduler.DiggerCommandApply) { @@ -608,15 +605,6 @@ func handleIssueCommentEvent(gh utils.GithubClientProvider, payload *github.Issu return fmt.Errorf("error posting initial comments") } - batch, err := models.DB.GetDiggerBatch(batchId) - if err != nil { - slog.Error("Error getting Digger batch", - "batchId", batchId, - "error", err, - ) - commentReporterManager.UpdateComment(fmt.Sprintf(":x: PostInitialSourceComments error: %v", err)) - return fmt.Errorf("error getting digger batch") - } batch.SourceDetails, err = json.Marshal(sourceDetails) if err != nil { diff --git a/backend/controllers/github_helpers.go b/backend/controllers/github_helpers.go index 0f3906310..92159d5a8 100644 --- a/backend/controllers/github_helpers.go +++ b/backend/controllers/github_helpers.go @@ -5,6 +5,13 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" + "net/http" + "os" + "path/filepath" + "slices" + "strings" + "github.com/diggerhq/digger/backend/ci_backends" "github.com/diggerhq/digger/backend/models" "github.com/diggerhq/digger/backend/services" @@ -19,12 +26,6 @@ import ( "github.com/google/uuid" "golang.org/x/oauth2" "gorm.io/gorm" - "log/slog" - "net/http" - "os" - "path/filepath" - "slices" - "strings" ) // why this validation is needed: https://roadie.io/blog/avoid-leaking-github-org-data/ @@ -664,9 +665,7 @@ func GetDiggerConfigForBranchWithGracefulHandling(gh utils.GithubClientProvider, } } - diggerYmlStr, ghService, config, dependencyGraph, err := GetDiggerConfigForBranch( - gh, installationId, repoFullName, repoOwner, repoName, cloneUrl, branch, changedFiles, taConfig, - ) + diggerYmlStr, ghService, config, dependencyGraph, err := GetDiggerConfigForBranchOrSha(gh, installationId, repoFullName, repoOwner, repoName, cloneUrl, branch, "", changedFiles, taConfig) if err != nil { errMsg := err.Error() @@ -837,7 +836,7 @@ func getDiggerConfigForPR(gh utils.GithubClientProvider, orgId uint, prLabels [] return diggerYmlStr, ghService, config, dependencyGraph, &prBranch, &prCommitSha, changedFiles, nil } -func GetDiggerConfigForBranch(gh utils.GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string, cloneUrl string, branch string, 
changedFiles []string, taConfig *tac.AtlantisConfig) (string, *github2.GithubService, *digger_config.DiggerConfig, graph.Graph[string, digger_config.Project], error) { +func GetDiggerConfigForBranchOrSha(gh utils.GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string, cloneUrl string, branch string, commitSha string, changedFiles []string, taConfig *tac.AtlantisConfig) (string, *github2.GithubService, *digger_config.DiggerConfig, graph.Graph[string, digger_config.Project], error) { slog.Info("Getting Digger config for branch", slog.Group("repository", slog.String("fullName", repoFullName), @@ -863,7 +862,7 @@ func GetDiggerConfigForBranch(gh utils.GithubClientProvider, installationId int6 var diggerYmlStr string var dependencyGraph graph.Graph[string, digger_config.Project] - err = git_utils.CloneGitRepoAndDoAction(cloneUrl, branch, "", *token, "", func(dir string) error { + err = git_utils.CloneGitRepoAndDoAction(cloneUrl, branch, commitSha, *token, "", func(dir string) error { slog.Debug("Reading Digger config from cloned repository", "directory", dir) diggerYmlStr, err = digger_config.ReadDiggerYmlFileContents(dir) diff --git a/backend/controllers/github_pull_request.go b/backend/controllers/github_pull_request.go index 70e31a221..452d3e926 100644 --- a/backend/controllers/github_pull_request.go +++ b/backend/controllers/github_pull_request.go @@ -200,7 +200,7 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR // This one is for aggregate reporting commentReporterManager.UpdateComment(":construction_worker: No projects impacted") } - err = utils.SetPRStatusForJobs(ghService, prNumber, jobsForImpactedProjects) + _, _, err = utils.SetPRCheckForJobs(ghService, prNumber, jobsForImpactedProjects, commitSha, repoName, repoOwner) return nil } @@ -376,7 +376,8 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR return fmt.Errorf("error initializing comment reporter") } - err = utils.SetPRStatusForJobs(ghService, prNumber, jobsForImpactedProjects) + //err = utils.SetPRCommitStatusForJobs(ghService, prNumber, jobsForImpactedProjects) + batchCheckRunData, jobsCheckRunIdsMap, err := utils.SetPRCheckForJobs(ghService, prNumber, jobsForImpactedProjects, commitSha, repoName, repoOwner) if err != nil { slog.Error("Error setting status for PR", "prNumber", prNumber, @@ -464,6 +465,10 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR slog.Debug("Created AI summary comment", "commentId", aiSummaryCommentId) } + reporterType := "lazy" + if config.Reporting.CommentsEnabled == false { + reporterType = "noop" + } slog.Info("Converting jobs to Digger jobs", "prNumber", prNumber, "command", *diggerCommand, @@ -473,28 +478,8 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR if config.RespectLayers { } - batchId, _, err := utils.ConvertJobsToDiggerJobs( - *diggerCommand, - models.DiggerVCSGithub, - organisationId, - impactedJobsMap, - impactedProjectsMap, - projectsGraph, - installationId, - branch, - prNumber, - repoOwner, - repoName, - repoFullName, - commitSha, - commentId, - diggerYmlStr, - 0, - aiSummaryCommentId, - config.ReportTerraformOutputs, - coverAllImpactedProjects, - nil, - ) + + batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, reporterType, models.DiggerVCSGithub, organisationId, impactedJobsMap, impactedProjectsMap, projectsGraph, installationId, branch, prNumber, repoOwner, repoName, repoFullName, 
commitSha, &commentId, diggerYmlStr, 0, aiSummaryCommentId, config.ReportTerraformOutputs, coverAllImpactedProjects, nil, batchCheckRunData, jobsCheckRunIdsMap) if err != nil { slog.Error("Error converting jobs to Digger jobs", "prNumber", prNumber, @@ -510,6 +495,16 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR "batchId", batchId, ) + batch, err := models.DB.GetDiggerBatch(batchId) + if err != nil { + slog.Error("Error getting Digger batch", + "batchId", batchId, + "error", err, + ) + commentReporterManager.UpdateComment(fmt.Sprintf(":x: Could not retrieve created batch: %v", err)) + return fmt.Errorf("error getting digger batch") + } + if config.CommentRenderMode == digger_config.CommentRenderModeGroupByModule { slog.Info("Using GroupByModule render mode for comments", "prNumber", prNumber) @@ -523,16 +518,6 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR return fmt.Errorf("error posting initial comments") } - batch, err := models.DB.GetDiggerBatch(batchId) - if err != nil { - slog.Error("Error getting Digger batch", - "batchId", batchId, - "error", err, - ) - commentReporterManager.UpdateComment(fmt.Sprintf(":x: PostInitialSourceComments error: %v", err)) - return fmt.Errorf("error getting digger batch") - } - batch.SourceDetails, err = json.Marshal(sourceDetails) if err != nil { slog.Error("Error marshalling source details", diff --git a/backend/controllers/github_test.go b/backend/controllers/github_test.go index 5f7219d85..15955f9ed 100644 --- a/backend/controllers/github_test.go +++ b/backend/controllers/github_test.go @@ -724,7 +724,8 @@ func TestJobsTreeWithOneJobsAndTwoProjects(t *testing.T) { graph, err := configuration.CreateProjectDependencyGraph(projects) assert.NoError(t, err) - _, result, err := utils.ConvertJobsToDiggerJobs("", "github", 1, jobs, projectMap, graph, 41584295, "", 2, "diggerhq", "parallel_jobs_demo", "diggerhq/parallel_jobs_demo", "", 123, "test", 0, "", false, true, nil) + var commentId int64 = 123 + _, result, err := utils.ConvertJobsToDiggerJobs("", "lazy", "github", 1, jobs, projectMap, graph, 41584295, "", 2, "diggerhq", "parallel_jobs_demo", "diggerhq/parallel_jobs_demo", "", &commentId, "test", 0, "", false, true, nil, nil, nil) assert.NoError(t, err) assert.Equal(t, 1, len(result)) @@ -754,7 +755,8 @@ func TestJobsTreeWithTwoDependantJobs(t *testing.T) { projectMap["dev"] = project1 projectMap["prod"] = project2 - _, result, err := utils.ConvertJobsToDiggerJobs("", "github", 1, jobs, projectMap, graph, 123, "", 2, "", "", "test", "", 123, "test", 0, "", false, true, nil) + var commentId int64 = 123 + _, result, err := utils.ConvertJobsToDiggerJobs("", "lazy", "github", 1, jobs, projectMap, graph, 123, "", 2, "", "", "test", "", &commentId, "test", 0, "", false, true, nil, nil, nil) assert.NoError(t, err) assert.Equal(t, 2, len(result)) @@ -788,7 +790,8 @@ func TestJobsTreeWithTwoIndependentJobs(t *testing.T) { projectMap["dev"] = project1 projectMap["prod"] = project2 - _, result, err := utils.ConvertJobsToDiggerJobs("", "github", 1, jobs, projectMap, graph, 123, "", 2, "", "", "test", "", 123, "test", 0, "", false, true, nil) + var commentId int64 = 123 + _, result, err := utils.ConvertJobsToDiggerJobs("", "lazy", "github", 1, jobs, projectMap, graph, 123, "", 2, "", "", "test", "", &commentId, "test", 0, "", false, true, nil, nil, nil) assert.NoError(t, err) assert.Equal(t, 2, len(result)) @@ -834,7 +837,8 @@ func TestJobsTreeWithThreeLevels(t *testing.T) { projectMap["555"] 
= project5
 	projectMap["666"] = project6
 
-	_, result, err := utils.ConvertJobsToDiggerJobs("", "github", 1, jobs, projectMap, graph, 123, "", 2, "", "", "test", "", 123, "test", 0, "", false, true, nil)
+	var commentId int64 = 123
+	_, result, err := utils.ConvertJobsToDiggerJobs("", "lazy", "github", 1, jobs, projectMap, graph, 123, "", 2, "", "", "test", "", &commentId, "test", 0, "", false, true, nil, nil, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, 6, len(result))
diff --git a/backend/controllers/gitihub_check_run.go b/backend/controllers/gitihub_check_run.go
new file mode 100644
index 000000000..5becda25e
--- /dev/null
+++ b/backend/controllers/gitihub_check_run.go
@@ -0,0 +1,201 @@
+package controllers
+
+import (
+	"fmt"
+	"log/slog"
+	"runtime/debug"
+	"strings"
+
+	"github.com/diggerhq/digger/backend/ci_backends"
+	"github.com/diggerhq/digger/backend/models"
+	"github.com/diggerhq/digger/backend/utils"
+	"github.com/diggerhq/digger/libs/ci/generic"
+	"github.com/diggerhq/digger/libs/digger_config"
+	"github.com/diggerhq/digger/libs/scheduler"
+	"github.com/google/go-github/v61/github"
+	"github.com/samber/lo"
+)
+
+func handleCheckRunActionEvent(gh utils.GithubClientProvider, identifier string, payload *github.CheckRunEvent, ciBackendProvider ci_backends.CiBackendProvider, appId int64) error {
+	defer func() {
+		if r := recover(); r != nil {
+			stack := string(debug.Stack())
+			slog.Error("Recovered from panic in handleCheckRunActionEvent", "error", r, "stack", stack)
+			fmt.Printf("Stack trace:\n%s\n", stack)
+		}
+	}()
+
+	repoFullName := *payload.Repo.FullName
+	repoName := *payload.Repo.Name
+	repoOwner := *payload.Repo.Owner.Login
+	cloneUrl := *payload.Repo.CloneURL
+	actor := *payload.Sender.Login
+
+	var checkRunBatch *models.DiggerBatch
+	var checkedRunDiggerJobs []models.DiggerJob
+
+	batchCheckApplyAllPrefix := string(utils.CheckedRunActionBatchApply) + ":"
+	if strings.HasPrefix(identifier, batchCheckApplyAllPrefix) {
+		diggerBatchId := strings.ReplaceAll(identifier, batchCheckApplyAllPrefix, "")
+		var err error
+		checkRunBatch, err = models.DB.GetDiggerBatchFromId(diggerBatchId)
+		if err != nil {
+			slog.Error("Failed to find batch", "identifier", identifier, "error", err)
+			return fmt.Errorf("failed to find batch from identifier %v, err: %v", identifier, err)
+		}
+		checkedRunDiggerJobs, err = models.DB.GetDiggerJobsForBatch(checkRunBatch.ID)
+		if err != nil {
+			slog.Error("Failed to find jobs for batch", "batchId", checkRunBatch.ID, "error", err)
+			return fmt.Errorf("failed to find jobs for batch %v, err: %v", checkRunBatch.ID, err)
+		}
+	} else {
+		// only the batch "apply all" action is handled for now; bail out so checkRunBatch is never nil below
+		slog.Warn("Unknown check run action identifier, ignoring", "identifier", identifier)
+		return fmt.Errorf("unknown check run action identifier: %v", identifier)
+	}
+
+	installationId := checkRunBatch.GithubInstallationId
+	prNumber := checkRunBatch.PrNumber
+	commitSha := checkRunBatch.CommitSha
+
+	link, err := models.DB.GetGithubAppInstallationLink(installationId)
+	if err != nil {
+		slog.Error("Error getting GitHub app installation link",
+			"installationId", installationId,
+			"error", err,
+		)
+		return fmt.Errorf("error getting github app link")
+	}
+	if link == nil {
+		slog.Error("GitHub app installation link not found",
+			"installationId", installationId,
+			"prNumber", prNumber,
+		)
+		return fmt.Errorf("GitHub App installation not found for installation ID %d. Please ensure the GitHub App is properly installed on the repository and the installation process completed successfully", installationId)
+	}
+	orgId := link.OrganisationId
+
+	ghService, _, ghServiceErr := utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName)
+	if ghServiceErr != nil {
+		slog.Error("Error getting GitHub service",
+			"installationId", installationId,
+			"repoFullName", repoFullName,
+			"issueNumber", prNumber,
+			"error", ghServiceErr,
+		)
+		return fmt.Errorf("error getting ghService to post error comment")
+	}
+
+	prBranchName, _, _, _, err := ghService.GetBranchName(prNumber)
+	if err != nil {
+		slog.Error("Error getting branch name for PR", "prNumber", prNumber, "error", err)
+		return fmt.Errorf("error getting branch name for PR: %v", err)
+	}
+
+	diggerYmlStr, ghService, config, projectsGraph, err := GetDiggerConfigForBranchOrSha(gh, installationId, repoFullName, repoOwner, repoName, cloneUrl, prBranchName, commitSha, nil, nil)
+	if err != nil {
+		slog.Error("Error getting Digger config for PR",
+			"issueNumber", prNumber,
+			"repoFullName", repoFullName,
+			"error", err,
+		)
+		return fmt.Errorf("error getting digger config")
+	}
+
+	selectedProjects := lo.Filter(config.Projects, func(diggerYmlProject digger_config.Project, index int) bool {
+		return lo.ContainsBy(checkedRunDiggerJobs, func(diggerJob models.DiggerJob) bool {
+			return diggerJob.ProjectName == diggerYmlProject.Name
+		})
+	})
+
+	jobs, err := generic.CreateJobsForProjects(selectedProjects, "digger apply", "check_run_action", repoFullName, actor, config.Workflows, &prNumber, &commitSha, "", checkRunBatch.BranchName, false)
+	if err != nil {
+		slog.Error("Error creating jobs for selected projects", "prNumber", prNumber, "error", err)
+		return fmt.Errorf("error creating jobs for projects: %v", err)
+	}
+
+	// use the noop reporter: a check run button click should not produce PR comments (confusing)
+	reporterType := "noop"
+
+	impactedProjectsMap := make(map[string]digger_config.Project)
+	for _, p := range selectedProjects {
+		impactedProjectsMap[p.Name] = p
+	}
+
+	impactedProjectsJobMap := make(map[string]scheduler.Job)
+	for _, j := range jobs {
+		impactedProjectsJobMap[j.ProjectName] = j
+	}
+
+	batchCheckRunData, jobCheckRunDataMap, err := utils.SetPRCheckForJobs(ghService, prNumber, jobs, commitSha, repoName, repoOwner)
+	if err != nil {
+		slog.Error("Error setting status for PR",
+			"prNumber", prNumber,
+			"error", err,
+		)
+		return fmt.Errorf("error setting status for PR: %v", err)
+	}
+
+	batchId, _, err := utils.ConvertJobsToDiggerJobs(
+		scheduler.DiggerCommandApply,
+		reporterType,
+		"github",
+		orgId,
+		impactedProjectsJobMap,
+		impactedProjectsMap,
+		projectsGraph,
+		installationId,
+		prBranchName,
+		prNumber,
+		repoOwner,
+		repoName,
+		repoFullName,
+		commitSha,
+		nil,
+		diggerYmlStr,
+		0,
+		"",
+		config.ReportTerraformOutputs,
+		false,
+		nil,
+		batchCheckRunData,
+		jobCheckRunDataMap,
+	)
+	if err != nil {
+		slog.Error("Error converting jobs to Digger jobs",
+			"issueNumber", prNumber,
+			"error", err,
+		)
+		return fmt.Errorf("error converting jobs")
+	}
+
+	ciBackend, err := ciBackendProvider.GetCiBackend(
+		ci_backends.CiBackendOptions{
+			GithubClientProvider: gh,
+			GithubInstallationId: installationId,
+			GithubAppId:          appId,
+			RepoName:             repoName,
+			RepoOwner:            repoOwner,
+			RepoFullName:         repoFullName,
+		},
+	)
+	if err != nil {
+		slog.Error("Error getting CI backend",
+			"prNumber", prNumber,
+			"repoFullName", repoFullName,
+			"error", err,
+		)
+		return fmt.Errorf("error fetching ci backend %v", err)
+	}
+
+	err = TriggerDiggerJobs(ciBackend, repoFullName, repoOwner, repoName, batchId, prNumber, ghService, gh)
+	if err != nil {
+		slog.Error("Error triggering Digger jobs",
+			"prNumber", prNumber,
+			"batchId", batchId,
+			"error", err,
+		)
+		return fmt.Errorf("error triggering Digger Jobs")
+	}
+
+	slog.Info("Successfully processed check run action event",
+		"prNumber", prNumber,
+		"batchId", batchId,
+		"repoFullName", repoFullName,
+	)
+	return nil
+}
\ No newline at end of file
diff --git a/backend/controllers/projects.go b/backend/controllers/projects.go
index e0cde5e4a..27e34376f 100644
--- a/backend/controllers/projects.go
+++ b/backend/controllers/projects.go
@@ -1006,7 +1006,8 @@ func (d DiggerController) SetJobStatusForProject(c *gin.Context) {
 		c.JSON(http.StatusInternalServerError, gin.H{"error": "Error getting refreshed batch"})
 		return
 	}
-	err = UpdateCheckStatusForBatch(d.GithubClientProvider, refreshedBatch)
+	//err = UpdateCheckStatusForBatch(d.GithubClientProvider, refreshedBatch)
+	err = UpdateCheckRunForBatch(d.GithubClientProvider, refreshedBatch)
 	if err != nil {
 		slog.Error("Error updating check status",
 			"batchId", batch.ID,
@@ -1026,7 +1027,8 @@
 		c.JSON(http.StatusInternalServerError, gin.H{"error": "Error getting refreshed job"})
 		return
 	}
-	err = UpdateCheckStatusForJob(d.GithubClientProvider, refreshedJob)
+	//err = UpdateCommitStatusForJob(d.GithubClientProvider, refreshedJob)
+	err = UpdateCheckRunForJob(d.GithubClientProvider, refreshedJob)
 	if err != nil {
 		slog.Error("Error updating check status",
 			"jobId", jobId,
diff --git a/backend/controllers/projects_helpers.go b/backend/controllers/projects_helpers.go
index 9a3442db2..54d8e02cf 100644
--- a/backend/controllers/projects_helpers.go
+++ b/backend/controllers/projects_helpers.go
@@ -3,14 +3,117 @@ package controllers
 import (
 	"encoding/json"
 	"fmt"
+	"log/slog"
+	"os"
+
 	"github.com/diggerhq/digger/backend/models"
 	"github.com/diggerhq/digger/backend/utils"
+	"github.com/diggerhq/digger/libs/ci/github"
 	"github.com/diggerhq/digger/libs/digger_config"
 	orchestrator_scheduler "github.com/diggerhq/digger/libs/scheduler"
-	"log/slog"
 )
 
-func UpdateCheckStatusForBatch(gh utils.GithubClientProvider, batch *models.DiggerBatch) error {
+
+func GenerateChecksSummaryForBatch(batch *models.DiggerBatch) (string, error) {
+	summaryEndpoint := os.Getenv("DIGGER_AI_SUMMARY_ENDPOINT")
+	if summaryEndpoint == "" {
+		slog.Error("DIGGER_AI_SUMMARY_ENDPOINT not set")
+		return "", fmt.Errorf("could not generate AI summary, ai summary endpoint missing")
+	}
+	apiToken := os.Getenv("DIGGER_AI_SUMMARY_API_TOKEN")
+
+	jobs, err := models.DB.GetDiggerJobsForBatch(batch.ID)
+	if err != nil {
+		slog.Error("Could not get jobs for batch",
+			"batchId", batch.ID,
+			"error", err,
+		)
+		return "", fmt.Errorf("could not get jobs for batch: %v", err)
+	}
+
+	terraformOutputs := ""
+	for _, job := range jobs {
+		var jobSpec orchestrator_scheduler.JobJson
+		err := json.Unmarshal(job.SerializedJobSpec, &jobSpec)
+		if err != nil {
+			slog.Error("Could not unmarshal job spec",
+				"jobId", job.DiggerJobID,
+				"error", err,
+			)
+			return "", fmt.Errorf("could not summarize plans due to unmarshalling error: %v", err)
+		}
+
+		projectName := jobSpec.ProjectName
+		slog.Debug("Adding Terraform output for project",
+			"projectName", projectName,
+			"jobId", job.DiggerJobID,
+			"outputLength", len(job.TerraformOutput),
+		)
+
+		terraformOutputs += fmt.Sprintf("terraform output for %v: %v \n\n", projectName, job.TerraformOutput)
+	}
+
+	aiSummary, err := utils.GetAiSummaryFromTerraformPlans(terraformOutputs, summaryEndpoint, apiToken)
+	if err != nil {
+		slog.Error("Could not generate AI summary from Terraform outputs",
+			"batchId", batch.ID,
+			"error", err,
+		)
+		return "", fmt.Errorf("could not summarize terraform outputs: %v", err)
+	}
+
+	summary := ""
+	if aiSummary != "FOUR_OH_FOUR" {
+		summary = fmt.Sprintf(":sparkles: **AI summary (experimental):** %v", aiSummary)
+	}
+
+	return summary, nil
+}
+
+func GenerateChecksSummaryForJob(job *models.DiggerJob) (string, error) {
+	batch := job.Batch
+	summaryEndpoint := os.Getenv("DIGGER_AI_SUMMARY_ENDPOINT")
+	if summaryEndpoint == "" {
+		slog.Error("AI summary endpoint not configured", "batch", batch.ID, "jobId", job.ID, "DiggerJobId", job.DiggerJobID)
+		return "", fmt.Errorf("could not generate AI summary, ai summary endpoint missing")
+	}
+	apiToken := os.Getenv("DIGGER_AI_SUMMARY_API_TOKEN")
+
+	if job.TerraformOutput == "" {
+		slog.Warn("Terraform output not set yet, ignoring this call")
+		return "", nil
+	}
+	terraformOutput := fmt.Sprintf("Terraform output for: %v\n\n", job.TerraformOutput)
+	aiSummary, err := utils.GetAiSummaryFromTerraformPlans(terraformOutput, summaryEndpoint, apiToken)
+	if err != nil {
+		slog.Error("Could not generate AI summary from Terraform outputs",
+			"batchId", batch.ID,
+			"error", err,
+		)
+		return "", fmt.Errorf("could not summarize terraform outputs: %v", err)
+	}
+
+	summary := ""
+
+	if job.WorkflowRunUrl != nil {
+		summary += fmt.Sprintf(":link: CI job: %v\n\n", *job.WorkflowRunUrl)
+	}
+
+	if aiSummary != "FOUR_OH_FOUR" {
+		summary += fmt.Sprintf(":sparkles: **AI summary (experimental):** %v", aiSummary)
+	}
+
+	return summary, nil
+}
+
+func UpdateCommitStatusForBatch(gh utils.GithubClientProvider, batch *models.DiggerBatch) error {
 	slog.Info("Updating PR status for batch",
 		"batchId", batch.ID,
 		"prNumber", batch.PrNumber,
@@ -60,9 +163,9 @@
 	}
 	slog.Debug("Updating PR status for batch",
 		"batchId", batch.ID, "prNumber", batch.PrNumber, "batchStatus", batch.Status, "batchType", batch.BatchType,
-		"newStatus", serializedBatch.ToStatusCheck())
+		"newStatus", serializedBatch.ToCommitStatusCheck())
 	if isPlanBatch {
-		prService.SetStatus(batch.PrNumber, serializedBatch.ToStatusCheck(), "digger/plan")
+		prService.SetStatus(batch.PrNumber, serializedBatch.ToCommitStatusCheck(), "digger/plan")
 		if disableDiggerApplyStatusCheck == false {
 			prService.SetStatus(batch.PrNumber, "pending", "digger/apply")
 		}
@@ -70,13 +173,126 @@
 	} else {
 		prService.SetStatus(batch.PrNumber, "success", "digger/plan")
 		if disableDiggerApplyStatusCheck == false {
-			prService.SetStatus(batch.PrNumber, serializedBatch.ToStatusCheck(), "digger/apply")
+			prService.SetStatus(batch.PrNumber, serializedBatch.ToCommitStatusCheck(), "digger/apply")
+		}
+	}
+	return nil
+}
+
+func UpdateCheckRunForBatch(gh utils.GithubClientProvider, batch *models.DiggerBatch) error {
+	slog.Info("Updating PR check run for batch",
+		"batchId", batch.ID,
+		"prNumber", batch.PrNumber,
+		"batchStatus", batch.Status,
+		"batchType", batch.BatchType,
+	)
+
+	if batch.CheckRunId == nil {
+		slog.Error("Check run id is nil for batch", "batchId", batch.ID)
+		return fmt.Errorf("check run id is nil for batch")
+	}
+
+	if batch.VCS != models.DiggerVCSGithub {
+		return fmt.Errorf("only github VCS is supported for modern checks at the moment")
+	}
+	prService, err := utils.GetPrServiceFromBatch(batch, gh)
+	if err != nil {
+		slog.Error("Error getting PR service",
+			"batchId", batch.ID,
+			"error", err,
+		)
+		return fmt.Errorf("error getting github service: %v", err)
+	}
+
+	ghPrService := prService.(*github.GithubService)
+	diggerYmlString := batch.DiggerConfig
+	diggerConfigYml, err := digger_config.LoadDiggerConfigYamlFromString(diggerYmlString)
+	if err != nil {
+		slog.Error("Error loading Digger config from batch",
+			"batchId", batch.ID,
+			"error", err,
+		)
+		return fmt.Errorf("error loading digger config from batch: %v", err)
+	}
+
+	config, _, err := digger_config.ConvertDiggerYamlToConfig(diggerConfigYml)
+	if err != nil {
+		slog.Error("Error converting Digger YAML to config",
+			"batchId", batch.ID,
+			"error", err,
+		)
+		return fmt.Errorf("error converting Digger YAML to config: %v", err)
+	}
+
+	disableDiggerApplyStatusCheck := config.DisableDiggerApplyStatusCheck
+
+	isPlanBatch := batch.BatchType == orchestrator_scheduler.DiggerCommandPlan
+
+	serializedBatch, err := batch.MapToJsonStruct()
+	if err != nil {
+		slog.Error("Error mapping batch to json struct",
+			"batchId", batch.ID,
+			"error", err,
+		)
+		return fmt.Errorf("error mapping batch to json struct: %v", err)
+	}
+	slog.Debug("Updating PR check run for batch",
+		"batchId", batch.ID, "prNumber", batch.PrNumber, "batchStatus", batch.Status, "batchType", batch.BatchType,
+		"newStatus", serializedBatch.ToCheckRunStatus())
+
+	jobs, err := models.DB.GetDiggerJobsForBatch(batch.ID)
+	if err != nil {
+		slog.Error("Error getting jobs for batch",
+			"batchId", batch.ID,
+			"error", err)
+		return fmt.Errorf("error getting jobs for batch: %v", err)
+	}
+	message, err := utils.GenerateRealtimeCommentMessage(jobs, batch.BatchType)
+	if err != nil {
+		slog.Error("Error generating realtime comment message",
+			"batchId", batch.ID,
+			"error", err)
+		return fmt.Errorf("error generating realtime comment message: %v", err)
+	}
+
+	summary, err := GenerateChecksSummaryForBatch(batch)
+	if err != nil {
+		slog.Warn("Error generating checks summary for batch", "batchId", batch.ID, "error", err)
+	}
+
+	if isPlanBatch {
+		status := serializedBatch.ToCheckRunStatus()
+		conclusion := serializedBatch.ToCheckRunConclusion()
+		title := "Plans Summary"
+		opts := github.GithubCheckRunUpdateOptions{
+			Status:     &status,
+			Conclusion: conclusion,
+			Title:      &title,
+			Summary:    &summary,
+			Text:       &message,
+			Actions:    utils.GetActionsForBatch(batch),
+		}
+		if _, err := ghPrService.UpdateCheckRun(*batch.CheckRunId, opts); err != nil {
+			slog.Error("Error updating check run for batch", "batchId", batch.ID, "error", err)
+		}
+	} else {
+		if disableDiggerApplyStatusCheck == false {
+			status := serializedBatch.ToCheckRunStatus()
+			conclusion := serializedBatch.ToCheckRunConclusion()
+			title := "Apply Summary"
+			opts := github.GithubCheckRunUpdateOptions{
+				Status:     &status,
+				Conclusion: conclusion,
+				Title:      &title,
+				Summary:    &summary,
+				Text:       &message,
+				Actions:    utils.GetActionsForBatch(batch),
+			}
+			if _, err := ghPrService.UpdateCheckRun(*batch.CheckRunId, opts); err != nil {
+				slog.Error("Error updating check run for batch", "batchId", batch.ID, "error", err)
+			}
 		}
 	}
 	return nil
 }
 
-func UpdateCheckStatusForJob(gh utils.GithubClientProvider, job *models.DiggerJob) error {
+func UpdateCommitStatusForJob(gh utils.GithubClientProvider, job *models.DiggerJob) error {
 	batch := job.Batch
 	slog.Info("Updating PR status for job",
 		"jobId", job.DiggerJobID,
@@ -102,7 +318,7 @@
 	}
 
 	isPlan := jobSpec.IsPlan()
-	status, err := models.GetStatusCheckForJob(job)
+	status, err := models.GetCommitStatusForJob(job)
 	if err != nil {
 		return fmt.Errorf("could not get status check for job: %v", err)
 	}
@@ -111,8 +327,98 @@
 		prService.SetStatus(batch.PrNumber, status, jobSpec.GetProjectAlias()+"/plan")
 		prService.SetStatus(batch.PrNumber, "neutral", jobSpec.GetProjectAlias()+"/apply")
 	} else {
-		//prService.SetStatus(batch.PrNumber, "success", jobSpec.GetProjectAlias()+"/plan")
 		prService.SetStatus(batch.PrNumber, status, jobSpec.GetProjectAlias()+"/apply")
 	}
 	return nil
 }
+
+// more modern check runs on github have their own page
+func UpdateCheckRunForJob(gh utils.GithubClientProvider, job *models.DiggerJob) error {
+	batch := job.Batch
+	slog.Info("Updating PR check run for job",
+		"jobId", job.DiggerJobID,
+		"prNumber", batch.PrNumber,
+		"jobStatus", job.Status,
+		"batchType", batch.BatchType,
+	)
+
+	if batch.VCS != models.DiggerVCSGithub {
+		slog.Error("Error updating PR status for job, only github is supported", "batchId", batch.ID, "vcs", batch.VCS)
+		return fmt.Errorf("error updating PR status for job, only github is supported")
+	}
+
+	if job.CheckRunId == nil {
+		slog.Error("Error updating PR status, could not find checkRunId in job", "diggerJobId", job.DiggerJobID)
+		return fmt.Errorf("error updating PR status, could not find checkRunId in job")
+	}
+
+	prService, err := utils.GetPrServiceFromBatch(batch, gh)
+	if err != nil {
+		slog.Error("Error getting PR service",
+			"batchId", batch.ID,
+			"error", err,
+		)
+		return fmt.Errorf("error getting github service: %v", err)
+	}
+	// assert the concrete type only after the error check so a failed lookup cannot panic
+	ghService := prService.(*github.GithubService)
+
+	var jobSpec orchestrator_scheduler.JobJson
+	err = json.Unmarshal([]byte(job.SerializedJobSpec), &jobSpec)
+	if err != nil {
+		slog.Error("Could not unmarshal job spec", "jobId", job.DiggerJobID, "error", err)
+		return fmt.Errorf("could not unmarshal json string: %v", err)
+	}
+
+	isPlan := jobSpec.IsPlan()
+	status, err := models.GetCheckRunStatusForJob(job)
+	if err != nil {
+		return fmt.Errorf("could not get status check for job: %v", err)
+	}
+
+	conclusion, err := models.GetCheckRunConclusionForJob(job)
+	if err != nil {
+		return fmt.Errorf("could not get conclusion for job: %v", err)
+	}
+
+	text := "" +
+		"```terraform\n" +
+		job.TerraformOutput +
+		"```\n"
+
+	summary, err := GenerateChecksSummaryForJob(job)
+	if err != nil {
+		slog.Warn("Error generating checks summary for job", "batchId", batch.ID, "error", err)
+	}
+
+	slog.Debug("Updating PR check run for job", "jobId", job.DiggerJobID, "status", status, "conclusion", conclusion)
+	if isPlan {
+		title := fmt.Sprintf("%v to create %v to update %v to delete", job.DiggerJobSummary.ResourcesCreated, job.DiggerJobSummary.ResourcesUpdated, job.DiggerJobSummary.ResourcesDeleted)
+		opts := github.GithubCheckRunUpdateOptions{
+			Status:     &status,
+			Conclusion: &conclusion,
+			Title:      &title,
+			Summary:    &summary,
+			Text:       &text,
+			Actions:    utils.GetActionsForJob(job),
+		}
+		_, err = ghService.UpdateCheckRun(*job.CheckRunId, opts)
+		if err != nil {
+			slog.Error("Error updating check run for job", "error", err)
+		}
+	} else {
+		title := fmt.Sprintf("%v created %v updated %v deleted", job.DiggerJobSummary.ResourcesCreated, job.DiggerJobSummary.ResourcesUpdated, job.DiggerJobSummary.ResourcesDeleted)
+		opts := github.GithubCheckRunUpdateOptions{
+			Status:     &status,
+			Conclusion: &conclusion,
+			Title:      &title,
+			Summary:    &summary,
+			Text:       &text,
+			Actions:    utils.GetActionsForJob(job),
+		}
+		_, err = ghService.UpdateCheckRun(*job.CheckRunId, opts)
+		if err != nil {
+			slog.Error("Error updating check run for job", "error", err)
+		}
+	}
+	return nil
+}
diff --git a/backend/migrations/20251118022613.sql b/backend/migrations/20251118022613.sql
new file mode 100644
index 000000000..667c67f32
--- /dev/null
+++ b/backend/migrations/20251118022613.sql
@@ -0,0 +1,4 @@
+-- Modify "digger_batches" table
+ALTER TABLE "public"."digger_batches" ADD COLUMN "check_run_id" text NULL;
+-- Modify 
"digger_jobs" table +ALTER TABLE "public"."digger_jobs" ADD COLUMN "check_run_id" text NULL; diff --git a/backend/migrations/20251119004103.sql b/backend/migrations/20251119004103.sql new file mode 100644 index 000000000..a34906428 --- /dev/null +++ b/backend/migrations/20251119004103.sql @@ -0,0 +1,4 @@ +-- Modify "digger_batches" table +ALTER TABLE "public"."digger_batches" ADD COLUMN "check_run_url" text NULL; +-- Modify "digger_jobs" table +ALTER TABLE "public"."digger_jobs" ADD COLUMN "check_run_url" text NULL; diff --git a/backend/migrations/20251120020911.sql b/backend/migrations/20251120020911.sql new file mode 100644 index 000000000..2f73207bc --- /dev/null +++ b/backend/migrations/20251120020911.sql @@ -0,0 +1,2 @@ +-- Modify "digger_batches" table +ALTER TABLE "public"."digger_batches" ADD COLUMN "digger_batch_id" text NULL; diff --git a/backend/migrations/20251120060106.sql b/backend/migrations/20251120060106.sql new file mode 100644 index 000000000..dcad2230f --- /dev/null +++ b/backend/migrations/20251120060106.sql @@ -0,0 +1,2 @@ +-- Modify "digger_jobs" table +ALTER TABLE "public"."digger_jobs" ADD COLUMN "reporter_type" text NULL DEFAULT 'lazy'; diff --git a/backend/migrations/atlas.sum b/backend/migrations/atlas.sum index b92355cf9..803d792ad 100644 --- a/backend/migrations/atlas.sum +++ b/backend/migrations/atlas.sum @@ -1,4 +1,4 @@ -h1:HUFvDp6jvx9L32hqunXbuvbX3cWvhFy8oVuq3SZ7xzY= +h1:Yoa3j5DOMmrwiYi3e7uIy5Y32t7e4jmjUxerP0qXEGA= 20231227132525.sql h1:43xn7XC0GoJsCnXIMczGXWis9d504FAWi4F1gViTIcw= 20240115170600.sql h1:IW8fF/8vc40+eWqP/xDK+R4K9jHJ9QBSGO6rN9LtfSA= 20240116123649.sql h1:R1JlUIgxxF6Cyob9HdtMqiKmx/BfnsctTl5rvOqssQw= @@ -69,3 +69,7 @@ h1:HUFvDp6jvx9L32hqunXbuvbX3cWvhFy8oVuq3SZ7xzY= 20251107000100.sql h1:b3USfhlLulZ+6iL9a66Ddpy6uDcYmmyDGZLYzbEjuRA= 20251114205312.sql h1:RBQdD8zLKavCEfZOW8S2r31QBC9ZznCjB1Tw4SDJQGg= 20251114230419.sql h1:/WA7vp7SKgdfe3KHS65nbwE4RUyUmlUOxuQ8tZZ/FQI= +20251118022613.sql h1:nSMv/SJ6gUdKjWWVJJhgPTN18SQrhkkknBJGnx8lhKc= +20251119004103.sql h1:zdyEn54C6mY5iKZ86LQWhOi13sSA2EMriE1lQ9wGi6w= +20251120020911.sql h1:JaybKP/PHLE3qt5+jA9k0sGFAMPl62T91SSMOC3W5Ow= +20251120060106.sql h1:MK5LjwWUr3nszLIzSJJBAy7d8Y2PvpDRV8qmTTnFfIM= diff --git a/backend/models/scheduler.go b/backend/models/scheduler.go index 8c1bb422d..77cadb7ea 100644 --- a/backend/models/scheduler.go +++ b/backend/models/scheduler.go @@ -14,8 +14,8 @@ import ( type ImpactedProject struct { gorm.Model ID uuid.UUID `gorm:"primary_key"` - RepoFullName string `gorm:"index:idx_org_repo"` - CommitSha string `gorm:"index:idx_org_repo"` + RepoFullName string `gorm:"index:idx_org_repo"` + CommitSha string `gorm:"index:idx_org_repo"` PrNumber *int Branch *string ProjectName string @@ -38,11 +38,14 @@ const DiggerVCSBitbucket DiggerVCSType = "bitbucket" type DiggerBatch struct { gorm.Model ID uuid.UUID `gorm:"primary_key"` + DiggerBatchID string `gorm:"size:20,index:idx_digger_batch_id"` // shorter version of the ID to be able to use in check run Layer uint VCS DiggerVCSType PrNumber int - CommitSha string + CommitSha string CommentId *int64 + CheckRunId *string + CheckRunUrl *string AiSummaryCommentId string Status orchestrator_scheduler.DiggerBatchStatus BranchName string @@ -71,6 +74,8 @@ type DiggerJob struct { BatchID *string `gorm:"index:idx_digger_job_id"` PRCommentUrl string PRCommentId *int64 + CheckRunId *string + CheckRunUrl *string DiggerJobSummary DiggerJobSummary DiggerJobSummaryID uint SerializedJobSpec []byte @@ -87,6 +92,7 @@ type DiggerJob struct { WorkflowFile string 
WorkflowRunUrl *string StatusUpdatedAt time.Time + ReporterType string `gorm:"default:'lazy'"` // temporary, to be replaced by SerializedReporterSpec } type DiggerJobSummary struct { @@ -198,7 +204,7 @@ func (b *DiggerBatch) MapToJsonStruct() (orchestrator_scheduler.SerializedBatch, return res, nil } -func GetStatusCheckForJob(job *DiggerJob) (string, error) { +func GetCommitStatusForJob(job *DiggerJob) (string, error) { switch job.Status { case orchestrator_scheduler.DiggerJobStarted: return "pending", nil @@ -213,3 +219,35 @@ func GetStatusCheckForJob(job *DiggerJob) (string, error) { } return "", fmt.Errorf("unknown job status: %v", job.Status) } + +func GetCheckRunStatusForJob(job *DiggerJob) (string, error) { + switch job.Status { + case orchestrator_scheduler.DiggerJobStarted: + return "in_progress", nil + case orchestrator_scheduler.DiggerJobTriggered: + return "in_progress", nil + case orchestrator_scheduler.DiggerJobCreated: + return "in_progress", nil + case orchestrator_scheduler.DiggerJobSucceeded: + return "completed", nil + case orchestrator_scheduler.DiggerJobFailed: + return "completed", nil + } + return "", fmt.Errorf("unknown job status: %v", job.Status) +} + +func GetCheckRunConclusionForJob(job *DiggerJob) (string, error) { + switch job.Status { + case orchestrator_scheduler.DiggerJobStarted: + return "", nil + case orchestrator_scheduler.DiggerJobTriggered: + return "", nil + case orchestrator_scheduler.DiggerJobCreated: + return "", nil + case orchestrator_scheduler.DiggerJobSucceeded: + return "success", nil + case orchestrator_scheduler.DiggerJobFailed: + return "failure", nil + } + return "", fmt.Errorf("unknown job status: %v", job.Status) +} diff --git a/backend/models/scheduler_test.go b/backend/models/scheduler_test.go index 3516e2e42..46c25134d 100644 --- a/backend/models/scheduler_test.go +++ b/backend/models/scheduler_test.go @@ -75,7 +75,7 @@ func TestCreateDiggerJob(t *testing.T) { defer teardownSuite(t) batchId, _ := uuid.NewUUID() - job, err := database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml") + job, err := database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml", nil, nil, "lazy", "") assert.NoError(t, err) assert.NotNil(t, job) @@ -87,7 +87,7 @@ func TestCreateSingleJob(t *testing.T) { defer teardownSuite(t) batchId, _ := uuid.NewUUID() - job, err := database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml") + job, err := database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml", nil, nil, "lazy", "") assert.NoError(t, err) assert.NotNil(t, job) @@ -99,20 +99,20 @@ func TestFindDiggerJobsByParentJobId(t *testing.T) { defer teardownSuite(t) batchId, _ := uuid.NewUUID() - job, err := database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml") + job, err := database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml", nil, nil, "lazy", "") parentJobId := job.DiggerJobID assert.NoError(t, err) assert.NotNil(t, job) assert.NotZero(t, job.ID) - job, err = database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml") + job, err = database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml", nil, nil, "lazy", "") assert.NoError(t, err) assert.NotNil(t, job) assert.NotZero(t, job.ID) err = database.CreateDiggerJobParentLink(parentJobId, job.DiggerJobID) assert.Nil(t, err) - job, err = database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml") + job, err = database.CreateDiggerJob(batchId, []byte{100}, "digger_workflow.yml", nil, nil, "lazy", "") assert.NoError(t, 
err) assert.NotNil(t, job) err = database.CreateDiggerJobParentLink(parentJobId, job.DiggerJobID) diff --git a/backend/models/storage.go b/backend/models/storage.go index 55d55339f..066636c57 100644 --- a/backend/models/storage.go +++ b/backend/models/storage.go @@ -866,10 +866,23 @@ func (db *Database) GetDiggerBatch(batchId *uuid.UUID) (*DiggerBatch, error) { return batch, nil } -func (db *Database) CreateDiggerBatch(vcsType DiggerVCSType, githubInstallationId int64, repoOwner string, repoName string, repoFullname string, PRNumber int, diggerConfig string, branchName string, batchType scheduler.DiggerCommand, commentId *int64, gitlabProjectId int, aiSummaryCommentId string, reportTerraformOutputs bool, coverAllImpactedProjects bool, VCSConnectionId *uint, commitSha string) (*DiggerBatch, error) { +func (db *Database) GetDiggerBatchFromId(diggerBatchId string) (*DiggerBatch, error) { + batch := &DiggerBatch{} + result := db.GormDB.Where("digger_batch_id=? ", diggerBatchId).Find(batch) + if result.Error != nil { + if !errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, result.Error + } + } + return batch, nil +} + +func (db *Database) CreateDiggerBatch(vcsType DiggerVCSType, githubInstallationId int64, repoOwner string, repoName string, repoFullname string, PRNumber int, diggerConfig string, branchName string, batchType scheduler.DiggerCommand, commentId *int64, gitlabProjectId int, aiSummaryCommentId string, reportTerraformOutputs bool, coverAllImpactedProjects bool, VCSConnectionId *uint, commitSha string, checkRunId *string, checkRunUrl *string) (*DiggerBatch, error) { uid := uuid.New() + diggerBatchId := uniuri.NewLen(7) batch := &DiggerBatch{ ID: uid, + DiggerBatchID: diggerBatchId, VCS: vcsType, VCSConnectionId: VCSConnectionId, GithubInstallationId: githubInstallationId, @@ -879,6 +892,8 @@ func (db *Database) CreateDiggerBatch(vcsType DiggerVCSType, githubInstallationI PrNumber: PRNumber, CommitSha: commitSha, CommentId: commentId, + CheckRunId: checkRunId, + CheckRunUrl: checkRunUrl, Status: scheduler.BatchJobCreated, BranchName: branchName, DiggerConfig: diggerConfig, @@ -945,11 +960,11 @@ func (db *Database) UpdateBatchStatus(batch *DiggerBatch) error { return nil } -func (db *Database) CreateDiggerJob(batchId uuid.UUID, serializedJob []byte, workflowFile string) (*DiggerJob, error) { +func (db *Database) CreateDiggerJob(batchId uuid.UUID, serializedJob []byte, workflowFile string, checkRunId *string, checkRunUrl *string, reporterType string, projectName string) (*DiggerJob, error) { if serializedJob == nil || len(serializedJob) == 0 { return nil, fmt.Errorf("serializedJob can't be empty") } - jobId := uniuri.New() + jobId := uniuri.NewLen(10) batchIdStr := batchId.String() summary := &DiggerJobSummary{} @@ -959,8 +974,19 @@ func (db *Database) CreateDiggerJob(batchId uuid.UUID, serializedJob []byte, wor } workflowUrl := "#" - job := &DiggerJob{DiggerJobID: jobId, Status: scheduler.DiggerJobCreated, - BatchID: &batchIdStr, SerializedJobSpec: serializedJob, DiggerJobSummary: *summary, WorkflowRunUrl: &workflowUrl, WorkflowFile: workflowFile} + job := &DiggerJob{ + DiggerJobID: jobId, + Status: scheduler.DiggerJobCreated, + ProjectName: projectName, + BatchID: &batchIdStr, + CheckRunId: checkRunId, + CheckRunUrl: checkRunUrl, + SerializedJobSpec: serializedJob, + DiggerJobSummary: *summary, + WorkflowRunUrl: &workflowUrl, + WorkflowFile: workflowFile, + ReporterType: reporterType, + } result = db.GormDB.Save(job) if result.Error != nil { return nil, result.Error diff 
--git a/backend/models/storage_test.go b/backend/models/storage_test.go index a16924bb8..c2e1f8301 100644 --- a/backend/models/storage_test.go +++ b/backend/models/storage_test.go @@ -143,10 +143,10 @@ func TestGetDiggerJobsForBatchPreloadsSummary(t *testing.T) { resourcesUpdated := uint(2) resourcesDeleted := uint(3) - batch, err := DB.CreateDiggerBatch(DiggerVCSGithub, 123, repoOwner, repoName, repoFullName, prNumber, diggerconfig, branchName, batchType, &commentId, 0, "", false, true, nil, "") + batch, err := DB.CreateDiggerBatch(DiggerVCSGithub, 123, repoOwner, repoName, repoFullName, prNumber, diggerconfig, branchName, batchType, &commentId, 0, "", false, true, nil, "", nil, nil) assert.NoError(t, err) - job, err := DB.CreateDiggerJob(batch.ID, []byte(jobSpec), "workflow_file.yml") + job, err := DB.CreateDiggerJob(batch.ID, []byte(jobSpec), "workflow_file.yml", nil, nil, "lazy", "") assert.NoError(t, err) job, err = DB.UpdateDiggerJobSummary(job.DiggerJobID, resourcesCreated, resourcesUpdated, resourcesDeleted) diff --git a/backend/services/spec.go b/backend/services/spec.go index fbac384ac..caa48faff 100644 --- a/backend/services/spec.go +++ b/backend/services/spec.go @@ -189,7 +189,7 @@ func GetSpecFromJob(job models.DiggerJob) (*spec.Spec, error) { Job: jobSpec, Reporter: spec.ReporterSpec{ ReportingStrategy: "comments_per_run", - ReporterType: "lazy", + ReporterType: job.ReporterType, ReportTerraformOutput: batch.ReportTerraformOutputs, }, Lock: spec.LockSpec{ diff --git a/backend/tasks/runs_test.go b/backend/tasks/runs_test.go index 96669e62b..015b60d4c 100644 --- a/backend/tasks/runs_test.go +++ b/backend/tasks/runs_test.go @@ -139,7 +139,7 @@ func TestThatRunQueueItemMovesFromQueuedToPlanningAfterPickup(t *testing.T) { for i, testParam := range testParameters { ciService := github2.MockCiService{} - batch, _ := models.DB.CreateDiggerBatch(models.DiggerVCSGithub, 123, "", "", "", 22, "", "", "", nil, 0, "", false, true, nil, "") + batch, _ := models.DB.CreateDiggerBatch(models.DiggerVCSGithub, 123, "", "", "", 22, "", "", "", nil, 0, "", false, true, nil, "", nil, nil) project, _ := models.DB.CreateProject(fmt.Sprintf("test%v", i), "", nil, "", false, false) planStage, _ := models.DB.CreateDiggerRunStage(batch.ID.String()) applyStage, _ := models.DB.CreateDiggerRunStage(batch.ID.String()) diff --git a/backend/utils/comment_utils.go b/backend/utils/comment_utils.go index 78a0b892c..8a793b52d 100644 --- a/backend/utils/comment_utils.go +++ b/backend/utils/comment_utils.go @@ -55,6 +55,11 @@ func UpdatePRCommentRealtime(gh GithubClientProvider, batch *models.DiggerBatch) return fmt.Errorf("error requerying jobs for batch: %v", err) } + if freshBatch.CommentId == nil { + slog.Debug("No comment id found for batch, not updating", "batchId", batch.ID) + return nil + } + if len(freshJobs) == 0 { slog.Debug("No jobs found after requery", "batchId", freshBatch.ID) return nil @@ -133,9 +138,15 @@ func GenerateRealtimeCommentMessage(jobs []models.DiggerJob, batchType orchestra } // Safe handling of WorkflowRunUrl pointer - workflowUrl := "#" + + var checkRunUrl = "#" + if job.CheckRunUrl != nil { + checkRunUrl = *job.CheckRunUrl + } + + workflowRunUrl := "#" if job.WorkflowRunUrl != nil { - workflowUrl = *job.WorkflowRunUrl + workflowRunUrl = *job.WorkflowRunUrl } // Get project name from job spec @@ -173,9 +184,9 @@ func GenerateRealtimeCommentMessage(jobs []models.DiggerJob, batchType orchestra message += fmt.Sprintf("|%s **%s** |%s | %s | %d | %d | %d|\n", job.Status.ToEmoji(), 
projectDisplayName, - workflowUrl, + workflowRunUrl, job.Status.ToString(), - prCommentUrl, + checkRunUrl, jobTypeTitle, resourcesCreated, resourcesUpdated, diff --git a/backend/utils/github.go b/backend/utils/github.go index d00cff511..de8fcfc5d 100644 --- a/backend/utils/github.go +++ b/backend/utils/github.go @@ -7,6 +7,7 @@ import ( "log/slog" net "net/http" "os" + "strconv" "strings" "time" @@ -169,7 +170,7 @@ func GetGithubService(gh GithubClientProvider, installationId int64, repoFullNam return &ghService, token, nil } -func SetPRStatusForJobs(prService ci.PullRequestService, prNumber int, jobs []scheduler.Job) error { +func SetPRCommitStatusForJobs(prService ci.PullRequestService, prNumber int, jobs []scheduler.Job) error { slog.Info("Setting PR status for jobs", "prNumber", prNumber, "jobCount", len(jobs), @@ -241,6 +242,150 @@ func SetPRStatusForJobs(prService ci.PullRequestService, prNumber int, jobs []sc return nil } +func GetCheckDetailedUrl(checkRunId int64, repoOwner string, repoName string, prNumber int) string { + githubHostname := os.Getenv("DIGGER_GITHUB_HOSTNAME") + if githubHostname == "" { + githubHostname = "github.com" + } + url := fmt.Sprintf( + "https://%v/%s/%s/pull/%d/checks?check_run_id=%d", githubHostname, repoOwner, repoName, prNumber, checkRunId, + ) + return url +} + +// Checks are the more modern github way as opposed to "commit status" +// With checks you also get to set a page representing content of the check +func SetPRCheckForJobs(ghService *github2.GithubService, prNumber int, jobs []scheduler.Job, commitSha string, repoName string, repoOwner string) (*CheckRunData, map[string]CheckRunData, error) { + slog.Info("commitSha", "commitsha", commitSha) + slog.Info("Setting PR status for jobs", + "prNumber", prNumber, + "jobCount", len(jobs), + "commitSha", commitSha, + ) + var batchCheckRunId CheckRunData + var jobCheckRunIds = make(map[string]CheckRunData) + + for _, job := range jobs { + for _, command := range job.Commands { + var cr *github.CheckRun + var err error + switch command { + case "digger plan": + slog.Debug("Setting PR status for plan", + "prNumber", prNumber, + "project", job.ProjectName, + ) + var actions []*github.CheckRunAction + cr, err = ghService.CreateCheckRun(job.GetProjectAlias()+"/plan", "in_progress", "", "Waiting for plan...", "", "Plan result will appear here", commitSha, actions) + jobCheckRunIds[job.ProjectName] = CheckRunData{ + Id: strconv.FormatInt(*cr.ID, 10), + Url: GetCheckDetailedUrl(*cr.ID, repoOwner, repoName, prNumber, ), + } + + case "digger apply": + slog.Debug("Setting PR status for apply", + "prNumber", prNumber, + "project", job.ProjectName, + ) + cr, err = ghService.CreateCheckRun(job.GetProjectAlias()+"/apply", "in_progress", "", "Waiting for apply...", "", "Apply result will appear here", commitSha, nil) + jobCheckRunIds[job.ProjectName] = CheckRunData{ + Id: strconv.FormatInt(*cr.ID, 10), + Url: GetCheckDetailedUrl(*cr.ID, repoOwner, repoName, prNumber, ), + } + } + if err != nil { + slog.Error("Failed to set job PR status", + "prNumber", prNumber, + "project", job.ProjectName, + "command", command, + "error", err, + ) + return nil, nil, fmt.Errorf("Error setting pr status: %v", err) + } + } + } + + // Report aggregate status for digger/plan or digger/apply + jobsSummaryTable := GetInitialJobSummary(jobs) + if len(jobs) > 0 { + var err error + var cr *github.CheckRun + if scheduler.IsPlanJobs(jobs) { + slog.Debug("Setting aggregate plan status", "prNumber", prNumber) + cr, err = 
ghService.CreateCheckRun("digger/plan", "in_progress", "", "Pending start...", "", jobsSummaryTable, commitSha, nil) + batchCheckRunId = CheckRunData{ + Id: strconv.FormatInt(*cr.ID, 10), + Url: GetCheckDetailedUrl(*cr.ID, repoOwner, repoName, prNumber, ), + } + } else { + slog.Debug("Setting aggregate apply status", "prNumber", prNumber) + cr, err = ghService.CreateCheckRun("digger/apply", "in_progress", "", "Pending start...", "", jobsSummaryTable, commitSha, nil) + batchCheckRunId = CheckRunData{ + Id: strconv.FormatInt(*cr.ID, 10), + Url: GetCheckDetailedUrl(*cr.ID, repoOwner, repoName, prNumber, ), + } + } + if err != nil { + slog.Error("Failed to set aggregate PR status", + "prNumber", prNumber, + "error", err, + ) + return nil, nil, fmt.Errorf("error setting pr status: %v", err) + } + } else { + slog.Debug("Setting success status for empty job list", "prNumber", prNumber) + _, err := ghService.CreateCheckRun("digger/plan", "completed", "success", "No impacted projects", "Check your configuration and files changed if this is unexpected", "digger/plan", commitSha, nil) + if err != nil { + slog.Error("Failed to set success plan status", "prNumber", prNumber, "error", err) + return nil, nil, fmt.Errorf("error setting pr status: %v", err) + } + + _, err = ghService.CreateCheckRun("digger/apply", "completed", "success", "No impacted projects", "Check your configuration and files changed if this is unexpected", "digger/apply", commitSha, nil) + if err != nil { + slog.Error("Failed to set success apply status", "prNumber", prNumber, "error", err) + return nil, nil, fmt.Errorf("error setting pr status: %v", err) + } + } + + slog.Info("Successfully set PR status", "prNumber", prNumber) + return &batchCheckRunId, jobCheckRunIds, nil +} + +type CheckedRunActionIdentifier string +const CheckedRunActionBatchApply CheckedRunActionIdentifier = "abatch" +const CheckedRunActionJobApply CheckedRunActionIdentifier = "ajob" + +func GetActionsForBatch(batch *models.DiggerBatch) []*github.CheckRunAction { + batchActions := make([]*github.CheckRunAction, 0) + if batch.Status == scheduler.BatchJobSucceeded { + batchActions = append(batchActions, &github.CheckRunAction{ + Label: "Apply all", // max 20 chars + Description: "Apply all jobs", // max 40 chars + Identifier: fmt.Sprintf("%v:%v", CheckedRunActionBatchApply, batch.DiggerBatchID), // max 20 chars + }) + } + return batchActions +} + +func GetActionsForJob(job *models.DiggerJob) []*github.CheckRunAction { + batchActions := make([]*github.CheckRunAction, 0) + if job.Status == scheduler.DiggerJobSucceeded { + batch := job.Batch + batchActions = append(batchActions, &github.CheckRunAction{ + Label: "Apply all", // max 20 chars + Description: "Apply all jobs", // max 40 chars + Identifier: fmt.Sprintf("%v:%v", CheckedRunActionBatchApply, batch.DiggerBatchID), // max 20 chars + }) + // TODO: in the future when we support "apply single job we can add this + //batchActions = append(batchActions, &github.CheckRunAction{ + // Label: "Apply job", // max 20 chars + // Description: "Apply this job", // max 40 chars + // Identifier: fmt.Sprintf("%v:%v", CheckedRunActionJobApply, job.DiggerJobID), // max 20 chars + //}) + } + return batchActions +} + func GetGithubHostname() string { githubHostname := os.Getenv("DIGGER_GITHUB_HOSTNAME") if githubHostname == "" { diff --git a/backend/utils/github_types.go b/backend/utils/github_types.go new file mode 100644 index 000000000..93f3147eb --- /dev/null +++ b/backend/utils/github_types.go @@ -0,0 +1,6 @@ +package utils + 
+type CheckRunData struct {
+	Id  string
+	Url string
+}
\ No newline at end of file
diff --git a/backend/utils/graphs.go b/backend/utils/graphs.go
index c09bb2833..69d043de6 100644
--- a/backend/utils/graphs.go
+++ b/backend/utils/graphs.go
@@ -15,7 +15,7 @@ import (
 )
 
 // ConvertJobsToDiggerJobs jobs is map with project name as a key and a Job as a value
-func ConvertJobsToDiggerJobs(jobType scheduler.DiggerCommand, vcsType models.DiggerVCSType, organisationId uint, jobsMap map[string]scheduler.Job, projectMap map[string]configuration.Project, projectsGraph graph.Graph[string, configuration.Project], githubInstallationId int64, branch string, prNumber int, repoOwner string, repoName string, repoFullName string, commitSha string, commentId int64, diggerConfigStr string, gitlabProjectId int, aiSummaryCommentId string, reportTerraformOutput bool, coverAllImpactedProjects bool, VCSConnectionId *uint) (*uuid.UUID, map[string]*models.DiggerJob, error) {
+func ConvertJobsToDiggerJobs(jobType scheduler.DiggerCommand, jobReporterType string, vcsType models.DiggerVCSType, organisationId uint, jobsMap map[string]scheduler.Job, projectMap map[string]configuration.Project, projectsGraph graph.Graph[string, configuration.Project], githubInstallationId int64, branch string, prNumber int, repoOwner string, repoName string, repoFullName string, commitSha string, commentId *int64, diggerConfigStr string, gitlabProjectId int, aiSummaryCommentId string, reportTerraformOutput bool, coverAllImpactedProjects bool, VCSConnectionId *uint, batchCheckRunData *CheckRunData, jobsCheckRunIdsMap map[string]CheckRunData) (*uuid.UUID, map[string]*models.DiggerJob, error) {
 	slog.Info("Converting jobs to Digger jobs",
 		"jobType", jobType,
 		"vcsType", vcsType,
@@ -72,7 +72,13 @@ func ConvertJobsToDiggerJobs(jobType scheduler.DiggerCommand, vcsType models.Dig
 		)
 	}
 
-	batch, err := models.DB.CreateDiggerBatch(vcsType, githubInstallationId, repoOwner, repoName, repoFullName, prNumber, diggerConfigStr, branch, jobType, &commentId, gitlabProjectId, aiSummaryCommentId, reportTerraformOutput, coverAllImpactedProjects, VCSConnectionId, commitSha)
+	var batchCheckRunId *string = nil
+	var batchCheckRunUrl *string = nil
+	if batchCheckRunData != nil {
+		batchCheckRunId = &batchCheckRunData.Id
+		batchCheckRunUrl = &batchCheckRunData.Url
+	}
+	batch, err := models.DB.CreateDiggerBatch(vcsType, githubInstallationId, repoOwner, repoName, repoFullName, prNumber, diggerConfigStr, branch, jobType, commentId, gitlabProjectId, aiSummaryCommentId, reportTerraformOutput, coverAllImpactedProjects, VCSConnectionId, commitSha, batchCheckRunId, batchCheckRunUrl)
 	if err != nil {
 		slog.Error("Failed to create batch", "error", err)
 		return nil, nil, fmt.Errorf("failed to create batch: %v", err)
@@ -93,10 +99,17 @@ func ConvertJobsToDiggerJobs(jobType scheduler.DiggerCommand, vcsType models.Dig
 	}
 
 	visit := func(value string) bool {
+		var jobCheckRunId *string = nil
+		var jobCheckRunUrl *string = nil
+		if jobsCheckRunIdsMap != nil {
+			if v, ok := jobsCheckRunIdsMap[value]; ok {
+				jobCheckRunId = &v.Id
+				jobCheckRunUrl = &v.Url
+			}
+		}
 		if predecessorMap[value] == nil || len(predecessorMap[value]) == 0 {
 			slog.Debug("Processing node with no parents", "projectName", value)
-
-			parentJob, err := models.DB.CreateDiggerJob(batch.ID, marshalledJobsMap[value], projectMap[value].WorkflowFile)
+			parentJob, err := models.DB.CreateDiggerJob(batch.ID, marshalledJobsMap[value], projectMap[value].WorkflowFile, jobCheckRunId, jobCheckRunUrl, jobReporterType, value)
 			if err != nil {
 				slog.Error("Failed to create job",
 					"projectName", value,
@@ -133,7 +146,7 @@ func ConvertJobsToDiggerJobs(jobType scheduler.DiggerCommand, vcsType models.Dig
 			parent := edge.Source
 			parentDiggerJob := result[parent]
 
-			childJob, err := models.DB.CreateDiggerJob(batch.ID, marshalledJobsMap[value], projectMap[value].WorkflowFile)
+			childJob, err := models.DB.CreateDiggerJob(batch.ID, marshalledJobsMap[value], projectMap[value].WorkflowFile, jobCheckRunId, jobCheckRunUrl, jobReporterType, value)
 			if err != nil {
 				slog.Error("Failed to create child job",
 					"projectName", value,
diff --git a/backend/utils/pr_comment.go b/backend/utils/pr_comment.go
index 40400f3b0..ad25a1a54 100644
--- a/backend/utils/pr_comment.go
+++ b/backend/utils/pr_comment.go
@@ -169,17 +169,7 @@ func ReportInitialJobsStatus(cr *CommentReporter, jobs []scheduler.Job) error {
 		"jobCount", len(jobs),
 	)
 
-	message := ""
-	if len(jobs) == 0 {
-		message = message + ":construction_worker: No projects impacted"
-	} else {
-		message = message + fmt.Sprintf("| Project | Status |\n")
-		message = message + fmt.Sprintf("|---------|--------|\n")
-		for _, job := range jobs {
-			message = message + fmt.Sprintf(""+
-				"|:clock11: **%v**|pending...|\n", job.GetProjectAlias())
-		}
-	}
+	message := GetInitialJobSummary(jobs)
 	message = trimMessageIfExceedsMaxLength(message)
 
 	err := prService.EditComment(prNumber, commentId, message)
@@ -196,6 +186,21 @@ func ReportInitialJobsStatus(cr *CommentReporter, jobs []scheduler.Job) error {
 	return nil
 }
 
+func GetInitialJobSummary(jobs []scheduler.Job) string {
+	message := ""
+	if len(jobs) == 0 {
+		message = message + ":construction_worker: No projects impacted"
+	} else {
+		message = message + fmt.Sprintf("| Project | Status |\n")
+		message = message + fmt.Sprintf("|---------|--------|\n")
+		for _, job := range jobs {
+			message = message + fmt.Sprintf(""+
+				"|:clock11: **%v**|pending...|\n", job.GetProjectAlias())
+		}
+	}
+	return message
+}
+
 func ReportLayersTableForJobs(cr *CommentReporter, jobs []scheduler.Job) error {
 	prNumber := cr.PrNumber
 	prService := cr.PrService
diff --git a/drift/controllers/drift.go b/drift/controllers/drift.go
index a4d8de4e3..5cfaa3e68 100644
--- a/drift/controllers/drift.go
+++ b/drift/controllers/drift.go
@@ -166,7 +166,7 @@ func (mc MainController) TriggerDriftRunForProject(c *gin.Context) {
 
 	}
 
-	batch, err := models.DB.CreateDiggerBatch(models.DiggerVCSGithub, installationid, repoOwner, repoName, repoFullName, 0, "", branch, scheduler.DiggerCommandPlan, nil, 0, "", true, false, nil, "")
+	batch, err := models.DB.CreateDiggerBatch(models.DiggerVCSGithub, installationid, repoOwner, repoName, repoFullName, 0, "", branch, scheduler.DiggerCommandPlan, nil, 0, "", true, false, nil, "", nil, nil)
 	if err != nil {
 		log.Printf("error creating the batch: %v", err)
 		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("Error creating batch entry")})
diff --git a/ee/backend/controllers/bitbucket.go b/ee/backend/controllers/bitbucket.go
index 1183b2569..00528b669 100644
--- a/ee/backend/controllers/bitbucket.go
+++ b/ee/backend/controllers/bitbucket.go
@@ -278,11 +278,11 @@ func handleIssueCommentEventBB(bitbucketProvider utils.BitbucketProvider, payloa
 	if len(jobs) == 0 {
 		log.Printf("no projects impacated, succeeding")
 		// This one is for aggregate reporting
-		err = utils.SetPRStatusForJobs(bbService, issueNumber, jobs)
+		err = utils.SetPRCommitStatusForJobs(bbService, issueNumber, jobs)
 		return nil
 	}
 
-	err = utils.SetPRStatusForJobs(bbService, issueNumber, jobs)
+	err = utils.SetPRCommitStatusForJobs(bbService, issueNumber, jobs)
 	if err != nil {
 		log.Printf("error setting status for PR: %v", err)
 		utils.InitCommentReporter(bbService, issueNumber, fmt.Sprintf(":x: error setting status for PR: %v", err))
@@ -305,7 +305,7 @@ func handleIssueCommentEventBB(bitbucketProvider utils.BitbucketProvider, payloa
 		return fmt.Errorf("parseint error: %v", err)
 	}
 
-	batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, models.DiggerVCSBitbucket, organisationId, impactedProjectsJobMap, impactedProjectsMap, projectsGraph, 0, branch, issueNumber, repoOwner, repoName, repoFullName, commitSha, commentId64, diggerYmlStr, 0, "", false, true, vcsConnectionId)
+	batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, "lazy", models.DiggerVCSBitbucket, organisationId, impactedProjectsJobMap, impactedProjectsMap, projectsGraph, 0, branch, issueNumber, repoOwner, repoName, repoFullName, commitSha, &commentId64, diggerYmlStr, 0, "", false, true, vcsConnectionId, nil, nil)
 	if err != nil {
 		log.Printf("ConvertJobsToDiggerJobs error: %v", err)
 		utils.InitCommentReporter(bbService, issueNumber, fmt.Sprintf(":x: ConvertJobsToDiggerJobs error: %v", err))
diff --git a/ee/backend/controllers/gitlab.go b/ee/backend/controllers/gitlab.go
index 5027ec036..c4ee20151 100644
--- a/ee/backend/controllers/gitlab.go
+++ b/ee/backend/controllers/gitlab.go
@@ -203,7 +203,7 @@ func handlePullRequestEvent(gitlabProvider utils.GitlabProvider, payload *gitlab
 		// TODO use status checks instead: https://github.com/diggerhq/digger/issues/1135
 		log.Printf("No projects impacted; not starting any jobs")
 		// This one is for aggregate reporting
-		err = utils.SetPRStatusForJobs(glService, prNumber, jobsForImpactedProjects)
+		err = utils.SetPRCommitStatusForJobs(glService, prNumber, jobsForImpactedProjects)
 		return nil
 	}
 
@@ -265,7 +265,7 @@ func handlePullRequestEvent(gitlabProvider utils.GitlabProvider, payload *gitlab
 		return fmt.Errorf("failed to comment initial status for jobs")
 	}
 
-	err = utils.SetPRStatusForJobs(glService, prNumber, jobsForImpactedProjects)
+	err = utils.SetPRCommitStatusForJobs(glService, prNumber, jobsForImpactedProjects)
 	if err != nil {
 		log.Printf("error setting status for PR: %v", err)
 		utils.InitCommentReporter(glService, prNumber, fmt.Sprintf(":x: error setting status for PR: %v", err))
@@ -288,7 +288,7 @@ func handlePullRequestEvent(gitlabProvider utils.GitlabProvider, payload *gitlab
 		utils.InitCommentReporter(glService, prNumber, fmt.Sprintf(":x: could not handle commentId: %v", err))
 	}
 
-	batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, models.DiggerVCSGitlab, organisationId, impactedJobsMap, impactedProjectsMap, projectsGraph, 0, branch, prNumber, repoOwner, repoName, repoFullName, commitSha, commentId, diggeryamlStr, projectId, "", false, coverAllImpactedProjects, nil)
+	batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, "lazy", models.DiggerVCSGitlab, organisationId, impactedJobsMap, impactedProjectsMap, projectsGraph, 0, branch, prNumber, repoOwner, repoName, repoFullName, commitSha, &commentId, diggeryamlStr, projectId, "", false, coverAllImpactedProjects, nil, nil, nil)
 	if err != nil {
 		log.Printf("ConvertJobsToDiggerJobs error: %v", err)
 		utils.InitCommentReporter(glService, prNumber, fmt.Sprintf(":x: ConvertJobsToDiggerJobs error: %v", err))
@@ -461,11 +461,11 @@ func handleIssueCommentEvent(gitlabProvider utils.GitlabProvider, payload *gitla
 	if len(jobs) == 0 {
 		log.Printf("no projects impacated, succeeding")
 		// This one is for aggregate reporting
-		err = utils.SetPRStatusForJobs(glService, issueNumber, jobs)
+		err = utils.SetPRCommitStatusForJobs(glService, issueNumber, jobs)
 		return nil
 	}
 
-	err = utils.SetPRStatusForJobs(glService, issueNumber, jobs)
+	err = utils.SetPRCommitStatusForJobs(glService, issueNumber, jobs)
 	if err != nil {
 		log.Printf("error setting status for PR: %v", err)
 		utils.InitCommentReporter(glService, issueNumber, fmt.Sprintf(":x: error setting status for PR: %v", err))
@@ -488,7 +488,7 @@ func handleIssueCommentEvent(gitlabProvider utils.GitlabProvider, payload *gitla
 		return fmt.Errorf("parseint error: %v", err)
 	}
 
-	batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, models.DiggerVCSGitlab, organisationId, impactedProjectsJobMap, impactedProjectsMap, projectsGraph, 0, branch, issueNumber, repoOwner, repoName, repoFullName, commitSha, commentId64, diggerYmlStr, projectId, "", false, coverAllImpactedProjects, nil)
+	batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, "lazy", models.DiggerVCSGitlab, organisationId, impactedProjectsJobMap, impactedProjectsMap, projectsGraph, 0, branch, issueNumber, repoOwner, repoName, repoFullName, commitSha, &commentId64, diggerYmlStr, projectId, "", false, coverAllImpactedProjects, nil, nil, nil)
 	if err != nil {
 		log.Printf("ConvertJobsToDiggerJobs error: %v", err)
 		utils.InitCommentReporter(glService, issueNumber, fmt.Sprintf(":x: ConvertJobsToDiggerJobs error: %v", err))
diff --git a/ee/backend/hooks/github.go b/ee/backend/hooks/github.go
index bbbbb1b5e..607744d1e 100644
--- a/ee/backend/hooks/github.go
+++ b/ee/backend/hooks/github.go
@@ -71,7 +71,7 @@ var DriftReconcilliationHook ce_controllers.IssueCommentHook = func(gh utils.Git
 		return nil
 	}
 
-	diggerYmlStr, ghService, config, projectsGraph, err := ce_controllers.GetDiggerConfigForBranch(gh, installationId, repoFullName, repoOwner, repoName, cloneURL, defaultBranch, nil, nil)
+	diggerYmlStr, ghService, config, projectsGraph, err := ce_controllers.GetDiggerConfigForBranchOrSha(gh, installationId, repoFullName, repoOwner, repoName, cloneURL, defaultBranch, "", nil, nil)
 	if err != nil {
 		log.Printf("Error loading digger.yml: %v", err)
 		return fmt.Errorf("error loading digger.yml")
@@ -152,7 +152,7 @@ var DriftReconcilliationHook ce_controllers.IssueCommentHook = func(gh utils.Git
 		utils.InitCommentReporter(ghService, issueNumber, fmt.Sprintf(":x: could not handle commentId: %v", err))
 	}
 
-	batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, "github", orgId, impactedProjectsJobMap, impactedProjectsMap, projectsGraph, installationId, defaultBranch, issueNumber, repoOwner, repoName, repoFullName, "", reporterCommentId, diggerYmlStr, 0, "", false, coverAllImpactedProjects, nil)
+	batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, "lazy", "github", orgId, impactedProjectsJobMap, impactedProjectsMap, projectsGraph, installationId, defaultBranch, issueNumber, repoOwner, repoName, repoFullName, "", &reporterCommentId, diggerYmlStr, 0, "", false, coverAllImpactedProjects, nil, nil, nil)
 	if err != nil {
 		log.Printf("ConvertJobsToDiggerJobs error: %v", err)
 		utils.InitCommentReporter(ghService, issueNumber, fmt.Sprintf(":x: ConvertJobsToDiggerJobs error: %v", err))
diff --git a/libs/ci/github/github.go b/libs/ci/github/github.go
index 6c5e59028..f86583356 100644
--- a/libs/ci/github/github.go
+++ b/libs/ci/github/github.go
@@ -308,6 +308,151 @@ func (svc GithubService) SetStatus(prNumber int, status string, statusContext st
 	return err
 }
+// modern check runs for github (not the commit status)
+func (svc GithubService) CreateCheckRun(name string, status string, conclusion string, title string, summary string, text string, headSHA string, actions []*github.CheckRunAction) (*github.CheckRun, error) {
+	client := svc.Client
+	owner := svc.Owner
+	repoName := svc.RepoName
+	opts := github.CreateCheckRunOptions{
+		Name:    name,
+		HeadSHA: headSHA,               // commit SHA to attach the check to
+		Status:  github.String(status), // or "queued" / "in_progress"
+		Output: &github.CheckRunOutput{
+			Title:   github.String(title),
+			Summary: github.String(summary),
+			Text:    github.String(text),
+		},
+	}
+
+	if conclusion != "" {
+		opts.Conclusion = github.String(conclusion)
+	}
+
+	if actions != nil {
+		opts.Actions = actions
+	}
+
+	ctx := context.Background()
+	checkRun, _, err := client.Checks.CreateCheckRun(ctx, owner, repoName, opts)
+	return checkRun, err
+}
+
+type GithubCheckRunUpdateOptions struct {
+	Status     *string
+	Conclusion *string
+	Title      *string
+	Summary    *string
+	Text       *string
+	Actions    []*github.CheckRunAction
+}
+
+func (svc GithubService) UpdateCheckRun(checkRunId string, options GithubCheckRunUpdateOptions) (*github.CheckRun, error) {
+	status := options.Status
+	conclusion := options.Conclusion
+	title := options.Title
+	summary := options.Summary
+	text := options.Text
+	actions := options.Actions
+
+	slog.Debug("Updating check run",
+		"checkRunId", checkRunId,
+		"status", status,
+		"conclusion", conclusion,
+		"title", title,
+		"summary", summary,
+		"text", text,
+		"actions", actions,
+	)
+	client := svc.Client
+	owner := svc.Owner
+	repoName := svc.RepoName
+
+	checkRunIdInt64, err := strconv.ParseInt(checkRunId, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("could not convert id %v to i64: %v", checkRunId, err)
+	}
+
+	ctx := context.Background()
+
+	// Fetch existing check run to preserve annotations and other output data
+	existingCheckRun, _, err := client.Checks.GetCheckRun(ctx, owner, repoName, checkRunIdInt64)
+	if err != nil {
+		slog.Warn("Failed to fetch existing check run",
+			"checkRunId", checkRunId,
+			"error", err,
+		)
+		return nil, fmt.Errorf("could not fetch existing check run: %v", err)
+	}
+
+	// Merge existing output with new output, preserving annotations and images
+	output := &github.CheckRunOutput{}
+	if existingCheckRun.Output != nil {
+		// Preserve existing annotations if they exist
+		if existingCheckRun.Output.Annotations != nil && len(existingCheckRun.Output.Annotations) > 0 {
+			output.Annotations = existingCheckRun.Output.Annotations
+		}
+		// Preserve existing images if they exist
+		if existingCheckRun.Output.Images != nil && len(existingCheckRun.Output.Images) > 0 {
+			output.Images = existingCheckRun.Output.Images
+		}
+	}
+
+	newActions := []*github.CheckRunAction{}
+	if actions != nil {
+		newActions = actions
+	}
+
+	// Update with new values (only when provided); otherwise keep the existing values
+	if title != nil {
+		output.Title = github.String(*title)
+	} else if existingCheckRun.Output != nil && existingCheckRun.Output.Title != nil {
+		output.Title = existingCheckRun.Output.Title
+	}
+
+	if summary != nil {
+		output.Summary = github.String(*summary)
+	} else if existingCheckRun.Output != nil && existingCheckRun.Output.Summary != nil {
+		output.Summary = existingCheckRun.Output.Summary
+	}
+
+	if text != nil {
+		output.Text = github.String(*text)
+	} else if existingCheckRun.Output != nil && existingCheckRun.Output.Text != nil {
+		output.Text = existingCheckRun.Output.Text
+	}
+
+	var newStatus *string = nil
+	if status != nil {
+		newStatus = status
+	} else {
+		newStatus = existingCheckRun.Status
+	}
+
+	opts := github.UpdateCheckRunOptions{
+		Name:    *existingCheckRun.Name,
+		Output:  output,
+		Actions: newActions,
+	}
+
+	if newStatus != nil {
+		opts.Status = github.String(*newStatus)
+	}
+
+	if conclusion != nil {
+		opts.Conclusion = github.String(*conclusion)
+	}
+
+	checkRun, _, err := client.Checks.UpdateCheckRun(ctx, owner, repoName, checkRunIdInt64, opts)
+	if err != nil {
+		slog.Error("Failed to update check run",
+			"inputCheckRunId", checkRunId,
+			"error", err)
+		return checkRun, err
+	}
+
+	return checkRun, err
+}
+
 func (svc GithubService) GetCombinedPullRequestStatus(prNumber int) (string, error) {
 	pr, _, err := svc.Client.PullRequests.Get(context.Background(), svc.Owner, svc.RepoName, prNumber)
 	if err != nil {
@@ -859,4 +1004,4 @@ func CheckIfShowProjectsComment(event interface{}) bool {
 		slog.Debug("show-projects comment detected")
 	}
 	return result
-}
\ No newline at end of file
+}
diff --git a/libs/digger_config/config.go b/libs/digger_config/config.go
index 9e33ea010..7f06c2f7d 100644
--- a/libs/digger_config/config.go
+++ b/libs/digger_config/config.go
@@ -34,6 +34,7 @@ type DiggerConfig struct {
 
 type ReporterConfig struct {
 	AiSummary       bool
+	CommentsEnabled bool
 }
 
 type DependencyConfiguration struct {
diff --git a/libs/digger_config/converters.go b/libs/digger_config/converters.go
index 6dffdc694..20a749190 100644
--- a/libs/digger_config/converters.go
+++ b/libs/digger_config/converters.go
@@ -194,11 +194,13 @@ func copyReporterConfig(r *ReportingConfigYaml) ReporterConfig {
 	if r == nil {
 		return ReporterConfig{
 			AiSummary:       false,
+			CommentsEnabled: true,
 		}
 	}
 
 	return ReporterConfig{
 		AiSummary:       r.AiSummary,
+		CommentsEnabled: r.CommentsEnabled,
 	}
 }
 
@@ -231,7 +233,7 @@ func ConvertDiggerYamlToConfig(diggerYaml *DiggerConfigYaml) (*DiggerConfig, gra
 	if diggerYaml.ReportTerraformOutputs != nil {
 		diggerConfig.ReportTerraformOutputs = *diggerYaml.ReportTerraformOutputs
 	} else {
-		diggerConfig.ReportTerraformOutputs = false
+		diggerConfig.ReportTerraformOutputs = true
 	}
 
 	diggerConfig.Reporting = copyReporterConfig(diggerYaml.Reporting)
diff --git a/libs/digger_config/yaml.go b/libs/digger_config/yaml.go
index 914dd2478..83bfcf98c 100644
--- a/libs/digger_config/yaml.go
+++ b/libs/digger_config/yaml.go
@@ -31,6 +31,7 @@ type DiggerConfigYaml struct {
 
 type ReportingConfigYaml struct {
 	AiSummary       bool `yaml:"ai_summary"`
+	CommentsEnabled bool `yaml:"comments_enabled"`
 }
 
 type DependencyConfigurationYaml struct {
@@ -191,6 +192,16 @@ type TerragruntParsingConfig struct {
 	DependsOnOrdering *bool `yaml:"dependsOnOrdering,omitempty"`
 }
 
+func (c *ReportingConfigYaml) UnmarshalYAML(unmarshal func(any) error) error {
+	// set defaults
+	c.AiSummary = false
+	c.CommentsEnabled = true
+
+	// overlay YAML values
+	type plain ReportingConfigYaml
+	return unmarshal((*plain)(c))
+}
+
 func (p *ProjectYaml) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	type rawProject ProjectYaml
 	raw := rawProject{
diff --git a/libs/scheduler/models.go b/libs/scheduler/models.go
index da6f85048..041bb1890 100644
--- a/libs/scheduler/models.go
+++ b/libs/scheduler/models.go
@@ -145,7 +145,7 @@ func (b *SerializedBatch) IsApply() (bool, error) {
 	return IsPlanJobSpecs(jobSpecs), nil
 }
 
-func (b *SerializedBatch) ToStatusCheck() string {
+func (b *SerializedBatch) ToCommitStatusCheck() string {
 	switch b.Status {
 	case BatchJobCreated:
 		return "pending"
@@ -160,6 +160,41 @@
 	}
 }
+
+func (b *SerializedBatch) ToCheckRunStatus() string {
+	switch b.Status {
+	case BatchJobCreated:
+		return "in_progress"
+	case BatchJobInvalidated:
+		return "completed"
+	case BatchJobFailed:
+		return "completed"
+	case BatchJobSucceeded:
+		return "completed"
+	default:
+		return "in_progress"
+	}
+}
+
+func (b *SerializedBatch) ToCheckRunConclusion() *string {
+	switch b.Status {
+	case BatchJobCreated:
+		return nil
+	case BatchJobInvalidated:
+		res := "cancelled"
+		return &res
+	case BatchJobFailed:
+		res := "failure"
+		return &res
+	case BatchJobSucceeded:
+		res := "success"
+		return &res
+	default:
+		return nil
+	}
+}
+
+
 func (s *SerializedJob) ResourcesSummaryString(isPlan bool) string {
 	if !isPlan {
 		return ""