diff --git a/backend/controllers/github.go b/backend/controllers/github.go index 39dae973..e7e3e508 100644 --- a/backend/controllers/github.go +++ b/backend/controllers/github.go @@ -606,7 +606,7 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR return nil } -func getDiggerConfigForBranch(gh utils.GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string, cloneUrl string, branch string) (string, *dg_github.GithubService, *dg_configuration.DiggerConfig, graph.Graph[string, dg_configuration.Project], error) { +func getDiggerConfigForBranch(gh utils.GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string, cloneUrl string, branch string, prNumber int) (string, *dg_github.GithubService, *dg_configuration.DiggerConfig, graph.Graph[string, dg_configuration.Project], error) { ghService, token, err := utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName) if err != nil { log.Printf("Error getting github service: %v", err) @@ -616,10 +616,16 @@ func getDiggerConfigForBranch(gh utils.GithubClientProvider, installationId int6 var config *dg_configuration.DiggerConfig var diggerYmlStr string var dependencyGraph graph.Graph[string, dg_configuration.Project] + + changedFiles, err := ghService.GetChangedFiles(prNumber) + if err != nil { + log.Printf("Error getting changed files: %v", err) + return "", nil, nil, nil, fmt.Errorf("error getting changed files") + } err = utils.CloneGitRepoAndDoAction(cloneUrl, branch, *token, func(dir string) error { diggerYmlBytes, err := os.ReadFile(path.Join(dir, "digger.yml")) diggerYmlStr = string(diggerYmlBytes) - config, _, dependencyGraph, err = dg_configuration.LoadDiggerConfig(dir, true) + config, _, dependencyGraph, err = dg_configuration.LoadDiggerConfig(dir, true, changedFiles) if err != nil { log.Printf("Error loading digger config: %v", err) return err @@ -627,8 +633,8 @@ func getDiggerConfigForBranch(gh utils.GithubClientProvider, installationId int6 return nil }) if err != nil { - log.Printf("Error generating projects: %v", err) - return "", nil, nil, nil, fmt.Errorf("error generating projects") + log.Printf("Error cloning and loading config: %v", err) + return "", nil, nil, nil, fmt.Errorf("error cloning and loading config") } log.Printf("Digger config loadded successfully\n") @@ -649,7 +655,7 @@ func getDiggerConfigForPR(gh utils.GithubClientProvider, installationId int64, r return "", nil, nil, nil, nil, nil, fmt.Errorf("error getting branch name") } - diggerYmlStr, ghService, config, dependencyGraph, err := getDiggerConfigForBranch(gh, installationId, repoFullName, repoOwner, repoName, cloneUrl, prBranch) + diggerYmlStr, ghService, config, dependencyGraph, err := getDiggerConfigForBranch(gh, installationId, repoFullName, repoOwner, repoName, cloneUrl, prBranch, prNumber) if err != nil { log.Printf("Error loading digger.yml: %v", err) return "", nil, nil, nil, nil, nil, fmt.Errorf("error loading digger.yml") diff --git a/backend/controllers/github_after_merge.go b/backend/controllers/github_after_merge.go index 5d479f6b..878dc4e1 100644 --- a/backend/controllers/github_after_merge.go +++ b/backend/controllers/github_after_merge.go @@ -158,7 +158,7 @@ func handlePushEventApplyAfterMerge(gh utils.GithubClientProvider, payload *gith // ==== starting apply after merge part ======= // TODO: Replace branch with actual commitID - diggerYmlStr, ghService, config, projectsGraph, err := getDiggerConfigForBranch(gh, 
installationId, repoFullName, repoOwner, repoName, cloneURL, defaultBranch) + diggerYmlStr, ghService, config, projectsGraph, err := getDiggerConfigForBranch(gh, installationId, repoFullName, repoOwner, repoName, cloneURL, defaultBranch, 0) if err != nil { log.Printf("getDiggerConfigForPR error: %v", err) return fmt.Errorf("error getting digger config") diff --git a/cli/cmd/digger/default.go b/cli/cmd/digger/default.go index 03c6c920..2ee3c787 100644 --- a/cli/cmd/digger/default.go +++ b/cli/cmd/digger/default.go @@ -2,7 +2,6 @@ package main import ( "fmt" - "github.com/diggerhq/digger/cli/pkg/azure" "github.com/diggerhq/digger/cli/pkg/digger" "github.com/diggerhq/digger/cli/pkg/drift" "github.com/diggerhq/digger/cli/pkg/github" @@ -24,21 +23,6 @@ var defaultCmd = &cobra.Command{ case digger.GitHub: logLeader = os.Getenv("GITHUB_ACTOR") github.GitHubCI(lock, PolicyChecker, BackendApi, ReportStrategy, comment_updater.CommentUpdaterProviderBasic{}, drift.DriftNotificationProviderBasic{}) - case digger.GitLab: - logLeader = os.Getenv("CI_PROJECT_NAME") - gitLabCI(lock, PolicyChecker, BackendApi, ReportStrategy) - case digger.Azure: - // This should be refactored in the future because in this way the parsing - // is done twice, both here and inside azureCI, a better solution might be - // to encapsulate it into a method on the azure package and then grab the - // value here and pass it into the azureCI call. - azureContext := os.Getenv("AZURE_CONTEXT") - parsedAzureContext, _ := azure.GetAzureReposContext(azureContext) - logLeader = parsedAzureContext.BaseUrl - azureCI(lock, PolicyChecker, BackendApi, ReportStrategy) - case digger.BitBucket: - logLeader = os.Getenv("BITBUCKET_STEP_TRIGGERER_UUID") - bitbucketCI(lock, PolicyChecker, BackendApi, ReportStrategy) case digger.None: print("No CI detected.") os.Exit(10) diff --git a/cli/cmd/digger/main.go b/cli/cmd/digger/main.go index 6d775cac..be519a4d 100644 --- a/cli/cmd/digger/main.go +++ b/cli/cmd/digger/main.go @@ -2,464 +2,20 @@ package main import ( "fmt" - "github.com/diggerhq/digger/libs/comment_utils/reporting" - "github.com/diggerhq/digger/libs/comment_utils/summary" - core_locking "github.com/diggerhq/digger/libs/locking" - "log" - "net/http" - "os" - "strconv" - "strings" - - "github.com/diggerhq/digger/cli/pkg/azure" - "github.com/diggerhq/digger/cli/pkg/bitbucket" core_backend "github.com/diggerhq/digger/cli/pkg/core/backend" core_policy "github.com/diggerhq/digger/cli/pkg/core/policy" - core_storage "github.com/diggerhq/digger/cli/pkg/core/storage" "github.com/diggerhq/digger/cli/pkg/digger" - "github.com/diggerhq/digger/cli/pkg/gitlab" "github.com/diggerhq/digger/cli/pkg/storage" "github.com/diggerhq/digger/cli/pkg/usage" + "github.com/diggerhq/digger/libs/comment_utils/reporting" + "github.com/diggerhq/digger/libs/comment_utils/summary" "github.com/diggerhq/digger/libs/digger_config" + core_locking "github.com/diggerhq/digger/libs/locking" orchestrator "github.com/diggerhq/digger/libs/orchestrator" - "gopkg.in/yaml.v3" + "log" + "os" ) -func gitLabCI(lock core_locking.Lock, policyChecker core_policy.Checker, backendApi core_backend.Api, reportingStrategy reporting.ReportStrategy) { - log.Println("Using GitLab.") - - projectNamespace := os.Getenv("CI_PROJECT_NAMESPACE") - projectName := os.Getenv("CI_PROJECT_NAME") - gitlabToken := os.Getenv("GITLAB_TOKEN") - if gitlabToken == "" { - log.Println("GITLAB_TOKEN is empty") - } - - currentDir, err := os.Getwd() - if err != nil { - usage.ReportErrorAndExit(projectNamespace, 
fmt.Sprintf("Failed to get current dir. %s", err), 4) - } - log.Printf("main: working dir: %s \n", currentDir) - - diggerConfig, diggerConfigYaml, dependencyGraph, err := digger_config.LoadDiggerConfig(currentDir, true) - if err != nil { - usage.ReportErrorAndExit(projectNamespace, fmt.Sprintf("Failed to read Digger digger_config. %s", err), 4) - } - log.Println("Digger digger_config read successfully") - - gitLabContext, err := gitlab.ParseGitLabContext() - if err != nil { - log.Printf("failed to parse GitLab context. %s\n", err.Error()) - os.Exit(4) - } - - yamlData, err := yaml.Marshal(diggerConfigYaml) - if err != nil { - log.Fatalf("error: %v", err) - } - - // Convert to string - yamlStr := string(yamlData) - repo := strings.ReplaceAll(gitLabContext.ProjectNamespace, "/", "-") - - for _, p := range diggerConfig.Projects { - err = backendApi.ReportProject(repo, p.Name, yamlStr) - if err != nil { - log.Printf("Failed to report project %s. %s\n", p.Name, err) - } - } - - // it's ok to not have merge request info if it has been merged - if (gitLabContext.MergeRequestIId == nil || len(gitLabContext.OpenMergeRequests) == 0) && gitLabContext.EventType != "merge_request_merge" { - log.Println("No merge request found.") - os.Exit(0) - } - - gitlabService, err := gitlab.NewGitLabService(gitlabToken, gitLabContext) - if err != nil { - log.Printf("failed to initialise GitLab service, %v", err) - os.Exit(4) - } - - gitlabEvent := gitlab.GitLabEvent{EventType: gitLabContext.EventType} - - impactedProjects, requestedProject, err := gitlab.ProcessGitLabEvent(gitLabContext, diggerConfig, gitlabService) - if err != nil { - log.Printf("failed to process GitLab event, %v", err) - os.Exit(6) - } - log.Println("GitLab event processed successfully") - - jobs, coversAllImpactedProjects, err := gitlab.ConvertGitLabEventToCommands(gitlabEvent, gitLabContext, impactedProjects, requestedProject, diggerConfig.Workflows) - if err != nil { - log.Printf("failed to convert event to command, %v", err) - os.Exit(7) - } - log.Println("GitLab event converted to commands successfully") - - log.Println("Digger commands to be executed:") - for _, v := range jobs { - log.Printf("command: %s, project: %s\n", strings.Join(v.Commands, ", "), v.ProjectName) - } - - planStorage := storage.NewPlanStorage("", "", "", gitLabContext.GitlabUserName, gitLabContext.MergeRequestIId) - reporter := &reporting.CiReporter{ - CiService: gitlabService, - PrNumber: *gitLabContext.MergeRequestIId, - ReportStrategy: reportingStrategy, - } - jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph) - allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, gitlabService, gitlabService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, false, 0, currentDir) - - if err != nil { - log.Printf("failed to execute command, %v", err) - os.Exit(8) - } - - if diggerConfig.AutoMerge && atLeastOneApply && allAppliesSuccess && coversAllImpactedProjects { - digger.MergePullRequest(gitlabService, *gitLabContext.MergeRequestIId) - log.Println("Merge request changes has been applied successfully") - } - - log.Println("Commands executed successfully") - - usage.ReportErrorAndExit(projectName, "Digger finished successfully", 0) -} - -func azureCI(lock core_locking.Lock, policyChecker core_policy.Checker, backendApi core_backend.Api, reportingStrategy reporting.ReportStrategy) { - log.Println("> Azure CI detected") - azureContext := os.Getenv("AZURE_CONTEXT") - azureToken := os.Getenv("AZURE_TOKEN") 
- if azureToken == "" { - log.Println("AZURE_TOKEN is empty") - } - parsedAzureContext, err := azure.GetAzureReposContext(azureContext) - if err != nil { - log.Printf("failed to parse Azure context. %s\n", err.Error()) - os.Exit(4) - } - - currentDir, err := os.Getwd() - if err != nil { - usage.ReportErrorAndExit(parsedAzureContext.BaseUrl, fmt.Sprintf("Failed to get current dir. %s", err), 4) - } - log.Printf("main: working dir: %s \n", currentDir) - - diggerConfig, diggerConfigYaml, dependencyGraph, err := digger_config.LoadDiggerConfig(currentDir, true) - if err != nil { - usage.ReportErrorAndExit(parsedAzureContext.BaseUrl, fmt.Sprintf("Failed to read Digger digger_config. %s", err), 4) - } - log.Println("Digger digger_config read successfully") - - yamlData, err := yaml.Marshal(diggerConfigYaml) - if err != nil { - log.Fatalf("error: %v", err) - } - - // Convert to string - yamlStr := string(yamlData) - repo := strings.ReplaceAll(parsedAzureContext.BaseUrl, "/", "-") - - for _, p := range diggerConfig.Projects { - err = backendApi.ReportProject(repo, p.Name, yamlStr) - if err != nil { - log.Printf("Failed to report project %s. %s\n", p.Name, err) - } - } - - azureService, err := azure.NewAzureReposService(azureToken, parsedAzureContext.BaseUrl, parsedAzureContext.ProjectName, parsedAzureContext.RepositoryId) - if err != nil { - usage.ReportErrorAndExit(parsedAzureContext.BaseUrl, fmt.Sprintf("Failed to initialise azure service. %s", err), 5) - } - - impactedProjects, requestedProject, prNumber, err := azure.ProcessAzureReposEvent(parsedAzureContext.Event, diggerConfig, azureService) - if err != nil { - usage.ReportErrorAndExit(parsedAzureContext.BaseUrl, fmt.Sprintf("Failed to process Azure event. %s", err), 6) - } - log.Println("Azure event processed successfully") - - jobs, coversAllImpactedProjects, err := azure.ConvertAzureEventToCommands(parsedAzureContext, impactedProjects, requestedProject, diggerConfig.Workflows) - if err != nil { - usage.ReportErrorAndExit(parsedAzureContext.BaseUrl, fmt.Sprintf("Failed to convert event to command. %s", err), 7) - - } - log.Println(fmt.Sprintf("Azure event converted to commands successfully: %v", jobs)) - - for _, v := range jobs { - log.Printf("command: %s, project: %s\n", strings.Join(v.Commands, ", "), v.ProjectName) - } - - var planStorage core_storage.PlanStorage - - reporter := &reporting.CiReporter{ - CiService: azureService, - PrNumber: prNumber, - ReportStrategy: reportingStrategy, - } - jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph) - allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, azureService, azureService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, false, 0, currentDir) - if err != nil { - usage.ReportErrorAndExit(parsedAzureContext.BaseUrl, fmt.Sprintf("Failed to run commands. 
%s", err), 8) - } - - if diggerConfig.AutoMerge && allAppliesSuccess && atLeastOneApply && coversAllImpactedProjects { - digger.MergePullRequest(azureService, prNumber) - log.Println("PR merged successfully") - } - - log.Println("Commands executed successfully") - - usage.ReportErrorAndExit(parsedAzureContext.BaseUrl, "Digger finished successfully", 0) -} - -func bitbucketCI(lock core_locking.Lock, policyChecker core_policy.Checker, backendApi core_backend.Api, reportingStrategy reporting.ReportStrategy) { - log.Printf("Using Bitbucket.\n") - actor := os.Getenv("BITBUCKET_STEP_TRIGGERER_UUID") - if actor != "" { - usage.SendUsageRecord(actor, "log", "initialize") - } else { - usage.SendUsageRecord("", "log", "non github initialisation") - } - - runningMode := os.Getenv("INPUT_DIGGER_MODE") - - repository := os.Getenv("BITBUCKET_REPO_FULL_NAME") - - if repository == "" { - usage.ReportErrorAndExit(actor, "BITBUCKET_REPO_FULL_NAME is not defined", 3) - } - - splitRepositoryName := strings.Split(repository, "/") - repoOwner, repositoryName := splitRepositoryName[0], splitRepositoryName[1] - - currentDir, err := os.Getwd() - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to get current dir. %s", err), 4) - } - - diggerConfig, _, dependencyGraph, err := digger_config.LoadDiggerConfig("./", true) - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to read Digger digger_config. %s", err), 4) - } - log.Printf("Digger digger_config read successfully\n") - - authToken := os.Getenv("BITBUCKET_AUTH_TOKEN") - - if authToken == "" { - usage.ReportErrorAndExit(actor, "BITBUCKET_AUTH_TOKEN is not defined", 3) - } - - bitbucketService := bitbucket.BitbucketAPI{ - AuthToken: authToken, - HttpClient: http.Client{}, - RepoWorkspace: repoOwner, - RepoName: repositoryName, - } - - if runningMode == "manual" { - command := os.Getenv("INPUT_DIGGER_COMMAND") - if command == "" { - usage.ReportErrorAndExit(actor, "provide 'command' to run in 'manual' mode", 1) - } - project := os.Getenv("INPUT_DIGGER_PROJECT") - if project == "" { - usage.ReportErrorAndExit(actor, "provide 'project' to run in 'manual' mode", 2) - } - - var projectConfig digger_config.Project - for _, projectConfig = range diggerConfig.Projects { - if projectConfig.Name == project { - break - } - } - workflow := diggerConfig.Workflows[projectConfig.Workflow] - - stateEnvVars, commandEnvVars := digger_config.CollectTerraformEnvConfig(workflow.EnvVars) - - planStorage := storage.NewPlanStorage("", repoOwner, repositoryName, actor, nil) - - jobs := orchestrator.Job{ - ProjectName: project, - ProjectDir: projectConfig.Dir, - ProjectWorkspace: projectConfig.Workspace, - Terragrunt: projectConfig.Terragrunt, - OpenTofu: projectConfig.OpenTofu, - Commands: []string{command}, - ApplyStage: orchestrator.ToConfigStage(workflow.Apply), - PlanStage: orchestrator.ToConfigStage(workflow.Plan), - PullRequestNumber: nil, - EventName: "manual_invocation", - RequestedBy: actor, - Namespace: repository, - StateEnvVars: stateEnvVars, - CommandEnvVars: commandEnvVars, - } - err := digger.RunJob(jobs, repository, actor, &bitbucketService, policyChecker, planStorage, backendApi, nil, currentDir) - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to run commands. 
%s", err), 8) - } - } else if runningMode == "drift-detection" { - - for _, projectConfig := range diggerConfig.Projects { - if !projectConfig.DriftDetection { - continue - } - workflow := diggerConfig.Workflows[projectConfig.Workflow] - - stateEnvVars, commandEnvVars := digger_config.CollectTerraformEnvConfig(workflow.EnvVars) - - StateEnvProvider, CommandEnvProvider := orchestrator.GetStateAndCommandProviders(projectConfig) - - job := orchestrator.Job{ - ProjectName: projectConfig.Name, - ProjectDir: projectConfig.Dir, - ProjectWorkspace: projectConfig.Workspace, - Terragrunt: projectConfig.Terragrunt, - OpenTofu: projectConfig.OpenTofu, - Commands: []string{"digger drift-detect"}, - ApplyStage: orchestrator.ToConfigStage(workflow.Apply), - PlanStage: orchestrator.ToConfigStage(workflow.Plan), - CommandEnvVars: commandEnvVars, - StateEnvVars: stateEnvVars, - RequestedBy: actor, - Namespace: repository, - EventName: "drift-detect", - CommandEnvProvider: CommandEnvProvider, - StateEnvProvider: StateEnvProvider, - } - err := digger.RunJob(job, repository, actor, &bitbucketService, policyChecker, nil, backendApi, nil, currentDir) - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to run commands. %s", err), 8) - } - } - } else { - var jobs []orchestrator.Job - if os.Getenv("BITBUCKET_PR_ID") == "" && os.Getenv("BITBUCKET_BRANCH") == os.Getenv("DEFAULT_BRANCH") { - for _, projectConfig := range diggerConfig.Projects { - - workflow := diggerConfig.Workflows[projectConfig.Workflow] - log.Printf("workflow: %v", workflow) - - stateEnvVars, commandEnvVars := digger_config.CollectTerraformEnvConfig(workflow.EnvVars) - - job := orchestrator.Job{ - ProjectName: projectConfig.Name, - ProjectDir: projectConfig.Dir, - ProjectWorkspace: projectConfig.Workspace, - Terragrunt: projectConfig.Terragrunt, - OpenTofu: projectConfig.OpenTofu, - Commands: workflow.Configuration.OnCommitToDefault, - ApplyStage: orchestrator.ToConfigStage(workflow.Apply), - PlanStage: orchestrator.ToConfigStage(workflow.Plan), - CommandEnvVars: commandEnvVars, - StateEnvVars: stateEnvVars, - RequestedBy: actor, - Namespace: repository, - EventName: "commit_to_default", - } - err := digger.RunJob(job, repository, actor, &bitbucketService, policyChecker, nil, backendApi, nil, currentDir) - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to run commands. %s", err), 8) - } - } - } else if os.Getenv("BITBUCKET_PR_ID") == "" { - for _, projectConfig := range diggerConfig.Projects { - - workflow := diggerConfig.Workflows[projectConfig.Workflow] - - stateEnvVars, commandEnvVars := digger_config.CollectTerraformEnvConfig(workflow.EnvVars) - - job := orchestrator.Job{ - ProjectName: projectConfig.Name, - ProjectDir: projectConfig.Dir, - ProjectWorkspace: projectConfig.Workspace, - Terragrunt: projectConfig.Terragrunt, - OpenTofu: projectConfig.OpenTofu, - Commands: []string{"digger plan"}, - ApplyStage: orchestrator.ToConfigStage(workflow.Apply), - PlanStage: orchestrator.ToConfigStage(workflow.Plan), - CommandEnvVars: commandEnvVars, - StateEnvVars: stateEnvVars, - RequestedBy: actor, - Namespace: repository, - EventName: "commit_to_default", - } - err := digger.RunJob(job, repository, actor, &bitbucketService, policyChecker, nil, backendApi, nil, currentDir) - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to run commands. 
%s", err), 8) - } - } - } else if os.Getenv("BITBUCKET_PR_ID") != "" { - prNumber, err := strconv.Atoi(os.Getenv("BITBUCKET_PR_ID")) - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to parse PR number. %s", err), 4) - } - impactedProjects, err := bitbucket.FindImpactedProjectsInBitbucket(diggerConfig, prNumber, &bitbucketService) - - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to find impacted projects. %s", err), 5) - } - if len(impactedProjects) == 0 { - usage.ReportErrorAndExit(actor, "No projects impacted", 0) - } - - impactedProjectsMsg := getImpactedProjectsAsString(impactedProjects, prNumber) - log.Println(impactedProjectsMsg) - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to find impacted projects. %s", err), 5) - } - - for _, project := range impactedProjects { - workflow := diggerConfig.Workflows[project.Workflow] - - stateEnvVars, commandEnvVars := digger_config.CollectTerraformEnvConfig(workflow.EnvVars) - - job := orchestrator.Job{ - ProjectName: project.Name, - ProjectDir: project.Dir, - ProjectWorkspace: project.Workspace, - Terragrunt: project.Terragrunt, - OpenTofu: project.OpenTofu, - Commands: workflow.Configuration.OnPullRequestPushed, - ApplyStage: orchestrator.ToConfigStage(workflow.Apply), - PlanStage: orchestrator.ToConfigStage(workflow.Plan), - CommandEnvVars: commandEnvVars, - StateEnvVars: stateEnvVars, - PullRequestNumber: &prNumber, - RequestedBy: actor, - Namespace: repository, - EventName: "pull_request", - } - jobs = append(jobs, job) - } - - reporter := reporting.CiReporter{ - CiService: &bitbucketService, - PrNumber: prNumber, - ReportStrategy: reportingStrategy, - } - - log.Println("Bitbucket trigger converted to commands successfully") - - logCommands(jobs) - - planStorage := storage.NewPlanStorage("", repoOwner, repositoryName, actor, nil) - - jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph) - - _, _, err = digger.RunJobs(jobs, &bitbucketService, &bitbucketService, lock, &reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, false, 0, currentDir) - if err != nil { - usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to run commands. %s", err), 8) - } - } else { - usage.ReportErrorAndExit(actor, "Failed to detect running mode", 1) - } - - } - - usage.ReportErrorAndExit(actor, "Digger finished successfully", 0) -} - func exec(actor string, projectName string, repoNamespace string, command string, prNumber int, lock core_locking.Lock, policyChecker core_policy.Checker, prService orchestrator.PullRequestService, orgService orchestrator.OrgService, reporter reporting.Reporter, backendApi core_backend.Api) { //SCMOrganisation, SCMrepository := utils.ParseRepoNamespace(runConfig.RepoNamespace) @@ -472,7 +28,11 @@ func exec(actor string, projectName string, repoNamespace string, command string planStorage := storage.NewPlanStorage("", "", "", actor, nil) - diggerConfig, _, dependencyGraph, err := digger_config.LoadDiggerConfig("./", true) + changedFiles, err := prService.GetChangedFiles(prNumber) + if err != nil { + usage.ReportErrorAndExit(actor, fmt.Sprintf("could not get changed files: %v", err), 1) + } + diggerConfig, _, dependencyGraph, err := digger_config.LoadDiggerConfig("./", true, changedFiles) if err != nil { usage.ReportErrorAndExit(actor, fmt.Sprintf("Failed to load digger config. 
%s", err), 4) } diff --git a/cli/pkg/github/github.go b/cli/pkg/github/github.go index bc597def..0f9c0c06 100644 --- a/cli/pkg/github/github.go +++ b/cli/pkg/github/github.go @@ -135,7 +135,12 @@ func GitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend usage.ReportErrorAndExit(githubActor, fmt.Sprintf("Failed to report jobSpec status to backend. Exiting. %s", err), 4) } - diggerConfig, _, _, err := digger_config.LoadDiggerConfig("./", false) + files, err := githubPrService.GetChangedFiles(*jobSpec.PullRequestNumber) + if err != nil { + usage.ReportErrorAndExit(githubActor, fmt.Sprintf("could not get changed files: %v", err), 4) + } + + diggerConfig, _, _, err := digger_config.LoadDiggerConfig("./", false, files) if err != nil { usage.ReportErrorAndExit(githubActor, fmt.Sprintf("Failed to read Digger digger_config. %s", err), 4) } @@ -206,7 +211,7 @@ func GitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend usage.ReportErrorAndExit(githubActor, "Digger finished successfully", 0) } - diggerConfig, diggerConfigYaml, dependencyGraph, err := digger_config.LoadDiggerConfig("./", true) + diggerConfig, diggerConfigYaml, dependencyGraph, err := digger_config.LoadDiggerConfig("./", true, nil) if err != nil { usage.ReportErrorAndExit(githubActor, fmt.Sprintf("Failed to read Digger digger_config. %s", err), 4) } diff --git a/cli/pkg/integration/integration_test.go b/cli/pkg/integration/integration_test.go index 21810271..740ac7ec 100644 --- a/cli/pkg/integration/integration_test.go +++ b/cli/pkg/integration/integration_test.go @@ -358,7 +358,7 @@ func TestHappyPath(t *testing.T) { terraform.CreateValidTerraformTestFile(dir) terraform.CreateSingleEnvDiggerYmlFile(dir) - diggerConfig, _, _, err := configuration.LoadDiggerConfig(dir, true) + diggerConfig, _, _, err := configuration.LoadDiggerConfig(dir, true, nil) assert.NoError(t, err) lock, err := locking.GetLock() @@ -511,7 +511,7 @@ func TestMultiEnvHappyPath(t *testing.T) { terraform.CreateValidTerraformTestFile(dir) terraform.CreateMultiEnvDiggerYmlFile(dir) - diggerConfig, _, _, err := configuration.LoadDiggerConfig(dir, true) + diggerConfig, _, _, err := configuration.LoadDiggerConfig(dir, true, nil) assert.NoError(t, err) cfg, err := config.LoadDefaultConfig(context.TODO(), @@ -730,7 +730,7 @@ workflows: terraform.CreateValidTerraformTestFile(dir) terraform.CreateCustomDiggerYmlFile(dir, diggerCfg) - diggerConfig, _, _, err := configuration.LoadDiggerConfig(dir, true) + diggerConfig, _, _, err := configuration.LoadDiggerConfig(dir, true, nil) assert.NoError(t, err) assert.NotNil(t, diggerConfig.Workflows) diff --git a/cli/pkg/spec/spec.go b/cli/pkg/spec/spec.go index 105634b2..53164e5c 100644 --- a/cli/pkg/spec/spec.go +++ b/cli/pkg/spec/spec.go @@ -26,12 +26,6 @@ func RunSpec( commentUpdaterProvider comment_summary.CommentUpdaterProvider, ) error { - diggerConfig, _, _, err := digger_config.LoadDiggerConfig("./", true) - if err != nil { - usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("Failed to read Digger digger_config. 
%s", err), 4) - } - log.Printf("Digger digger_config read successfully\n") - job, err := jobProvider.GetJob(spec.Job) if err != nil { usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("could not get job: %v", err), 1) @@ -59,6 +53,19 @@ func RunSpec( } policyChecker, err := policyProvider.GetPolicyProvider(spec.Policy, spec.Backend.BackendHostname, spec.Backend.BackendOrganisationName, spec.Backend.BackendJobToken) + if err != nil { + usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("could not get policy provider: %v", err), 1) + } + + changedFiles, err := prService.GetChangedFiles(*spec.Job.PullRequestNumber) + if err != nil { + usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("could not get changed files: %v", err), 1) + } + diggerConfig, _, _, err := digger_config.LoadDiggerConfig("./", true, changedFiles) + if err != nil { + usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("Failed to read Digger digger_config. %s", err), 4) + } + log.Printf("Digger digger_config read successfully\n") commentUpdater, err := commentUpdaterProvider.Get(*diggerConfig) if err != nil { diff --git a/cli/pkg/usage/usage.go b/cli/pkg/usage/usage.go index 5cf7ead9..e59f80eb 100644 --- a/cli/pkg/usage/usage.go +++ b/cli/pkg/usage/usage.go @@ -95,7 +95,7 @@ func init() { source = "azure" } - config, _, _, err := configuration.LoadDiggerConfig(currentDir, false) + config, _, _, err := configuration.LoadDiggerConfig(currentDir, false, nil) if err != nil { return } diff --git a/dgctl/cmd/validate.go b/dgctl/cmd/validate.go index 3f1b50bc..9be77737 100644 --- a/dgctl/cmd/validate.go +++ b/dgctl/cmd/validate.go @@ -19,7 +19,7 @@ var validateCmd = &cobra.Command{ Short: "Validate a digger.yml file", Long: `Validate a digger.yml file`, Run: func(cmd *cobra.Command, args []string) { - _, configYaml, _, err := digger_config.LoadDiggerConfig("./", true) + _, configYaml, _, err := digger_config.LoadDiggerConfig("./", true, nil) if err != nil { log.Printf("Invalid digger config file: %v. Exiting.", err) os.Exit(1) diff --git a/docs/howto/generate-projects.mdx b/docs/howto/generate-projects.mdx index e0dbafd3..bb6d9601 100644 --- a/docs/howto/generate-projects.mdx +++ b/docs/howto/generate-projects.mdx @@ -42,3 +42,29 @@ generate_projects: ``` This will create a project for all sub-directories under environments/core. If set to `false`, only the first directory with a .tf file will be evaluated. + +# Blocks syntax with Terragrunt + +You can use blocks generation with terragrunt as well. To achieve this, specify +`terragrunt: true` for each block. Normally you would have only one terragrunt structure, and therefore +a single block entry. However, you may also want to specify a different structure and different parameters +for different folders. For example, you may have a dev, staging and prod account hierarchy, and would then +define one block per account. Alternatively, you may have different providers and want to specify +those as separate blocks as well. Note that for very large terragrunt monorepos, segregating by blocks +improves performance, since Digger will not unnecessarily traverse an entire tree if no files have +changed within it. 
+ +``` +generate_projects: + blocks: + - block_name: dev + terragrunt: true + root_dir: "dev/" + workflow: default + workflow_file: digger_workflow_dev.yml + - block_name: staging + terragrunt: true + root_dir: "staging/" + workflow: default + workflow_file: digger_workflow_staging.yml +``` \ No newline at end of file diff --git a/docs/howto/using-terragrunt.mdx b/docs/howto/using-terragrunt.mdx index 1d253baa..455e60c1 100644 --- a/docs/howto/using-terragrunt.mdx +++ b/docs/howto/using-terragrunt.mdx @@ -24,6 +24,11 @@ This will perform a `terragrunt apply` after changes are detected within this di # Dynamically generating Terragrunt projects + +This is not the preferred way of generating terragrunt projects; we advise you to use the [blocks syntax](/howto/generate-projects#blocks-syntax-with-terragrunt) instead, +since this approach may be deprecated in the future. + + [Demo repo](https://github.com/diggerhq/test-terragrunt-racecondition) In many cases with terragrunt you don't want to mention all of your terragrunt components since there can be tens or hundreds of those (not to mention all the dependencies of those). In this case you can just liase it to digger and it will perform dynamic generation of projects for you and trigger the relevant `terragrunt apply` commands on all impacated projects per pull request. It will also handle dependencies of these projects. You can configure this using the following: diff --git a/libs/digger_config/digger_config.go b/libs/digger_config/digger_config.go index 57d1938d..952a30b3 100644 --- a/libs/digger_config/digger_config.go +++ b/libs/digger_config/digger_config.go @@ -132,9 +132,9 @@ func (walker *FileSystemTerragruntDirWalker) GetDirs(workingDir string, configYa var ErrDiggerConfigConflict = errors.New("more than one digger digger_config file detected, please keep either 'digger.yml' or 'digger.yaml'") -func LoadDiggerConfig(workingDir string, generateProjects bool) (*DiggerConfig, *DiggerConfigYaml, graph.Graph[string, Project], error) { +func LoadDiggerConfig(workingDir string, generateProjects bool, changedFiles []string) (*DiggerConfig, *DiggerConfigYaml, graph.Graph[string, Project], error) { config := &DiggerConfig{} - configYaml, err := LoadDiggerConfigYaml(workingDir, generateProjects) + configYaml, err := LoadDiggerConfigYaml(workingDir, generateProjects, changedFiles) if err != nil { return nil, nil, nil, err } @@ -163,7 +163,7 @@ func LoadDiggerConfigFromString(yamlString string, terraformDir string) (*Digger return nil, nil, nil, err } - err = HandleYamlProjectGeneration(configYaml, terraformDir) + err = HandleYamlProjectGeneration(configYaml, terraformDir, nil) if err != nil { return nil, nil, nil, err } @@ -189,13 +189,38 @@ func LoadDiggerConfigYamlFromString(yamlString string) (*DiggerConfigYaml, error return configYaml, nil } -func HandleYamlProjectGeneration(config *DiggerConfigYaml, terraformDir string) error { +func validateBlockYaml(blocks []BlockYaml) error { + for _, b := range blocks { + if b.Terragrunt { + if b.RootDir == nil { + return fmt.Errorf("block %v is a terragrunt block but does not have root_dir specified", b.BlockName) + } + } + } + return nil +} + +func checkBlockInChangedFiles(dir string, changedFiles []string) bool { + if changedFiles == nil { + return true + } + for _, file := range changedFiles { + if strings.HasPrefix(NormalizeFileName(file), NormalizeFileName(dir)) { + return true + } + } + return false +} + +func HandleYamlProjectGeneration(config *DiggerConfigYaml, terraformDir string, changedFiles 
[]string) error { if config.GenerateProjectsConfig != nil && config.GenerateProjectsConfig.TerragruntParsingConfig != nil { + log.Printf("Warning: top-level terragrunt generation will be deprecated in the future; we recommend using blocks instead: %v", "https://docs.digger.dev/howto/generate-projects#blocks-syntax-with-terragrunt") err := hydrateDiggerConfigYamlWithTerragrunt(config, *config.GenerateProjectsConfig.TerragruntParsingConfig, terraformDir) if err != nil { return err } } else if config.GenerateProjectsConfig != nil && config.GenerateProjectsConfig.Terragrunt { + log.Printf("Warning: top-level terragrunt generation will be deprecated in the future; we recommend using blocks instead: %v", "https://docs.digger.dev/howto/generate-projects#blocks-syntax-with-terragrunt") err := hydrateDiggerConfigYamlWithTerragrunt(config, TerragruntParsingConfig{}, terraformDir) if err != nil { return err @@ -222,20 +247,45 @@ func HandleYamlProjectGeneration(config *DiggerConfigYaml, terraformDir string) } } if config.GenerateProjectsConfig.Blocks != nil && len(config.GenerateProjectsConfig.Blocks) > 0 { + err = validateBlockYaml(config.GenerateProjectsConfig.Blocks) + if err != nil { + return err + } // if blocks of include/exclude patterns defined for _, b := range config.GenerateProjectsConfig.Blocks { - includePatterns = []string{b.Include} - excludePatterns = []string{b.Exclude} - workflow := "default" - if b.Workflow != "" { - workflow = b.Workflow - } + if b.Terragrunt == true { + + if checkBlockInChangedFiles(*b.RootDir, changedFiles) { + log.Printf("generating projects for block: %v", b.BlockName) + workflow := "default" + if b.Workflow != "" { + workflow = b.Workflow + } - for _, dir := range dirs { - if MatchIncludeExcludePatternsToFile(dir, includePatterns, excludePatterns) { - projectName := strings.ReplaceAll(dir, "/", "_") - project := ProjectYaml{Name: projectName, Dir: dir, Workflow: workflow, Workspace: "default", AwsRoleToAssume: b.AwsRoleToAssume} - config.Projects = append(config.Projects, &project) + err := hydrateDiggerConfigYamlWithTerragrunt(config, TerragruntParsingConfig{ + CreateProjectName: true, + DefaultWorkflow: workflow, + WorkflowFile: b.WorkflowFile, + FilterPath: path.Join(terraformDir, *b.RootDir), + }, terraformDir) + if err != nil { + return err + } + } + } else { + includePatterns = []string{b.Include} + excludePatterns = []string{b.Exclude} + workflow := "default" + if b.Workflow != "" { + workflow = b.Workflow + } + + for _, dir := range dirs { + if MatchIncludeExcludePatternsToFile(dir, includePatterns, excludePatterns) { + projectName := strings.ReplaceAll(dir, "/", "_") + project := ProjectYaml{Name: projectName, Dir: dir, Workflow: workflow, Workspace: "default", AwsRoleToAssume: b.AwsRoleToAssume} + config.Projects = append(config.Projects, &project) + } } } } @@ -244,7 +294,7 @@ func HandleYamlProjectGeneration(config *DiggerConfigYaml, terraformDir string) return nil } -func LoadDiggerConfigYaml(workingDir string, generateProjects bool) (*DiggerConfigYaml, error) { +func LoadDiggerConfigYaml(workingDir string, generateProjects bool, changedFiles []string) (*DiggerConfigYaml, error) { configYaml := &DiggerConfigYaml{} fileName, err := retrieveConfigFile(workingDir) if err != nil { @@ -281,7 +331,7 @@ func LoadDiggerConfigYaml(workingDir string, generateProjects bool) (*DiggerConf } if generateProjects == true { - err = HandleYamlProjectGeneration(configYaml, workingDir) + err = 
HandleYamlProjectGeneration(configYaml, workingDir, changedFiles) if err != nil { return configYaml, err } @@ -373,6 +423,11 @@ func hydrateDiggerConfigYamlWithTerragrunt(configYaml *DiggerConfigYaml, parsing executionOrderGroups = *parsingConfig.ExecutionOrderGroups } + workflowFile := "digger_workflow.yml" + if parsingConfig.WorkflowFile != "" { + workflowFile = parsingConfig.WorkflowFile + } + atlantisConfig, _, err := atlantis.Parse( root, parsingConfig.ProjectHclFiles, @@ -428,6 +483,7 @@ func hydrateDiggerConfigYamlWithTerragrunt(configYaml *DiggerConfigYaml, parsing Workspace: atlantisProject.Workspace, Terragrunt: true, Workflow: atlantisProject.Workflow, + WorkflowFile: &workflowFile, IncludePatterns: atlantisProject.Autoplan.WhenModified, }) } diff --git a/libs/digger_config/digger_config_test.go b/libs/digger_config/digger_config_test.go index 047302ac..635f1036 100644 --- a/libs/digger_config/digger_config_test.go +++ b/libs/digger_config/digger_config_test.go @@ -34,7 +34,7 @@ func TestDiggerConfigWhenMultipleConfigExist(t *testing.T) { t.Fatal(err) } - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.Error(t, err, "expected error to be returned") assert.ErrorContains(t, err, ErrDiggerConfigConflict.Error(), "expected error to match target error") assert.Nil(t, dg, "expected diggerConfig to be nil") @@ -75,7 +75,7 @@ projects: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, "path/to/module/test", dg.GetDirectory("prod")) @@ -90,7 +90,7 @@ func TestNoDiggerYaml(t *testing.T) { defer deleteFile() os.Chdir(tempDir) - dg, _, _, err := LoadDiggerConfig("./", true) + dg, _, _, err := LoadDiggerConfig("./", true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") @@ -131,7 +131,7 @@ projects: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) fmt.Printf("%v", err) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") @@ -174,7 +174,7 @@ projects: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) fmt.Printf("%v", err) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") @@ -195,7 +195,7 @@ projects: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, "default", dg.Projects[0].Workflow) @@ -217,7 +217,7 @@ projects: deleteFile := createFile(path.Join(tempDir, "digger.yml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, 
"expected digger digger_config to be not nil") assert.Equal(t, "path/to/module", dg.GetDirectory("dev")) @@ -242,7 +242,7 @@ workflows: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.Equal(t, Step{Action: "run", Value: "echo \"hello\"", Shell: ""}, dg.Workflows["myworkflow"].Plan.Steps[0], "parsed struct does not match expected struct") } @@ -287,7 +287,7 @@ workflows: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.Equal(t, []EnvVar{ {Name: "TF_VAR_state", Value: "s3://mybucket/terraform.tfstate"}, @@ -327,7 +327,7 @@ workflows: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.Equal(t, Step{Action: "run", Value: "rm -rf .terraform", Shell: ""}, dg.Workflows["dev"].Plan.Steps[0], "parsed struct does not match expected struct") assert.Equal(t, Step{Action: "init", ExtraArgs: nil, Shell: ""}, dg.Workflows["dev"].Plan.Steps[1], "parsed struct does not match expected struct") @@ -357,7 +357,7 @@ generate_projects: assert.NoError(t, err, "expected error to be nil") } - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, "dev_test1", dg.Projects[0].Name) @@ -385,7 +385,7 @@ func TestGenerateProjectsWithoutDiggerConfig(t *testing.T) { assert.NoError(t, err, "expected error to be nil") } - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, "dev_project", dg.Projects[0].Name) @@ -421,7 +421,7 @@ generate_projects: assert.NoError(t, err, "expected error to be nil") } - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, "dev_test1_utils", dg.Projects[0].Name) @@ -447,7 +447,7 @@ func TestDiggerGenerateProjectsWithTfvars(t *testing.T) { defer createFile(path.Join(tempDir, "dev", "blank.tfvars"), "")() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, 1, len(dg.Projects)) @@ -476,7 +476,7 @@ generate_projects: defer createFile(path.Join(tempDir, dir, "main.tf"), "")() assert.NoError(t, err, "expected error to be nil") } - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, "dev", dg.Projects[0].Name) @@ -491,7 +491,7 @@ func TestMissingProjectsReturnsError(t 
*testing.T) { ` deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - _, _, _, err := LoadDiggerConfig(tempDir, true) + _, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.ErrorContains(t, err, "no projects digger_config found") } @@ -514,7 +514,7 @@ workflows: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, "my_custom_workflow", dg.Projects[0].Workflow) @@ -536,7 +536,7 @@ projects: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - _, _, _, err := LoadDiggerConfig(tempDir, true) + _, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.Error(t, err, "failed to find workflow digger_config 'my_custom_workflow' for project 'my-first-app'") // steps block is missing for workflows @@ -551,7 +551,7 @@ workflows: deleteFile = createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - diggerConfig, _, _, err := LoadDiggerConfig(tempDir, true) + diggerConfig, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.Equal(t, "my_custom_workflow", diggerConfig.Projects[0].Workflow) workflow, ok := diggerConfig.Workflows["my_custom_workflow"] assert.True(t, ok) @@ -580,7 +580,7 @@ workflows: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - _, _, _, err := LoadDiggerConfig(tempDir, true) + _, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.Equal(t, "failed to find workflow digger_config 'my_custom_workflow' for project 'my-first-app'", err.Error()) } @@ -605,7 +605,7 @@ workflows: deleteFile := createFile(path.Join(tempDir, "digger.yaml"), diggerCfg) defer deleteFile() - _, _, _, err := LoadDiggerConfig(tempDir, true) + _, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.Nil(t, err) } @@ -845,7 +845,7 @@ workflows: t.Run(tt.name, func(t *testing.T) { deleteFile := createFile(path.Join(tempDir, "digger.yaml"), tt.diggerCfg) defer deleteFile() - _, _, _, err := LoadDiggerConfig(tempDir, true) + _, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.ErrorContains(t, err, tt.wantErr) }) } @@ -941,7 +941,7 @@ workflows: assert.NoError(t, err, "expected error to be nil") } - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, "dev_test1", dg.Projects[0].Name) @@ -1005,7 +1005,7 @@ projects: defer createFile(path.Join(tempDir, "main.tf"), "resource \"null_resource\" \"test4\" {}")() defer createFile(path.Join(tempDir, "terragrunt.hcl"), "terraform {}")() - _, config, _, err := LoadDiggerConfig(tempDir, true) + _, config, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err) print(config) @@ -1038,7 +1038,7 @@ generate_projects: err = createAndCloseFile(path.Join(projectDir, "digger.yml"), diggerCfg) assert.NoError(t, err) - _, _, _, err = LoadDiggerConfig(projectDir, true) + _, _, _, err = LoadDiggerConfig(projectDir, true, nil) assert.NoError(t, err) } @@ -1069,7 +1069,7 @@ inputs = { defer createFile(path.Join(tempDir, "digger.yml"), diggerCfg)() defer createFile(path.Join(tempDir, "terragrunt.hcl"), hclFile)() - _, config, _, err := 
LoadDiggerConfig(tempDir, true) + _, config, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err) print(config) @@ -1098,7 +1098,7 @@ generate_projects: defer createFile(path.Join(tempDir, "digger.yml"), diggerCfg)() - _, config, _, err := LoadDiggerConfig(tempDir, true) + _, config, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err) assert.NotNil(t, config) @@ -1121,7 +1121,7 @@ func TestDiggerGenerateProjectsMultipleBlocksDemo(t *testing.T) { }) assert.NoError(t, err) - _, config, _, err := LoadDiggerConfig(tempDir, true) + _, config, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err) assert.NotNil(t, config) assert.Equal(t, "projects_dev_test1", config.Projects[0].Name) @@ -1164,7 +1164,7 @@ generate_projects: assert.NoError(t, err, "expected error to be nil") } - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err, "expected error to be nil") assert.NotNil(t, dg, "expected digger digger_config to be not nil") assert.Equal(t, true, dg.TraverseToNestedProjects) @@ -1195,7 +1195,7 @@ projects: defer createFile(path.Join(tempDir, "digger.yml"), diggerCfg)() defer createFile(path.Join(tempDir, "main.tf"), "resource \"null_resource\" \"test4\" {}")() - dg, _, _, err := LoadDiggerConfig(tempDir, true) + dg, _, _, err := LoadDiggerConfig(tempDir, true, nil) assert.NoError(t, err) assert.Equal(t, false, dg.AllowDraftPRs) } diff --git a/libs/digger_config/terragrunt/atlantis/generate.go b/libs/digger_config/terragrunt/atlantis/generate.go index 862633a3..707a1811 100644 --- a/libs/digger_config/terragrunt/atlantis/generate.go +++ b/libs/digger_config/terragrunt/atlantis/generate.go @@ -809,7 +809,7 @@ func Parse(gitRoot string, projectHclFiles []string, createHclProjectExternalChi } } - // Sort the projects in atlantisConfig by Dir + // Sort the projects in atlantisConfig by RootDir sort.Slice(atlantisConfig.Projects, func(i, j int) bool { return atlantisConfig.Projects[i].Dir < atlantisConfig.Projects[j].Dir }) // if executionOrderGroups { diff --git a/libs/digger_config/yaml.go b/libs/digger_config/yaml.go index 8bbb8582..8e90cec3 100644 --- a/libs/digger_config/yaml.go +++ b/libs/digger_config/yaml.go @@ -85,9 +85,18 @@ type EnvVarYaml struct { } type BlockYaml struct { - Include string `yaml:"include"` - Exclude string `yaml:"exclude"` + // these flags for terraform only + Include string `yaml:"include"` + Exclude string `yaml:"exclude"` + + // these flags are only for terragrunt only + Terragrunt bool `yaml:"terragrunt"` + RootDir *string `yaml:"root_dir"` + + // these flags for both terraform and terragrunt + BlockName string `yaml:"block_name"` Workflow string `yaml:"workflow"` + WorkflowFile string `yaml:"workflow_file"` AwsRoleToAssume *AssumeRoleForProjectConfig `yaml:"aws_role_to_assume,omitempty"` } @@ -130,6 +139,7 @@ type TerragruntParsingConfig struct { CreateHclProjectExternalChilds *bool `yaml:"createHclProjectExternalChilds,omitempty"` UseProjectMarkers bool `yaml:"useProjectMarkers"` ExecutionOrderGroups *bool `yaml:"executionOrderGroups"` + WorkflowFile string `yaml:"workflow_file"` } func (p *ProjectYaml) UnmarshalYAML(unmarshal func(interface{}) error) error { diff --git a/libs/orchestrator/json_models_test.go b/libs/orchestrator/json_models_test.go index 3aa6be6f..0c24b705 100644 --- a/libs/orchestrator/json_models_test.go +++ b/libs/orchestrator/json_models_test.go @@ -63,7 +63,7 @@ func TestAllFieldsInJobAreAlsoInJobJson(t *testing.T) { func 
TestIsPlanForDiggerPlanJobCorrect(t *testing.T) { j := JobJson{ ProjectName: "project.Name", - ProjectDir: "project.Dir", + ProjectDir: "project.RootDir", ProjectWorkspace: "workspace", Terragrunt: false, Commands: []string{"run echo 'hello", "digger plan"}, @@ -76,7 +76,7 @@ func TestIsPlanForDiggerPlanJobCorrect(t *testing.T) { func TestIsApplyForDiggerApplyJobCorrect(t *testing.T) { j := JobJson{ ProjectName: "project.Name", - ProjectDir: "project.Dir", + ProjectDir: "project.RootDir", ProjectWorkspace: "workspace", Terragrunt: false, Commands: []string{"digger apply"},
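
A minimal sketch of the call pattern the new `changedFiles` argument enables, assuming a PR service that exposes `GetChangedFiles(prNumber) ([]string, error)` as the services in this diff do; the `changedFilesProvider` interface and `loadConfigForPR` helper are illustrative names, not part of the diff:

```go
package example

import (
	"log"

	"github.com/diggerhq/digger/libs/digger_config"
)

// changedFilesProvider captures the single method this sketch needs; the
// GitHub service used in this diff exposes GetChangedFiles with this shape.
type changedFilesProvider interface {
	GetChangedFiles(prNumber int) ([]string, error)
}

// loadConfigForPR fetches the files touched by the PR and passes them to
// LoadDiggerConfig, so terragrunt block generation can skip blocks whose
// root_dir was not affected. Passing nil instead of changedFiles disables
// the filtering, which is what call sites without PR context (tests,
// validate, usage) pass in this diff.
func loadConfigForPR(prService changedFilesProvider, prNumber int) (*digger_config.DiggerConfig, error) {
	changedFiles, err := prService.GetChangedFiles(prNumber)
	if err != nil {
		return nil, err
	}
	config, _, _, err := digger_config.LoadDiggerConfig("./", true, changedFiles)
	if err != nil {
		return nil, err
	}
	log.Printf("Digger config loaded successfully")
	return config, nil
}
```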