From 912441544ae023062cce5d819885fd71680b72f8 Mon Sep 17 00:00:00 2001 From: motatoes Date: Sun, 2 Nov 2025 19:14:37 -0800 Subject: [PATCH 01/11] remote runs stubbed endpoints --- taco/internal/api/internal.go | 13 ++- taco/internal/api/routes.go | 3 + .../domain/tfe/configuration-versions.go | 26 +++++ taco/internal/domain/tfe/plan.go | 21 ++++ taco/internal/domain/tfe/run.go | 5 - taco/internal/domain/tfe/runs.go | 39 +++++++ taco/internal/domain/tfe/workspace.go | 13 ++- taco/internal/tfe/configuration-versions.go | 100 ++++++++++++++++ taco/internal/tfe/plan.go | 88 ++++++++++++++ taco/internal/tfe/runs.go | 110 ++++++++++++++++++ taco/internal/tfe/workspaces.go | 27 +++-- ui/src/routes/tfe/$.tsx | 14 ++- 12 files changed, 436 insertions(+), 23 deletions(-) create mode 100644 taco/internal/domain/tfe/configuration-versions.go create mode 100644 taco/internal/domain/tfe/plan.go delete mode 100644 taco/internal/domain/tfe/run.go create mode 100644 taco/internal/domain/tfe/runs.go create mode 100644 taco/internal/tfe/configuration-versions.go create mode 100644 taco/internal/tfe/plan.go create mode 100644 taco/internal/tfe/runs.go diff --git a/taco/internal/api/internal.go b/taco/internal/api/internal.go index 44784c131..09b8da117 100644 --- a/taco/internal/api/internal.go +++ b/taco/internal/api/internal.go @@ -173,7 +173,18 @@ func RegisterInternalRoutes(e *echo.Echo, deps Dependencies) { tfeInternal.POST("/workspaces/:workspace_id/state-versions", tfeHandler.CreateStateVersion) tfeInternal.GET("/state-versions/:id/download", tfeHandler.DownloadStateVersion) tfeInternal.GET("/state-versions/:id", tfeHandler.ShowStateVersion) - + + tfeInternal.POST("/workspaces/:workspace_name/configuration-versions", tfeHandler.CreateConfigurationVersions) + tfeInternal.GET("/configuration-versions/:id", tfeHandler.GetConfigurationVersion) + tfeInternal.POST("/runs", tfeHandler.CreateRun) + tfeInternal.GET("/runs/:id", tfeHandler.GetRun) + tfeInternal.GET("/runs/:id/policy-checks", tfeHandler.EmptyListResponse) + tfeInternal.GET("/runs/:id/task-stages", tfeHandler.EmptyListResponse) + tfeInternal.GET("/runs/:id/cost-estimates", tfeHandler.EmptyListResponse) + tfeInternal.GET("/runs/:id/run-events", tfeHandler.EmptyListResponse) + tfeInternal.GET("/plans/:id", tfeHandler.GetPlan) + + log.Println("TFE API endpoints registered at /internal/tfe/api/v2 with webhook auth") // ==================================================================================== diff --git a/taco/internal/api/routes.go b/taco/internal/api/routes.go index d8c85ddd7..540de7407 100644 --- a/taco/internal/api/routes.go +++ b/taco/internal/api/routes.go @@ -275,6 +275,8 @@ func RegisterRoutes(e *echo.Echo, deps Dependencies) { tfeGroup.GET("/state-versions/:id/download", tfeHandler.DownloadStateVersion) tfeGroup.GET("/state-versions/:id", tfeHandler.ShowStateVersion) + tfeGroup.GET("/plans/:planID/logs/:blobId", tfeHandler.GetPlanLogs) + // Upload endpoints exempt from auth middleware (Terraform doesn't send auth headers) // Security: These validate lock ownership and have RBAC checks in handlers // Upload URLs can only be obtained from authenticated CreateStateVersion calls @@ -282,6 +284,7 @@ func RegisterRoutes(e *echo.Echo, deps Dependencies) { tfeSignedUrlsGroup.Use(middleware.VerifySignedURL) tfeSignedUrlsGroup.PUT("/state-versions/:id/upload", tfeHandler.UploadStateVersion) tfeSignedUrlsGroup.PUT("/state-versions/:id/json-upload", tfeHandler.UploadJSONStateOutputs) + tfeSignedUrlsGroup.PUT("/configuration-versions/:id/upload", 
tfeHandler.UploadConfigurationArchive) // Keep discovery endpoints unprotected (needed for terraform login) e.GET("/.well-known/terraform.json", tfeHandler.GetWellKnownJson) diff --git a/taco/internal/domain/tfe/configuration-versions.go b/taco/internal/domain/tfe/configuration-versions.go new file mode 100644 index 000000000..ac127a37b --- /dev/null +++ b/taco/internal/domain/tfe/configuration-versions.go @@ -0,0 +1,26 @@ +package tfe + +// ConfigurationVersionRecord is the "data" object Terraform CLI expects back +// from POST /workspaces/:workspace_id/configuration-versions +// +// It will be wrapped by jsonapi.MarshalPayload(...) into: +// { "data": { "id": "...", "type": "configuration-versions", "attributes": {...}, "relationships": {...} } } +type ConfigurationVersionRecord struct { + ID string `jsonapi:"primary,configuration-versions" json:"id"` + + // ---------- attributes ---------- + AutoQueueRuns bool `jsonapi:"attr,auto-queue-runs" json:"auto-queue-runs"` + Error *string `jsonapi:"attr,error" json:"error"` + ErrorMessage *string `jsonapi:"attr,error-message" json:"error-message"` + Source string `jsonapi:"attr,source" json:"source"` + Speculative bool `jsonapi:"attr,speculative" json:"speculative"` + Status string `jsonapi:"attr,status" json:"status"` + StatusTimestamps map[string]string `jsonapi:"attr,status-timestamps" json:"status-timestamps"` + UploadURL *string `jsonapi:"attr,upload-url" json:"upload-url"` + Provisional bool `jsonapi:"attr,provisional" json:"provisional"` + IngressAttributes *IngressAttributesStub `jsonapi:"relation,ingress-attributes" json:"ingress-attributes"` +} + + +type IngressAttributesStub struct { +} diff --git a/taco/internal/domain/tfe/plan.go b/taco/internal/domain/tfe/plan.go new file mode 100644 index 000000000..d157e8918 --- /dev/null +++ b/taco/internal/domain/tfe/plan.go @@ -0,0 +1,21 @@ +package tfe + +type PlanRecord struct { + ID string `jsonapi:"primary,plans" json:"id"` + + // ----- attributes ----- + Status string `jsonapi:"attr,status" json:"status"` + ResourceAdditions int `jsonapi:"attr,resource-additions" json:"resource-additions"` + ResourceChanges int `jsonapi:"attr,resource-changes" json:"resource-changes"` + ResourceDestructions int `jsonapi:"attr,resource-destructions" json:"resource-destructions"` + LogReadURL string `jsonapi:"attr,log-read-url" json:"log-read-url"` + HasChanges bool `jsonapi:"attr,has-changes" json:"has-changes"` + + // ----- relationships ----- + Run *RunRef `jsonapi:"relation,run" json:"run"` +} + +// Minimal run ref for the relationship +type RunRef struct { + ID string `jsonapi:"primary,runs" json:"id"` +} diff --git a/taco/internal/domain/tfe/run.go b/taco/internal/domain/tfe/run.go deleted file mode 100644 index 6416100a4..000000000 --- a/taco/internal/domain/tfe/run.go +++ /dev/null @@ -1,5 +0,0 @@ -package tfe - -type TFERun struct { - ID string `jsonapi:"primary,runs"` -} diff --git a/taco/internal/domain/tfe/runs.go b/taco/internal/domain/tfe/runs.go new file mode 100644 index 000000000..54cc9a92f --- /dev/null +++ b/taco/internal/domain/tfe/runs.go @@ -0,0 +1,39 @@ +package tfe + + +type TFERun struct { + ID string `jsonapi:"primary,runs" json:"id"` + + // ----- attributes ----- + Status string `jsonapi:"attr,status" json:"status"` + IsDestroy bool `jsonapi:"attr,is-destroy" json:"is-destroy"` + Message string `jsonapi:"attr,message" json:"message"` + PlanOnly bool `jsonapi:"attr,plan-only" json:"plan-only"` + Actions *RunActions `jsonapi:"attr,actions" json:"actions"` + + // ----- relationships 
----- + Plan *PlanRef `jsonapi:"relation,plan" json:"plan"` + Workspace *WorkspaceRef `jsonapi:"relation,workspace" json:"workspace"` + ConfigurationVersion *ConfigurationVersionRef `jsonapi:"relation,configuration-version" json:"configuration-version"` +} + +// Actions block Terraform likes to see on runs +type RunActions struct { + IsCancelable bool `json:"is-cancelable"` + CanApply bool `json:"can-apply"` +} + +// Relationship: plan +type PlanRef struct { + ID string `jsonapi:"primary,plans" json:"id"` +} + +// Relationship: workspace +type WorkspaceRef struct { + ID string `jsonapi:"primary,workspaces" json:"id"` +} + +// Relationship: configuration-version +type ConfigurationVersionRef struct { + ID string `jsonapi:"primary,configuration-versions" json:"id"` +} diff --git a/taco/internal/domain/tfe/workspace.go b/taco/internal/domain/tfe/workspace.go index dc2a94104..42363cbf2 100644 --- a/taco/internal/domain/tfe/workspace.go +++ b/taco/internal/domain/tfe/workspace.go @@ -7,6 +7,13 @@ type WorkspaceVersion struct { semver string `json:"semver"` } +func NewWorkspaceVersion(latest bool, semver string) *WorkspaceVersion { + return &WorkspaceVersion{ + Latest: latest, + semver: semver, + } +} + type TFEWorkspaceActions struct { IsDestroyable bool `json:"is-destroyable"` } @@ -55,15 +62,15 @@ type WorkspaceRecord struct { GlobalRemoteState bool `jsonapi:"attr,global-remote-state" json:"global-remote-state"` Locked bool `jsonapi:"attr,locked" json:"locked"` MigrationEnvironment string `jsonapi:"attr,migration-environment" json:"migration-environment"` - Name string `jsonapi:"attr,Name" json:"Name"` + Name string `jsonapi:"attr,name" json:"name"` Operations bool `jsonapi:"attr,operations" json:"operations"` Permissions *TFEWorkspacePermissions `jsonapi:"attr,permissions" json:"permissions"` QueueAllRuns bool `jsonapi:"attr,queue-all-runs" json:"queue-all-runs"` SpeculativeEnabled bool `jsonapi:"attr,speculative-enabled" json:"speculative-enabled"` - SourceName string `jsonapi:"attr,source-Name" json:"source-Name"` + SourceName string `jsonapi:"attr,source-name" json:"source-name"` SourceURL string `jsonapi:"attr,source-url" json:"source-url"` StructuredRunOutputEnabled bool `jsonapi:"attr,structured-run-output-enabled" json:"structured-run-output-enabled"` - TerraformVersion *WorkspaceVersion `jsonapi:"attr,terraform-version" json:"terraform-version"` + TerraformVersion string `jsonapi:"attr,terraform-version" json:"terraform-version"` TriggerPrefixes []string `jsonapi:"attr,trigger-prefixes" json:"trigger-prefixes"` TriggerPatterns []string `jsonapi:"attr,trigger-patterns" json:"trigger-patterns"` VCSRepo *TFEVCSRepository `jsonapi:"attr,vcs-repo" json:"vcs-repo"` diff --git a/taco/internal/tfe/configuration-versions.go b/taco/internal/tfe/configuration-versions.go new file mode 100644 index 000000000..c0e9d770c --- /dev/null +++ b/taco/internal/tfe/configuration-versions.go @@ -0,0 +1,100 @@ +package tfe + +import ( + "fmt" + "io" + "log/slog" + "net/http" + "os" + "time" + + "github.com/diggerhq/digger/opentaco/internal/auth" + "github.com/diggerhq/digger/opentaco/internal/domain/tfe" + "github.com/google/jsonapi" + "github.com/labstack/echo/v4" +) + +func (h *TfeHandler) GetConfigurationVersion(c echo.Context) error { + cvID := c.Param("id") + + // In a real impl you'd look this up from memory/db. For now assume we stored: + // - that cvID exists + // - upload already happened + // We'll fake a timestamp. 
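+	// (A DB-backed version of this handler would look the record up by cvID via a
+	// repository and return 404 when it is missing; TFEConfigurationVersionRepository
+	// in a later commit adds that persistence, so this stub unconditionally reports
+	// the version as "uploaded".)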
+ uploadedAt := time.Now().UTC().Format(time.RFC3339) + + cv := tfe.ConfigurationVersionRecord{ + ID: cvID, + AutoQueueRuns: false, + Error: nil, + ErrorMessage: nil, + Source: "cli", + Speculative: true, + Status: "uploaded", // <-- important change + StatusTimestamps: map[string]string{ + "uploaded-at": uploadedAt, + }, + UploadURL: nil, // <-- becomes null in JSON now + Provisional: false, + IngressAttributes: nil, // emit relationships.ingress-attributes.data = null + } + + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + c.Response().WriteHeader(http.StatusOK) + + if err := jsonapi.MarshalPayload(c.Response().Writer, &cv); err != nil { + return err + } + return nil +} + +func (h *TfeHandler) CreateConfigurationVersions(c echo.Context) error { + cvId := "cv-1234567890" + publicBase := os.Getenv("OPENTACO_PUBLIC_BASE_URL") + if publicBase == "" { + slog.Error("OPENTACO_PUBLIC_BASE_URL environment variable not set") + return fmt.Errorf("OPENTACO_PUBLIC_BASE_URL environment variable not set") + } + signedUploadUrl, err := auth.SignURL(publicBase, fmt.Sprintf("/tfe/api/v2/configuration-versions/%v/upload", cvId), time.Now().Add(2*time.Minute)) + if err != nil { + return err + } + cv := tfe.ConfigurationVersionRecord{ + ID: cvId, + AutoQueueRuns: false, // you can choose true/false; docs default true + Error: nil, + ErrorMessage: nil, + Source: "cli", // HashiCorp examples show "tfe-api" or "gitlab"; "cli" is fine for CLI-driven runs. + Speculative: true, // for terraform plan in remote mode + Status: "pending", // initial status according to docs + StatusTimestamps: map[string]string{}, + UploadURL: &signedUploadUrl, + Provisional: false, + IngressAttributes: nil, + } + + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + c.Response().WriteHeader(http.StatusCreated) + + if err := jsonapi.MarshalPayload(c.Response().Writer, &cv); err != nil { + fmt.Printf("error marshaling configuration version payload: %v\n", err) + return err + } + + return nil +} + + +func (h *TfeHandler) UploadConfigurationArchive(c echo.Context) error { + configVersionID := c.Param("configVersionID") + + body, err := io.ReadAll(c.Request().Body) + if err != nil { + return c.NoContent(http.StatusBadRequest) + } + + fmt.Printf("received %d bytes for %s\n", len(body), configVersionID) + + // 200 OK, empty body. Terraform does not expect JSON here. 
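+	// (The archive bytes are currently discarded. A persistent version would write
+	// them to blob storage, record the blob ID on the configuration version, and
+	// mark it "uploaded"; the later UpdateConfigurationVersionStatus call, which
+	// accepts an archiveBlobID parameter, covers exactly that.)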
+ return c.NoContent(http.StatusOK) +} \ No newline at end of file diff --git a/taco/internal/tfe/plan.go b/taco/internal/tfe/plan.go new file mode 100644 index 000000000..f9fa29709 --- /dev/null +++ b/taco/internal/tfe/plan.go @@ -0,0 +1,88 @@ +package tfe + +import ( + "fmt" + "log/slog" + "net/http" + "os" + "strconv" + + "github.com/diggerhq/digger/opentaco/internal/domain/tfe" + "github.com/google/jsonapi" + "github.com/labstack/echo/v4" +) + +func (h *TfeHandler) GetPlan(c echo.Context) error { + planID := c.Param("id") + + // This has to match whatever runID you created in POST /runs + runID := "run-abc123" + + publicBase := os.Getenv("OPENTACO_PUBLIC_BASE_URL") + if publicBase == "" { + slog.Error("OPENTACO_PUBLIC_BASE_URL not set") + return fmt.Errorf("OPENTACO_PUBLIC_BASE_URL environment variable not set") + } + + + blobID := "a-really-long-string-for-blob-id-secret" + logsurl := fmt.Sprintf("%v/tfe/api/v2/plans/%s/logs/%s", publicBase, planID, blobID) + + plan := tfe.PlanRecord{ + ID: planID, + Status: "finished", + ResourceAdditions: 0, + ResourceChanges: 0, + ResourceDestructions: 0, + HasChanges: false, + LogReadURL: logsurl, + Run: &tfe.RunRef{ + ID: runID, + }, + } + + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + c.Response().WriteHeader(http.StatusOK) + + if err := jsonapi.MarshalPayload(c.Response().Writer, &plan); err != nil { + fmt.Printf("error marshaling plan payload: %v\n", err) + return err + } + return nil +} + +func (h *TfeHandler) GetPlanLogs(c echo.Context) error { + //TODO: verify the blobID from DB and ensure that it belongs to the right planID + //blobID := c.Param("blobID") + + offset := c.QueryParam("offset") + offsetInt , _ := strconv.ParseInt(offset, 10, 64) + + c.Response().Header().Set(echo.HeaderContentType, "text/plain") + c.Response().WriteHeader(http.StatusOK) + + // Minimal realistic Terraform plan text for "no changes" + logText := `Terraform used the selected providers to generate the following execution plan. +Resource actions are indicated with the following symbols: + + create + - destroy + ~ update in-place + +No changes. Your infrastructure matches the configuration. + +Plan: 0 to add, 0 to change, 0 to destroy. + +NOTE: This streamed logs are FAKEEEE +` + + if offsetInt > 100 { + logText = "" + } + + _, err := c.Response().Writer.Write([]byte(logText)) + if err != nil { + return err + } + return nil +} + diff --git a/taco/internal/tfe/runs.go b/taco/internal/tfe/runs.go new file mode 100644 index 000000000..c9bb8655d --- /dev/null +++ b/taco/internal/tfe/runs.go @@ -0,0 +1,110 @@ +package tfe + +import ( + "fmt" + "net/http" + + "github.com/diggerhq/digger/opentaco/internal/domain/tfe" + "github.com/google/jsonapi" + "github.com/labstack/echo/v4" +) + + +func (h *TfeHandler) GetRun(c echo.Context) error { + runID := c.Param("id") + + // You should look these up from storage in a real impl. + // For now we're going to hardcode stable IDs that match what you + // returned from POST /runs. 
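+	// (These IDs have to stay in sync with the ones minted in CreateRun and GetPlan
+	// for the CLI's polling flow to hang together; the TFERunRepository added in a
+	// later commit replaces this hardcoding with a real lookup.)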
+ planID := "plan-xyz789" + workspaceID := "ws-0b44c5b1-8321-43e3-864d-a2921d004835" + cvID := "cv-1234567890" + + run := tfe.TFERun{ + ID: runID, + Status: "planned_and_finished", + IsDestroy: false, + Message: "Queued manually via Terraform CLI", + PlanOnly: true, + Actions: &tfe.RunActions{ + IsCancelable: false, + CanApply: false, + }, + Plan: &tfe.PlanRef{ + ID: planID, + }, + Workspace: &tfe.WorkspaceRef{ + ID: workspaceID, + }, + ConfigurationVersion: &tfe.ConfigurationVersionRef{ + ID: cvID, + }, + } + + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + c.Response().WriteHeader(http.StatusOK) + + if err := jsonapi.MarshalPayload(c.Response().Writer, &run); err != nil { + fmt.Printf("error marshaling run payload: %v\n", err) + return err + } + return nil +} + + +func (h *TfeHandler) CreateRun(c echo.Context) error { + // You could decode the incoming JSON here to read workspace ID, + // configuration-version ID, message, plan-only, etc. + // For now we’ll just hardcode / stub. + workspaceID := "ws-0b44c5b1-8321-43e3-864d-a2921d004835" + cvID := "cv-1234567890" + + runID := "run-abc123" + planID := "plan-xyz789" + + run := tfe.TFERun{ + ID: runID, + Status: "planning", // Terraform will expect to poll until it becomes "planned_and_finished" + IsDestroy: false, + Message: "Queued manually via Terraform CLI", + PlanOnly: true, + Actions: &tfe.RunActions{ + IsCancelable: true, + CanApply: false, + }, + Plan: &tfe.PlanRef{ + ID: planID, + }, + Workspace: &tfe.WorkspaceRef{ + ID: workspaceID, + }, + ConfigurationVersion: &tfe.ConfigurationVersionRef{ + ID: cvID, + }, + } + + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + c.Response().WriteHeader(http.StatusCreated) + + if err := jsonapi.MarshalPayload(c.Response().Writer, &run); err != nil { + fmt.Printf("error marshaling run payload: %v\n", err) + return err + } + + return nil +} + + +func (h *TfeHandler) EmptyListResponse(c echo.Context) error { + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + c.Response().WriteHeader(http.StatusOK) + + // We can't use jsonapi.MarshalPayload here because that produces + // OnePayload or ManyPayload from structs. We just want a bare + // `{ "data": [] }`, which is valid JSON:API for "no resources". 
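+	// (Assumption: the CLI treats an empty data array for these optional run
+	// sub-resources, e.g. policy checks and cost estimates, as "not in use", so
+	// no pagination metadata is emitted here.)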
+ _, err := c.Response().Writer.Write([]byte(`{"data":[]}`)) + if err != nil { + return err + } + return nil +} \ No newline at end of file diff --git a/taco/internal/tfe/workspaces.go b/taco/internal/tfe/workspaces.go index beddf7118..abbd4571c 100644 --- a/taco/internal/tfe/workspaces.go +++ b/taco/internal/tfe/workspaces.go @@ -415,22 +415,33 @@ func (h *TfeHandler) GetWorkspace(c echo.Context) error { UpdatedAt: time.Time{}, Description: workspaceName, Environment: workspaceName, - ExecutionMode: "local", + ExecutionMode: "remote", FileTriggersEnabled: false, GlobalRemoteState: false, Locked: locked, MigrationEnvironment: "", Name: workspaceName, - Operations: false, - Permissions: nil, + Operations: true, + Permissions: &tfe.TFEWorkspacePermissions{ + CanDestroy: true, + CanForceUnlock: true, + CanLock: true, + CanQueueApply: true, + CanQueueDestroy: true, + CanQueueRun: true, + CanUnlock: true, + CanUpdate: true, + CanReadSettings: true, + CanUpdateVariable: false, + }, QueueAllRuns: false, - SpeculativeEnabled: false, + SpeculativeEnabled: true, SourceName: "", SourceURL: "", StructuredRunOutputEnabled: false, - TerraformVersion: nil, - TriggerPrefixes: nil, - TriggerPatterns: nil, + TerraformVersion: "1.5.6", + TriggerPrefixes: []string{}, + TriggerPatterns: []string{}, VCSRepo: nil, WorkingDirectory: "", ResourceCount: 0, @@ -439,7 +450,7 @@ func (h *TfeHandler) GetWorkspace(c echo.Context) error { PolicyCheckFailures: 0, RunFailures: 0, RunsCount: 0, - TagNames: nil, + TagNames: []string{}, CurrentRun: currentRun, // Include lock details when workspace is locked Organization: &tfe.TFEOrganization{ Name: orgParam, // Return the full org param (includes display name if provided) diff --git a/ui/src/routes/tfe/$.tsx b/ui/src/routes/tfe/$.tsx index f2f1ea089..dd4c90022 100644 --- a/ui/src/routes/tfe/$.tsx +++ b/ui/src/routes/tfe/$.tsx @@ -2,7 +2,7 @@ import { createFileRoute } from '@tanstack/react-router' async function handler({ request }) { const url = new URL(request.url); - + console.log('url', url); // OAuth/discovery paths that don't require token auth (login flow) const isOAuthPath = url.pathname.startsWith('/tfe/app/oauth2/') || @@ -11,12 +11,14 @@ async function handler({ request }) { url.pathname === '/tfe/api/v2/motd'; // Upload paths that use signed URLs (no Bearer token) - const isUploadPath = + const signedUrlPaths = /^\/tfe\/api\/v2\/state-versions\/[^\/]+\/upload$/.test(url.pathname) || - /^\/tfe\/api\/v2\/state-versions\/[^\/]+\/json-upload$/.test(url.pathname); + /^\/tfe\/api\/v2\/state-versions\/[^\/]+\/json-upload$/.test(url.pathname ) || + /^\/tfe\/api\/v2\/configuration-versions\/[^\/]+\/upload$/.test(url.pathname ) || + /^\/tfe\/api\/v2\/plans\/[^\/]+\/logs\/[^\/]+$/.test(url.pathname ); // OAuth and upload paths: forward directly to public statesman endpoints - if (isOAuthPath || isUploadPath) { + if (isOAuthPath || signedUrlPaths) { const outgoingHeaders = new Headers(request.headers); const originalHost = outgoingHeaders.get('host') ?? 
''; if (originalHost) outgoingHeaders.set('x-forwarded-host', originalHost); @@ -108,9 +110,9 @@ async function handler({ request }) { } // Use webhook auth to forward to internal TFE routes - const webhookSecret = process.env.OPENTACO_ENABLE_INTERNAL_ENDPOINTS; + const webhookSecret = process.env.STATESMAN_BACKEND_WEBHOOK_SECRET; if (!webhookSecret) { - console.error('OPENTACO_ENABLE_INTERNAL_ENDPOINTS not configured'); + console.error('STATESMAN_BACKEND_WEBHOOK_SECRET not configured'); return new Response('Internal configuration error', { status: 500 }); } From c9d2617434f14d302eefeeb4a594668de07785db Mon Sep 17 00:00:00 2001 From: Brian Reardon Date: Thu, 13 Nov 2025 18:01:29 -0800 Subject: [PATCH 02/11] wip remote runs --- taco/internal/api/internal.go | 31 +- taco/internal/api/routes.go | 49 +- taco/internal/auth/signed_url.go | 81 ++++ taco/internal/domain/interfaces.go | 130 ++++++ taco/internal/domain/tfe/apply.go | 12 + .../domain/tfe/configuration-versions.go | 2 +- taco/internal/domain/tfe/runs.go | 11 +- taco/internal/query/common/sql_store.go | 26 ++ taco/internal/query/interface.go | 1 + taco/internal/query/types/models.go | 145 ++++++ .../tfe_configuration_version_repository.go | 163 +++++++ .../repositories/tfe_plan_repository.go | 165 +++++++ .../repositories/tfe_run_repository.go | 204 ++++++++ taco/internal/storage/interface.go | 1 + taco/internal/storage/memstore.go | 20 + taco/internal/storage/s3store.go | 20 + taco/internal/tfe/apply.go | 131 ++++++ taco/internal/tfe/apply_executor.go | 200 ++++++++ taco/internal/tfe/configuration-versions.go | 156 +++++-- taco/internal/tfe/plan.go | 126 +++-- taco/internal/tfe/plan_executor.go | 424 +++++++++++++++++ taco/internal/tfe/runs.go | 438 ++++++++++++++++-- taco/internal/tfe/tfe.go | 26 +- taco/internal/unit/handler.go | 27 +- .../mysql/20251114000000_add_tfe_tables.sql | 97 ++++ .../20251114000000_add_tfe_tables.sql | 103 ++++ .../sqlite/20251114000000_add_tfe_tables.sql | 89 ++++ .../20251114000001_add_auto_apply_to_runs.sql | 5 + ui/src/api/statesman_serverFunctions.ts | 23 +- ui/src/api/statesman_units.ts | 15 +- ui/src/components/UnitCreateForm.tsx | 18 +- ui/src/routes/tfe/$.tsx | 2 +- 32 files changed, 2810 insertions(+), 131 deletions(-) create mode 100644 taco/internal/domain/tfe/apply.go create mode 100644 taco/internal/repositories/tfe_configuration_version_repository.go create mode 100644 taco/internal/repositories/tfe_plan_repository.go create mode 100644 taco/internal/repositories/tfe_run_repository.go create mode 100644 taco/internal/tfe/apply.go create mode 100644 taco/internal/tfe/apply_executor.go create mode 100644 taco/internal/tfe/plan_executor.go create mode 100644 taco/migrations/mysql/20251114000000_add_tfe_tables.sql create mode 100644 taco/migrations/postgres/20251114000000_add_tfe_tables.sql create mode 100644 taco/migrations/sqlite/20251114000000_add_tfe_tables.sql create mode 100644 taco/migrations/sqlite/20251114000001_add_auto_apply_to_runs.sql diff --git a/taco/internal/api/internal.go b/taco/internal/api/internal.go index 183282852..e37cf695f 100644 --- a/taco/internal/api/internal.go +++ b/taco/internal/api/internal.go @@ -142,15 +142,34 @@ func RegisterInternalRoutes(e *echo.Echo, deps Dependencies) { // Create identifier resolver for TFE org resolution var tfeIdentifierResolver domain.IdentifierResolver + var runRepo domain.TFERunRepository + var planRepo domain.TFEPlanRepository + var configVerRepo domain.TFEConfigurationVersionRepository + if deps.QueryStore != nil { if db := 
repositories.GetDBFromQueryStore(deps.QueryStore); db != nil { tfeIdentifierResolver = repositories.NewIdentifierResolver(db) + // Create TFE repositories for runs, plans, and configuration versions + runRepo = repositories.NewTFERunRepository(db) + planRepo = repositories.NewTFEPlanRepository(db) + configVerRepo = repositories.NewTFEConfigurationVersionRepository(db) + log.Println("TFE repositories initialized successfully (internal routes)") } } // Create TFE handler with webhook auth context // Pass both wrapped (for authenticated calls) and unwrapped (for signed URLs) repositories - tfeHandler := tfe.NewTFETokenHandler(authHandler, deps.Repository, deps.UnwrappedRepository, deps.BlobStore, deps.RBACManager, tfeIdentifierResolver) + tfeHandler := tfe.NewTFETokenHandler( + authHandler, + deps.Repository, + deps.UnwrappedRepository, + deps.BlobStore, + deps.RBACManager, + tfeIdentifierResolver, + runRepo, + planRepo, + configVerRepo, + ) // TFE group with webhook auth (for UI pass-through) tfeInternal := e.Group("/internal/tfe/api/v2") @@ -179,11 +198,13 @@ func RegisterInternalRoutes(e *echo.Echo, deps Dependencies) { tfeInternal.GET("/configuration-versions/:id", tfeHandler.GetConfigurationVersion) tfeInternal.POST("/runs", tfeHandler.CreateRun) tfeInternal.GET("/runs/:id", tfeHandler.GetRun) - tfeInternal.GET("/runs/:id/policy-checks", tfeHandler.EmptyListResponse) - tfeInternal.GET("/runs/:id/task-stages", tfeHandler.EmptyListResponse) - tfeInternal.GET("/runs/:id/cost-estimates", tfeHandler.EmptyListResponse) - tfeInternal.GET("/runs/:id/run-events", tfeHandler.EmptyListResponse) + tfeInternal.POST("/runs/:id/actions/apply", tfeHandler.ApplyRun) + tfeInternal.GET("/runs/:id/policy-checks", tfeHandler.GetPolicyChecks) + tfeInternal.GET("/runs/:id/task-stages", tfeHandler.GetTaskStages) + tfeInternal.GET("/runs/:id/cost-estimates", tfeHandler.GetCostEstimates) + tfeInternal.GET("/runs/:id/run-events", tfeHandler.GetRunEvents) tfeInternal.GET("/plans/:id", tfeHandler.GetPlan) + tfeInternal.GET("/applies/:id", tfeHandler.GetApply) log.Println("TFE API endpoints registered at /internal/tfe/api/v2 with webhook auth") diff --git a/taco/internal/api/routes.go b/taco/internal/api/routes.go index 791573ff1..1bff84ddf 100644 --- a/taco/internal/api/routes.go +++ b/taco/internal/api/routes.go @@ -250,12 +250,32 @@ func RegisterRoutes(e *echo.Echo, deps Dependencies) { // Unwrapped repository is used for signed URL operations (pre-authorized, no RBAC checks needed) // Create identifier resolver for org resolution var tfeIdentifierResolver domain.IdentifierResolver + var runRepo domain.TFERunRepository + var planRepo domain.TFEPlanRepository + var configVerRepo domain.TFEConfigurationVersionRepository + if deps.QueryStore != nil { if db := repositories.GetDBFromQueryStore(deps.QueryStore); db != nil { tfeIdentifierResolver = repositories.NewIdentifierResolver(db) + // Create TFE repositories for runs, plans, and configuration versions + runRepo = repositories.NewTFERunRepository(db) + planRepo = repositories.NewTFEPlanRepository(db) + configVerRepo = repositories.NewTFEConfigurationVersionRepository(db) + log.Println("TFE repositories initialized successfully") } } - tfeHandler := tfe.NewTFETokenHandler(authHandler, deps.Repository, deps.UnwrappedRepository, deps.BlobStore, deps.RBACManager, tfeIdentifierResolver) + + tfeHandler := tfe.NewTFETokenHandler( + authHandler, + deps.Repository, + deps.UnwrappedRepository, + deps.BlobStore, + deps.RBACManager, + tfeIdentifierResolver, + runRepo, + 
planRepo, + configVerRepo, + ) // Create protected TFE group - opaque tokens only tfeGroup := e.Group("/tfe/api/v2") @@ -276,7 +296,24 @@ func RegisterRoutes(e *echo.Echo, deps Dependencies) { tfeGroup.POST("/workspaces/:workspace_id/state-versions", tfeHandler.CreateStateVersion) tfeGroup.GET("/state-versions/:id", tfeHandler.ShowStateVersion) - tfeGroup.GET("/plans/:planID/logs/:blobId", tfeHandler.GetPlanLogs) + // Configuration version routes + tfeGroup.POST("/workspaces/:workspace_name/configuration-versions", tfeHandler.CreateConfigurationVersions) + tfeGroup.GET("/configuration-versions/:id", tfeHandler.GetConfigurationVersion) + + // Run routes + tfeGroup.POST("/runs", tfeHandler.CreateRun) + tfeGroup.GET("/runs/:id", tfeHandler.GetRun) + tfeGroup.POST("/runs/:id/actions/apply", tfeHandler.ApplyRun) + tfeGroup.GET("/runs/:id/policy-checks", tfeHandler.GetPolicyChecks) + tfeGroup.GET("/runs/:id/task-stages", tfeHandler.GetTaskStages) + tfeGroup.GET("/runs/:id/cost-estimates", tfeHandler.GetCostEstimates) + tfeGroup.GET("/runs/:id/run-events", tfeHandler.GetRunEvents) + + // Plan routes + tfeGroup.GET("/plans/:id", tfeHandler.GetPlan) + + // Apply routes + tfeGroup.GET("/applies/:id", tfeHandler.GetApply) // Upload endpoints exempt from auth middleware (Terraform doesn't send auth headers) // Security: These validate lock ownership and have RBAC checks in handlers @@ -287,6 +324,14 @@ func RegisterRoutes(e *echo.Echo, deps Dependencies) { tfeSignedUrlsGroup.PUT("/state-versions/:id/upload", tfeHandler.UploadStateVersion) tfeSignedUrlsGroup.PUT("/state-versions/:id/json-upload", tfeHandler.UploadJSONStateOutputs) tfeSignedUrlsGroup.PUT("/configuration-versions/:id/upload", tfeHandler.UploadConfigurationArchive) + + // Plan log streaming - token-based auth (token embedded in path, not query string) + // Security: Time-limited HMAC-signed tokens, Terraform CLI preserves path + e.GET("/tfe/api/v2/plans/:planID/logs/:token", tfeHandler.GetPlanLogs) + + // Apply log streaming - token-based auth (token embedded in path, not query string) + // Security: Time-limited HMAC-signed tokens, Terraform CLI preserves path + e.GET("/tfe/api/v2/applies/:applyID/logs/:token", tfeHandler.GetApplyLogs) // Keep discovery endpoints unprotected (needed for terraform login) e.GET("/.well-known/terraform.json", tfeHandler.GetWellKnownJson) diff --git a/taco/internal/auth/signed_url.go b/taco/internal/auth/signed_url.go index ed9450405..2207e7f62 100644 --- a/taco/internal/auth/signed_url.go +++ b/taco/internal/auth/signed_url.go @@ -7,6 +7,7 @@ import ( "fmt" "net/url" "strconv" + "strings" "time" "github.com/diggerhq/digger/opentaco/internal/config" @@ -65,4 +66,84 @@ func VerifySignedUrl(signedUrl string) error { return fmt.Errorf("the signed url is invalid") } return nil +} + +// GenerateLogStreamToken creates a time-limited token for log streaming +// Format: {expiry-unix}.{base64-hmac-signature} +// This is designed to be embedded in URL paths (not query strings) since +// Terraform CLI preserves paths but strips/replaces query parameters +func GenerateLogStreamToken(planID string, validFor time.Duration) (string, error) { + secret, err := config.GetConfig().GetSecretKey() + if err != nil { + return "", fmt.Errorf("failed to get secret key: %w", err) + } + + expiry := time.Now().Add(validFor).Unix() + expiryStr := strconv.FormatInt(expiry, 10) + + // Compute HMAC: HMAC(planID + expiry) + mac := hmac.New(sha256.New, []byte(secret)) + mac.Write([]byte(planID + expiryStr)) + sig := 
base64.URLEncoding.EncodeToString(mac.Sum(nil)) + + // Token format: expiry.signature (URL-safe, no special chars) + token := expiryStr + "." + sig + return token, nil +} + +// VerifyLogStreamToken verifies a log streaming token for a specific plan +// Token format: {expiry}.{signature} (embedded in URL path, not query string) +func VerifyLogStreamToken(token string, planID string) bool { + secret, err := config.GetConfig().GetSecretKey() + if err != nil { + fmt.Printf("[VerifyLogStreamToken] Failed to get secret key: %v\n", err) + return false + } + + // Parse token: expiry.signature (use strings.Split - simpler!) + parts := strings.SplitN(token, ".", 2) + if len(parts) != 2 { + fmt.Printf("[VerifyLogStreamToken] Invalid token format (expected expiry.signature): %s\n", token) + return false + } + + expiryStr := parts[0] + sig := parts[1] + + fmt.Printf("[VerifyLogStreamToken] planID=%s, expiry=%s, sig=%s...\n", planID, expiryStr, sig[:min(20, len(sig))]) + + // Check expiry + expiry, err := strconv.ParseInt(expiryStr, 10, 64) + if err != nil { + fmt.Printf("[VerifyLogStreamToken] Failed to parse expiry: %v\n", err) + return false + } + + now := time.Now().Unix() + if now > expiry { + fmt.Printf("[VerifyLogStreamToken] Token expired (now=%d, expiry=%d, diff=%d sec)\n", now, expiry, now-expiry) + return false + } + + // Verify signature using same HMAC logic as generation + mac := hmac.New(sha256.New, []byte(secret)) + mac.Write([]byte(planID + expiryStr)) + expectedSig := base64.URLEncoding.EncodeToString(mac.Sum(nil)) + + isValid := hmac.Equal([]byte(sig), []byte(expectedSig)) + if !isValid { + fmt.Printf("[VerifyLogStreamToken] SIGNATURE MISMATCH - expected=%s..., got=%s...\n", + expectedSig[:min(20, len(expectedSig))], sig[:min(20, len(sig))]) + } else { + fmt.Printf("[VerifyLogStreamToken] ✓ Token valid for planID=%s\n", planID) + } + + return isValid +} + +func min(a, b int) int { + if a < b { + return a + } + return b } \ No newline at end of file diff --git a/taco/internal/domain/interfaces.go b/taco/internal/domain/interfaces.go index bd3bd2410..d749aa259 100644 --- a/taco/internal/domain/interfaces.go +++ b/taco/internal/domain/interfaces.go @@ -61,6 +61,57 @@ type TFEOperations interface { StateOperations } +// TFERunRepository manages TFE run lifecycle +type TFERunRepository interface { + // Create a new run + CreateRun(ctx context.Context, run *TFERun) error + + // Get run by ID + GetRun(ctx context.Context, runID string) (*TFERun, error) + + // List runs for a unit (workspace) + ListRunsForUnit(ctx context.Context, unitID string, limit int) ([]*TFERun, error) + + // Update run status + UpdateRunStatus(ctx context.Context, runID string, status string) error + + // Update run with plan ID + UpdateRunPlanID(ctx context.Context, runID string, planID string) error + + // Update run status and can_apply together + UpdateRunStatusAndCanApply(ctx context.Context, runID string, status string, canApply bool) error +} + +// TFEPlanRepository manages TFE plan lifecycle +type TFEPlanRepository interface { + // Create a new plan + CreatePlan(ctx context.Context, plan *TFEPlan) error + + // Get plan by ID + GetPlan(ctx context.Context, planID string) (*TFEPlan, error) + + // Update plan status and results + UpdatePlan(ctx context.Context, planID string, updates *TFEPlanUpdate) error + + // Get plan by run ID + GetPlanByRunID(ctx context.Context, runID string) (*TFEPlan, error) +} + +// TFEConfigurationVersionRepository manages configuration versions +type TFEConfigurationVersionRepository 
interface { + // Create a new configuration version + CreateConfigurationVersion(ctx context.Context, cv *TFEConfigurationVersion) error + + // Get configuration version by ID + GetConfigurationVersion(ctx context.Context, cvID string) (*TFEConfigurationVersion, error) + + // Update configuration version status (and optionally the archive blob ID) + UpdateConfigurationVersionStatus(ctx context.Context, cvID string, status string, uploadedAt *time.Time, archiveBlobID *string) error + + // List configuration versions for a unit (workspace) + ListConfigurationVersionsForUnit(ctx context.Context, unitID string, limit int) ([]*TFEConfigurationVersion, error) +} + // ============================================ // Full Repository Interface // ============================================ @@ -156,3 +207,82 @@ func DecodeUnitID(encoded string) string { return NormalizeUnitID(encoded) } +// ============================================ +// TFE Domain Models +// ============================================ + +// TFERun represents a Terraform run (plan/apply execution) +type TFERun struct { + ID string + OrgID string + UnitID string + CreatedAt time.Time + UpdatedAt time.Time + Status string + IsDestroy bool + Message string + PlanOnly bool + AutoApply bool // Whether to auto-trigger apply after successful plan + Source string + IsCancelable bool + CanApply bool + ConfigurationVersionID string + PlanID *string + ApplyID *string + CreatedBy string + ApplyLogBlobID *string +} + +// TFEPlan represents a Terraform plan execution +type TFEPlan struct { + ID string + OrgID string + RunID string + CreatedAt time.Time + UpdatedAt time.Time + Status string + ResourceAdditions int + ResourceChanges int + ResourceDestructions int + HasChanges bool + LogBlobID *string + LogReadURL *string + PlanOutputBlobID *string + PlanOutputJSON *string + CreatedBy string +} + +// TFEPlanUpdate contains fields that can be updated on a plan +type TFEPlanUpdate struct { + Status *string + ResourceAdditions *int + ResourceChanges *int + ResourceDestructions *int + HasChanges *bool + LogBlobID *string + LogReadURL *string + PlanOutputBlobID *string + PlanOutputJSON *string +} + +// TFEConfigurationVersion represents an uploaded Terraform configuration +type TFEConfigurationVersion struct { + ID string + OrgID string + UnitID string + CreatedAt time.Time + UpdatedAt time.Time + Status string + Source string + Speculative bool + AutoQueueRuns bool + Provisional bool + Error *string + ErrorMessage *string + UploadURL *string + UploadedAt *time.Time + ArchiveBlobID *string + StatusTimestamps string + CreatedBy string +} + diff --git a/taco/internal/domain/tfe/apply.go b/taco/internal/domain/tfe/apply.go new file mode 100644 index 000000000..b309881fa --- /dev/null +++ b/taco/internal/domain/tfe/apply.go @@ -0,0 +1,12 @@ +package tfe + +// ApplyRecord represents a Terraform apply operation +type ApplyRecord struct { + ID string `jsonapi:"primary,applies" json:"id"` + Status string `jsonapi:"attr,status" json:"status"` + LogReadURL string `jsonapi:"attr,log-read-url" json:"log-read-url"` + + // Relationship to run (RunRef is declared in plan.go) + Run *RunRef `jsonapi:"relation,run,omitempty" json:"run,omitempty"` +} + diff --git a/taco/internal/domain/tfe/configuration-versions.go b/taco/internal/domain/tfe/configuration-versions.go index ac127a37b..2607d982f 100644 --- a/taco/internal/domain/tfe/configuration-versions.go +++ b/taco/internal/domain/tfe/configuration-versions.go @@ -18,7 +18,7 @@ type ConfigurationVersionRecord struct { 
StatusTimestamps map[string]string `jsonapi:"attr,status-timestamps" json:"status-timestamps"` UploadURL *string `jsonapi:"attr,upload-url" json:"upload-url"` Provisional bool `jsonapi:"attr,provisional" json:"provisional"` - IngressAttributes *IngressAttributesStub `jsonapi:"relation,ingress-attributes" json:"ingress-attributes"` + // IngressAttributes omitted - not used in remote execution mode } diff --git a/taco/internal/domain/tfe/runs.go b/taco/internal/domain/tfe/runs.go index 54cc9a92f..59405cae5 100644 --- a/taco/internal/domain/tfe/runs.go +++ b/taco/internal/domain/tfe/runs.go @@ -13,14 +13,16 @@ type TFERun struct { // ----- relationships ----- Plan *PlanRef `jsonapi:"relation,plan" json:"plan"` + Apply *ApplyRef `jsonapi:"relation,apply,omitempty" json:"apply,omitempty"` Workspace *WorkspaceRef `jsonapi:"relation,workspace" json:"workspace"` ConfigurationVersion *ConfigurationVersionRef `jsonapi:"relation,configuration-version" json:"configuration-version"` } // Actions block Terraform likes to see on runs type RunActions struct { - IsCancelable bool `json:"is-cancelable"` - CanApply bool `json:"can-apply"` + IsCancelable bool `json:"is-cancelable"` + IsConfirmable bool `json:"is-confirmable"` + CanApply bool `json:"can-apply"` } // Relationship: plan @@ -28,6 +30,11 @@ type PlanRef struct { ID string `jsonapi:"primary,plans" json:"id"` } +// Relationship: apply +type ApplyRef struct { + ID string `jsonapi:"primary,applies" json:"id"` +} + // Relationship: workspace type WorkspaceRef struct { ID string `jsonapi:"primary,workspaces" json:"id"` diff --git a/taco/internal/query/common/sql_store.go b/taco/internal/query/common/sql_store.go index 9ef433ba6..78be2b059 100644 --- a/taco/internal/query/common/sql_store.go +++ b/taco/internal/query/common/sql_store.go @@ -250,6 +250,32 @@ func (s *SQLStore) SyncDeleteUnit(ctx context.Context, blobPath string) error { Delete(&types.Unit{}).Error } +// UpdateUnitTFESettings updates TFE-specific settings for a unit +func (s *SQLStore) UpdateUnitTFESettings(ctx context.Context, unitID string, autoApply *bool, executionMode *string, terraformVersion *string, workingDirectory *string) error { + updates := make(map[string]interface{}) + + if autoApply != nil { + updates["tfe_auto_apply"] = *autoApply + } + if executionMode != nil { + updates["tfe_execution_mode"] = *executionMode + } + if terraformVersion != nil { + updates["tfe_terraform_version"] = *terraformVersion + } + if workingDirectory != nil { + updates["tfe_working_directory"] = *workingDirectory + } + + if len(updates) == 0 { + return nil // Nothing to update + } + + return s.db.WithContext(ctx).Model(&types.Unit{}). + Where("id = ?", unitID). 
+ Updates(updates).Error +} + func (s *SQLStore) SyncUnitLock(ctx context.Context, blobPath string, lockID, lockWho string, lockCreated time.Time) error { orgUUID, unitUUID, err := s.parseBlobPath(ctx, blobPath) if err != nil { diff --git a/taco/internal/query/interface.go b/taco/internal/query/interface.go index c8e041a4a..e5c4d2307 100644 --- a/taco/internal/query/interface.go +++ b/taco/internal/query/interface.go @@ -19,6 +19,7 @@ type UnitQuery interface { SyncUnitLock(ctx context.Context, unitName string, lockID, lockWho string, lockCreated time.Time) error SyncUnitUnlock(ctx context.Context, unitName string) error SyncDeleteUnit(ctx context.Context, unitName string) error + UpdateUnitTFESettings(ctx context.Context, unitID string, autoApply *bool, executionMode *string, terraformVersion *string, workingDirectory *string) error } type RBACQuery interface { diff --git a/taco/internal/query/types/models.go b/taco/internal/query/types/models.go index f7f139c79..9d44fa15d 100644 --- a/taco/internal/query/types/models.go +++ b/taco/internal/query/types/models.go @@ -1,6 +1,7 @@ package types import ( + "strings" "time" "github.com/google/uuid" "gorm.io/gorm" @@ -150,6 +151,12 @@ type Unit struct { LockWho string `gorm:"default:''"` LockCreated *time.Time Tags []Tag `gorm:"many2many:unit_tags;constraint:OnDelete:CASCADE,OnUpdate:CASCADE"` + + // TFE workspace settings (nullable for non-TFE usage) + TFEAutoApply *bool `gorm:"default:null"` + TFETerraformVersion *string `gorm:"type:varchar(50);default:null"` + TFEWorkingDirectory *string `gorm:"type:varchar(500);default:null"` + TFEExecutionMode *string `gorm:"type:varchar(50);default:null"` // 'remote', 'local', 'agent' } func (u *Unit) BeforeCreate(tx *gorm.DB) error { @@ -217,6 +224,141 @@ func (t *Token) BeforeCreate(tx *gorm.DB) error { func (Token) TableName() string { return "tokens" } +// TFE Run model - represents a Terraform run (plan/apply execution) +type TFERun struct { + ID string `gorm:"type:varchar(36);primaryKey"` + OrgID string `gorm:"type:varchar(36);index;not null"` + UnitID string `gorm:"type:varchar(36);not null;index"` // FK to units.id (the workspace) + CreatedAt time.Time `gorm:"autoCreateTime"` + UpdatedAt time.Time `gorm:"autoUpdateTime"` + + // TFE-specific attributes + Status string `gorm:"type:varchar(50);not null;default:'pending'"` + IsDestroy bool `gorm:"default:false"` + Message string `gorm:"type:text"` + PlanOnly bool `gorm:"default:true"` + AutoApply bool `gorm:"default:false"` // Whether to auto-trigger apply after successful plan + Source string `gorm:"type:varchar(50);default:'cli'"` // 'cli', 'api', 'ui', 'vcs' + + // Actions (stored as fields) + IsCancelable bool `gorm:"default:true"` + CanApply bool `gorm:"default:false"` + + // Relationships (foreign keys) + ConfigurationVersionID string `gorm:"type:varchar(36);not null;index"` + PlanID *string `gorm:"type:varchar(36);index"` // Nullable until plan is created + ApplyID *string `gorm:"type:varchar(36);index"` // Nullable if plan-only + + // Blob storage references + ApplyLogBlobID *string `gorm:"type:varchar(255)"` // Blob ID for apply logs + + // Metadata + CreatedBy string `gorm:"type:varchar(255)"` + + // Associations + Unit *Unit `gorm:"foreignKey:UnitID"` + Plan *TFEPlan `gorm:"foreignKey:PlanID"` + ConfigurationVersion *TFEConfigurationVersion `gorm:"foreignKey:ConfigurationVersionID"` +} + +func (r *TFERun) BeforeCreate(tx *gorm.DB) error { + if r.ID == "" { + // Generate TFE-style ID: run-{uuid} (full UUID for compatibility) + r.ID = "run-" 
+ strings.ReplaceAll(uuid.New().String(), "-", "") + } + return nil +} + +func (TFERun) TableName() string { return "tfe_runs" } + +// TFE Plan model - represents a Terraform plan execution +type TFEPlan struct { + ID string `gorm:"type:varchar(36);primaryKey"` + OrgID string `gorm:"type:varchar(36);index;not null"` + RunID string `gorm:"type:varchar(36);not null;index"` + CreatedAt time.Time `gorm:"autoCreateTime"` + UpdatedAt time.Time `gorm:"autoUpdateTime"` + + // Plan attributes + Status string `gorm:"type:varchar(50);not null;default:'pending'"` + ResourceAdditions int `gorm:"default:0"` + ResourceChanges int `gorm:"default:0"` + ResourceDestructions int `gorm:"default:0"` + HasChanges bool `gorm:"default:false"` + + // Log storage - reference to blob storage + LogBlobID *string `gorm:"type:varchar(255)"` // Reference to blob storage key + LogReadURL *string `gorm:"type:text"` // Signed URL for log access (temporary) + + // Plan output/data stored in blob storage or as JSON + PlanOutputBlobID *string `gorm:"type:varchar(255)"` // Reference to blob storage for large plans + PlanOutputJSON *string `gorm:"type:longtext"` // Inline JSON for smaller plans + + // Metadata + CreatedBy string `gorm:"type:varchar(255)"` + + // Associations + Run *TFERun `gorm:"foreignKey:RunID"` +} + +func (p *TFEPlan) BeforeCreate(tx *gorm.DB) error { + if p.ID == "" { + // Generate TFE-style ID: plan-{uuid} (full UUID for compatibility) + p.ID = "plan-" + strings.ReplaceAll(uuid.New().String(), "-", "") + } + return nil +} + +func (TFEPlan) TableName() string { return "tfe_plans" } + +// TFE Configuration Version model - represents an uploaded Terraform configuration +type TFEConfigurationVersion struct { + ID string `gorm:"type:varchar(36);primaryKey"` + OrgID string `gorm:"type:varchar(36);index;not null"` + UnitID string `gorm:"type:varchar(36);not null;index"` // FK to units.id (the workspace) + CreatedAt time.Time `gorm:"autoCreateTime"` + UpdatedAt time.Time `gorm:"autoUpdateTime"` + + // Configuration version attributes + Status string `gorm:"type:varchar(50);not null;default:'pending'"` + Source string `gorm:"type:varchar(50);default:'cli'"` // 'cli', 'api', 'vcs', 'terraform' + Speculative bool `gorm:"default:true"` + AutoQueueRuns bool `gorm:"default:false"` + Provisional bool `gorm:"default:false"` + + // Error handling + Error *string `gorm:"type:text"` + ErrorMessage *string `gorm:"type:text"` + + // Upload handling + UploadURL *string `gorm:"type:text"` // Signed upload URL (temporary) + UploadedAt *time.Time // When upload completed + ArchiveBlobID *string `gorm:"type:varchar(255)"` // Reference to stored archive in blob storage + + // Status timestamps stored as JSON + StatusTimestamps string `gorm:"type:json;default:'{}'"` // JSON map of status -> timestamp + + // Metadata + CreatedBy string `gorm:"type:varchar(255)"` + + // Associations + Unit *Unit `gorm:"foreignKey:UnitID"` + Runs []TFERun `gorm:"foreignKey:ConfigurationVersionID"` +} + +func (cv *TFEConfigurationVersion) BeforeCreate(tx *gorm.DB) error { + if cv.ID == "" { + // Generate TFE-style ID: cv-{uuid} (full UUID for compatibility) + cv.ID = "cv-" + strings.ReplaceAll(uuid.New().String(), "-", "") + } + if cv.StatusTimestamps == "" { + cv.StatusTimestamps = "{}" + } + return nil +} + +func (TFEConfigurationVersion) TableName() string { return "tfe_configuration_versions" } + var DefaultModels = []any{ &Organization{}, &User{}, @@ -232,4 +374,7 @@ var DefaultModels = []any{ &Tag{}, &UnitTag{}, &Token{}, + &TFERun{}, + &TFEPlan{}, 
+ &TFEConfigurationVersion{}, } \ No newline at end of file diff --git a/taco/internal/repositories/tfe_configuration_version_repository.go b/taco/internal/repositories/tfe_configuration_version_repository.go new file mode 100644 index 000000000..0d8f9981c --- /dev/null +++ b/taco/internal/repositories/tfe_configuration_version_repository.go @@ -0,0 +1,163 @@ +package repositories + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/diggerhq/digger/opentaco/internal/domain" + "github.com/diggerhq/digger/opentaco/internal/query/types" + "gorm.io/gorm" +) + +// TFEConfigurationVersionRepository manages TFE configuration versions using GORM +type TFEConfigurationVersionRepository struct { + db *gorm.DB +} + +// NewTFEConfigurationVersionRepository creates a new TFE configuration version repository +func NewTFEConfigurationVersionRepository(db *gorm.DB) *TFEConfigurationVersionRepository { + return &TFEConfigurationVersionRepository{db: db} +} + +// CreateConfigurationVersion creates a new configuration version +func (r *TFEConfigurationVersionRepository) CreateConfigurationVersion(ctx context.Context, cv *domain.TFEConfigurationVersion) error { + fmt.Printf("DEBUG CreateConfigurationVersion: Input cv.ID=%s, cv.UnitID=%s, cv.OrgID=%s\n", cv.ID, cv.UnitID, cv.OrgID) + + dbCV := &types.TFEConfigurationVersion{ + ID: cv.ID, + OrgID: cv.OrgID, + UnitID: cv.UnitID, + Status: cv.Status, + Source: cv.Source, + Speculative: cv.Speculative, + AutoQueueRuns: cv.AutoQueueRuns, + Provisional: cv.Provisional, + Error: cv.Error, + ErrorMessage: cv.ErrorMessage, + UploadURL: cv.UploadURL, + UploadedAt: cv.UploadedAt, + ArchiveBlobID: cv.ArchiveBlobID, + StatusTimestamps: cv.StatusTimestamps, + CreatedBy: cv.CreatedBy, + } + + fmt.Printf("DEBUG Before GORM Create: dbCV.ID=%s, dbCV.UnitID=%s, dbCV.OrgID=%s\n", dbCV.ID, dbCV.UnitID, dbCV.OrgID) + + if err := r.db.WithContext(ctx).Create(dbCV).Error; err != nil { + fmt.Printf("DEBUG GORM Create ERROR: %v, Generated ID was: %s\n", err, dbCV.ID) + return fmt.Errorf("failed to create configuration version: %w", err) + } + + fmt.Printf("DEBUG After GORM Create SUCCESS: Generated ID=%s\n", dbCV.ID) + + // Update the domain model with generated timestamps and ID + cv.ID = dbCV.ID + cv.CreatedAt = dbCV.CreatedAt + cv.UpdatedAt = dbCV.UpdatedAt + + return nil +} + +// GetConfigurationVersion retrieves a configuration version by ID +func (r *TFEConfigurationVersionRepository) GetConfigurationVersion(ctx context.Context, cvID string) (*domain.TFEConfigurationVersion, error) { + var dbCV types.TFEConfigurationVersion + + if err := r.db.WithContext(ctx).Where("id = ?", cvID).First(&dbCV).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, fmt.Errorf("configuration version not found: %s", cvID) + } + return nil, fmt.Errorf("failed to get configuration version: %w", err) + } + + return &domain.TFEConfigurationVersion{ + ID: dbCV.ID, + OrgID: dbCV.OrgID, + UnitID: dbCV.UnitID, + CreatedAt: dbCV.CreatedAt, + UpdatedAt: dbCV.UpdatedAt, + Status: dbCV.Status, + Source: dbCV.Source, + Speculative: dbCV.Speculative, + AutoQueueRuns: dbCV.AutoQueueRuns, + Provisional: dbCV.Provisional, + Error: dbCV.Error, + ErrorMessage: dbCV.ErrorMessage, + UploadURL: dbCV.UploadURL, + UploadedAt: dbCV.UploadedAt, + ArchiveBlobID: dbCV.ArchiveBlobID, + StatusTimestamps: dbCV.StatusTimestamps, + CreatedBy: dbCV.CreatedBy, + }, nil +} + +// UpdateConfigurationVersionStatus updates the status and optionally the uploaded timestamp +func (r 
*TFEConfigurationVersionRepository) UpdateConfigurationVersionStatus(ctx context.Context, cvID string, status string, uploadedAt *time.Time, archiveBlobID *string) error { + updateMap := map[string]interface{}{ + "status": status, + } + + if uploadedAt != nil { + updateMap["uploaded_at"] = *uploadedAt + } + + if archiveBlobID != nil { + updateMap["archive_blob_id"] = *archiveBlobID + } + + result := r.db.WithContext(ctx). + Model(&types.TFEConfigurationVersion{}). + Where("id = ?", cvID). + Updates(updateMap) + + if result.Error != nil { + return fmt.Errorf("failed to update configuration version status: %w", result.Error) + } + + if result.RowsAffected == 0 { + return fmt.Errorf("configuration version not found: %s", cvID) + } + + return nil +} + +// ListConfigurationVersionsForUnit retrieves configuration versions for a specific unit (workspace) +func (r *TFEConfigurationVersionRepository) ListConfigurationVersionsForUnit(ctx context.Context, unitID string, limit int) ([]*domain.TFEConfigurationVersion, error) { + var dbCVs []types.TFEConfigurationVersion + + query := r.db.WithContext(ctx).Where("unit_id = ?", unitID).Order("created_at DESC") + if limit > 0 { + query = query.Limit(limit) + } + + if err := query.Find(&dbCVs).Error; err != nil { + return nil, fmt.Errorf("failed to list configuration versions for unit: %w", err) + } + + cvs := make([]*domain.TFEConfigurationVersion, len(dbCVs)) + for i, dbCV := range dbCVs { + cvs[i] = &domain.TFEConfigurationVersion{ + ID: dbCV.ID, + OrgID: dbCV.OrgID, + UnitID: dbCV.UnitID, + CreatedAt: dbCV.CreatedAt, + UpdatedAt: dbCV.UpdatedAt, + Status: dbCV.Status, + Source: dbCV.Source, + Speculative: dbCV.Speculative, + AutoQueueRuns: dbCV.AutoQueueRuns, + Provisional: dbCV.Provisional, + Error: dbCV.Error, + ErrorMessage: dbCV.ErrorMessage, + UploadURL: dbCV.UploadURL, + UploadedAt: dbCV.UploadedAt, + ArchiveBlobID: dbCV.ArchiveBlobID, + StatusTimestamps: dbCV.StatusTimestamps, + CreatedBy: dbCV.CreatedBy, + } + } + + return cvs, nil +} + diff --git a/taco/internal/repositories/tfe_plan_repository.go b/taco/internal/repositories/tfe_plan_repository.go new file mode 100644 index 000000000..15fb8b5e6 --- /dev/null +++ b/taco/internal/repositories/tfe_plan_repository.go @@ -0,0 +1,165 @@ +package repositories + +import ( + "context" + "errors" + "fmt" + + "github.com/diggerhq/digger/opentaco/internal/domain" + "github.com/diggerhq/digger/opentaco/internal/query/types" + "gorm.io/gorm" +) + +// TFEPlanRepository manages TFE plans using GORM +type TFEPlanRepository struct { + db *gorm.DB +} + +// NewTFEPlanRepository creates a new TFE plan repository +func NewTFEPlanRepository(db *gorm.DB) *TFEPlanRepository { + return &TFEPlanRepository{db: db} +} + +// CreatePlan creates a new TFE plan +func (r *TFEPlanRepository) CreatePlan(ctx context.Context, plan *domain.TFEPlan) error { + dbPlan := &types.TFEPlan{ + ID: plan.ID, + OrgID: plan.OrgID, + RunID: plan.RunID, + Status: plan.Status, + ResourceAdditions: plan.ResourceAdditions, + ResourceChanges: plan.ResourceChanges, + ResourceDestructions: plan.ResourceDestructions, + HasChanges: plan.HasChanges, + LogBlobID: plan.LogBlobID, + LogReadURL: plan.LogReadURL, + PlanOutputBlobID: plan.PlanOutputBlobID, + PlanOutputJSON: plan.PlanOutputJSON, + CreatedBy: plan.CreatedBy, + } + + if err := r.db.WithContext(ctx).Create(dbPlan).Error; err != nil { + return fmt.Errorf("failed to create plan: %w", err) + } + + // Update the domain model with generated ID and timestamps + plan.ID = dbPlan.ID + plan.CreatedAt = 
dbPlan.CreatedAt + plan.UpdatedAt = dbPlan.UpdatedAt + + return nil +} + +// GetPlan retrieves a plan by ID +func (r *TFEPlanRepository) GetPlan(ctx context.Context, planID string) (*domain.TFEPlan, error) { + var dbPlan types.TFEPlan + + if err := r.db.WithContext(ctx).Where("id = ?", planID).First(&dbPlan).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, fmt.Errorf("plan not found: %s", planID) + } + return nil, fmt.Errorf("failed to get plan: %w", err) + } + + return &domain.TFEPlan{ + ID: dbPlan.ID, + OrgID: dbPlan.OrgID, + RunID: dbPlan.RunID, + CreatedAt: dbPlan.CreatedAt, + UpdatedAt: dbPlan.UpdatedAt, + Status: dbPlan.Status, + ResourceAdditions: dbPlan.ResourceAdditions, + ResourceChanges: dbPlan.ResourceChanges, + ResourceDestructions: dbPlan.ResourceDestructions, + HasChanges: dbPlan.HasChanges, + LogBlobID: dbPlan.LogBlobID, + LogReadURL: dbPlan.LogReadURL, + PlanOutputBlobID: dbPlan.PlanOutputBlobID, + PlanOutputJSON: dbPlan.PlanOutputJSON, + CreatedBy: dbPlan.CreatedBy, + }, nil +} + +// GetPlanByRunID retrieves a plan by run ID +func (r *TFEPlanRepository) GetPlanByRunID(ctx context.Context, runID string) (*domain.TFEPlan, error) { + var dbPlan types.TFEPlan + + if err := r.db.WithContext(ctx).Where("run_id = ?", runID).First(&dbPlan).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, fmt.Errorf("plan not found for run: %s", runID) + } + return nil, fmt.Errorf("failed to get plan by run ID: %w", err) + } + + return &domain.TFEPlan{ + ID: dbPlan.ID, + OrgID: dbPlan.OrgID, + RunID: dbPlan.RunID, + CreatedAt: dbPlan.CreatedAt, + UpdatedAt: dbPlan.UpdatedAt, + Status: dbPlan.Status, + ResourceAdditions: dbPlan.ResourceAdditions, + ResourceChanges: dbPlan.ResourceChanges, + ResourceDestructions: dbPlan.ResourceDestructions, + HasChanges: dbPlan.HasChanges, + LogBlobID: dbPlan.LogBlobID, + LogReadURL: dbPlan.LogReadURL, + PlanOutputBlobID: dbPlan.PlanOutputBlobID, + PlanOutputJSON: dbPlan.PlanOutputJSON, + CreatedBy: dbPlan.CreatedBy, + }, nil +} + +// UpdatePlan updates a plan with the provided fields +func (r *TFEPlanRepository) UpdatePlan(ctx context.Context, planID string, updates *domain.TFEPlanUpdate) error { + // Build update map dynamically based on non-nil fields + updateMap := make(map[string]interface{}) + + if updates.Status != nil { + updateMap["status"] = *updates.Status + } + if updates.ResourceAdditions != nil { + updateMap["resource_additions"] = *updates.ResourceAdditions + } + if updates.ResourceChanges != nil { + updateMap["resource_changes"] = *updates.ResourceChanges + } + if updates.ResourceDestructions != nil { + updateMap["resource_destructions"] = *updates.ResourceDestructions + } + if updates.HasChanges != nil { + updateMap["has_changes"] = *updates.HasChanges + } + if updates.LogBlobID != nil { + updateMap["log_blob_id"] = *updates.LogBlobID + } + if updates.LogReadURL != nil { + updateMap["log_read_url"] = *updates.LogReadURL + } + if updates.PlanOutputBlobID != nil { + updateMap["plan_output_blob_id"] = *updates.PlanOutputBlobID + } + if updates.PlanOutputJSON != nil { + updateMap["plan_output_json"] = *updates.PlanOutputJSON + } + + if len(updateMap) == 0 { + return nil // Nothing to update + } + + result := r.db.WithContext(ctx). + Model(&types.TFEPlan{}). + Where("id = ?", planID). 
+ Updates(updateMap) + + if result.Error != nil { + return fmt.Errorf("failed to update plan: %w", result.Error) + } + + if result.RowsAffected == 0 { + return fmt.Errorf("plan not found: %s", planID) + } + + return nil +} + diff --git a/taco/internal/repositories/tfe_run_repository.go b/taco/internal/repositories/tfe_run_repository.go new file mode 100644 index 000000000..39fc2bb9b --- /dev/null +++ b/taco/internal/repositories/tfe_run_repository.go @@ -0,0 +1,204 @@ +package repositories + +import ( + "context" + "errors" + "fmt" + + "github.com/diggerhq/digger/opentaco/internal/domain" + "github.com/diggerhq/digger/opentaco/internal/query/types" + "gorm.io/gorm" +) + +// TFERunRepository manages TFE runs using GORM +type TFERunRepository struct { + db *gorm.DB +} + +// NewTFERunRepository creates a new TFE run repository +func NewTFERunRepository(db *gorm.DB) *TFERunRepository { + return &TFERunRepository{db: db} +} + +// CreateRun creates a new TFE run +func (r *TFERunRepository) CreateRun(ctx context.Context, run *domain.TFERun) error { + dbRun := &types.TFERun{ + ID: run.ID, + OrgID: run.OrgID, + UnitID: run.UnitID, + Status: run.Status, + IsDestroy: run.IsDestroy, + Message: run.Message, + PlanOnly: run.PlanOnly, + AutoApply: run.AutoApply, + Source: run.Source, + IsCancelable: run.IsCancelable, + CanApply: run.CanApply, + ConfigurationVersionID: run.ConfigurationVersionID, + PlanID: run.PlanID, + ApplyID: run.ApplyID, + ApplyLogBlobID: run.ApplyLogBlobID, + CreatedBy: run.CreatedBy, + } + + // Create the run - but GORM may not respect false for boolean fields with database defaults + if err := r.db.WithContext(ctx).Create(dbRun).Error; err != nil { + return fmt.Errorf("failed to create run: %w", err) + } + + // Explicitly update PlanOnly if it's false (workaround for database default constraint) + if !run.PlanOnly { + if err := r.db.WithContext(ctx).Model(&types.TFERun{}).Where("id = ?", dbRun.ID).Update("plan_only", false).Error; err != nil { + return fmt.Errorf("failed to update plan_only field: %w", err) + } + fmt.Printf("[CreateRun] Explicitly set PlanOnly=false for run %s\n", dbRun.ID) + } + + // Update the domain model with generated ID and timestamps + run.ID = dbRun.ID + run.CreatedAt = dbRun.CreatedAt + run.UpdatedAt = dbRun.UpdatedAt + + fmt.Printf("[CreateRun] Created run: ID=%s, PlanOnly=%v\n", run.ID, run.PlanOnly) + + return nil +} + +// GetRun retrieves a run by ID +func (r *TFERunRepository) GetRun(ctx context.Context, runID string) (*domain.TFERun, error) { + var dbRun types.TFERun + + if err := r.db.WithContext(ctx).Where("id = ?", runID).First(&dbRun).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, fmt.Errorf("run not found: %s", runID) + } + return nil, fmt.Errorf("failed to get run: %w", err) + } + + return &domain.TFERun{ + ID: dbRun.ID, + OrgID: dbRun.OrgID, + UnitID: dbRun.UnitID, + CreatedAt: dbRun.CreatedAt, + UpdatedAt: dbRun.UpdatedAt, + Status: dbRun.Status, + IsDestroy: dbRun.IsDestroy, + Message: dbRun.Message, + PlanOnly: dbRun.PlanOnly, + AutoApply: dbRun.AutoApply, + Source: dbRun.Source, + IsCancelable: dbRun.IsCancelable, + CanApply: dbRun.CanApply, + ConfigurationVersionID: dbRun.ConfigurationVersionID, + PlanID: dbRun.PlanID, + ApplyID: dbRun.ApplyID, + ApplyLogBlobID: dbRun.ApplyLogBlobID, + CreatedBy: dbRun.CreatedBy, + }, nil +} + +// ListRunsForUnit retrieves runs for a specific unit (workspace) +func (r *TFERunRepository) ListRunsForUnit(ctx context.Context, unitID string, limit int) ([]*domain.TFERun, 
error) { + var dbRuns []types.TFERun + + query := r.db.WithContext(ctx).Where("unit_id = ?", unitID).Order("created_at DESC") + if limit > 0 { + query = query.Limit(limit) + } + + if err := query.Find(&dbRuns).Error; err != nil { + return nil, fmt.Errorf("failed to list runs for unit: %w", err) + } + + runs := make([]*domain.TFERun, len(dbRuns)) + for i, dbRun := range dbRuns { + runs[i] = &domain.TFERun{ + ID: dbRun.ID, + OrgID: dbRun.OrgID, + UnitID: dbRun.UnitID, + CreatedAt: dbRun.CreatedAt, + UpdatedAt: dbRun.UpdatedAt, + Status: dbRun.Status, + IsDestroy: dbRun.IsDestroy, + Message: dbRun.Message, + PlanOnly: dbRun.PlanOnly, + Source: dbRun.Source, + IsCancelable: dbRun.IsCancelable, + CanApply: dbRun.CanApply, + ConfigurationVersionID: dbRun.ConfigurationVersionID, + PlanID: dbRun.PlanID, + ApplyID: dbRun.ApplyID, + CreatedBy: dbRun.CreatedBy, + } + } + + return runs, nil +} + +// UpdateRunStatus updates the status of a run +func (r *TFERunRepository) UpdateRunStatus(ctx context.Context, runID string, status string) error { + fmt.Printf("[UpdateRunStatus] Attempting to update run %s to status '%s'\n", runID, status) + + result := r.db.WithContext(ctx). + Model(&types.TFERun{}). + Where("id = ?", runID). + Update("status", status) + + if result.Error != nil { + fmt.Printf("[UpdateRunStatus] ERROR: DB error for run %s: %v\n", runID, result.Error) + return fmt.Errorf("failed to update run status: %w", result.Error) + } + + if result.RowsAffected == 0 { + fmt.Printf("[UpdateRunStatus] ERROR: No rows affected for run %s (not found)\n", runID) + return fmt.Errorf("run not found: %s", runID) + } + + fmt.Printf("[UpdateRunStatus] ✅ Successfully updated run %s to '%s' (%d rows affected)\n", runID, status, result.RowsAffected) + return nil +} + +// UpdateRunPlanID updates the plan ID of a run +func (r *TFERunRepository) UpdateRunPlanID(ctx context.Context, runID string, planID string) error { + result := r.db.WithContext(ctx). + Model(&types.TFERun{}). + Where("id = ?", runID). + Update("plan_id", planID) + + if result.Error != nil { + return fmt.Errorf("failed to update run plan ID: %w", result.Error) + } + + if result.RowsAffected == 0 { + return fmt.Errorf("run not found: %s", runID) + } + + return nil +} + +// UpdateRunStatusAndCanApply updates both status and can_apply fields +func (r *TFERunRepository) UpdateRunStatusAndCanApply(ctx context.Context, runID string, status string, canApply bool) error { + fmt.Printf("[UpdateRunStatusAndCanApply] Updating run %s to status='%s', canApply=%v\n", runID, status, canApply) + + result := r.db.WithContext(ctx). + Model(&types.TFERun{}). + Where("id = ?", runID). 
+ Updates(map[string]interface{}{ + "status": status, + "can_apply": canApply, + }) + + if result.Error != nil { + fmt.Printf("[UpdateRunStatusAndCanApply] ERROR: %v\n", result.Error) + return fmt.Errorf("failed to update run: %w", result.Error) + } + + if result.RowsAffected == 0 { + fmt.Printf("[UpdateRunStatusAndCanApply] ERROR: Run not found\n") + return fmt.Errorf("run not found: %s", runID) + } + + fmt.Printf("[UpdateRunStatusAndCanApply] ✅ Updated run %s\n", runID) + return nil +} + diff --git a/taco/internal/storage/interface.go b/taco/internal/storage/interface.go index fe1188969..696809df6 100644 --- a/taco/internal/storage/interface.go +++ b/taco/internal/storage/interface.go @@ -49,6 +49,7 @@ type UnitStore interface { // Data operations Download(ctx context.Context, id string) ([]byte, error) Upload(ctx context.Context, id string, data []byte, lockID string) error + UploadBlob(ctx context.Context, key string, data []byte) error // For non-state files (no lock checks) // Lock operations Lock(ctx context.Context, id string, info *LockInfo) error diff --git a/taco/internal/storage/memstore.go b/taco/internal/storage/memstore.go index 5d5b69425..228346e85 100644 --- a/taco/internal/storage/memstore.go +++ b/taco/internal/storage/memstore.go @@ -127,6 +127,26 @@ func (m *memStore) Download(ctx context.Context, id string) ([]byte, error) { return content, nil } +// UploadBlob uploads arbitrary data (config archives, logs, etc.) without lock checks. +// Use this for non-state files. For state files, use Upload() which includes lock checking. +func (m *memStore) UploadBlob(ctx context.Context, key string, data []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + + // Store as a simple blob (no versioning, no lock checks) + m.units[key] = &unitData{ + metadata: &UnitMetadata{ + ID: key, + Size: int64(len(data)), + Updated: time.Now(), + Locked: false, + }, + content: data, + versions: nil, + } + return nil +} + func (m *memStore) Upload(ctx context.Context, id string, data []byte, lockID string) error { m.mu.Lock() defer m.mu.Unlock() diff --git a/taco/internal/storage/s3store.go b/taco/internal/storage/s3store.go index ad628ad18..7d34ffde1 100644 --- a/taco/internal/storage/s3store.go +++ b/taco/internal/storage/s3store.go @@ -292,6 +292,26 @@ func (s *s3Store) Download(ctx context.Context, id string) ([]byte, error) { return io.ReadAll(out.Body) } +// UploadBlob uploads arbitrary data (config archives, logs, etc.) without lock checks or versioning. +// Use this for non-state files. For state files, use Upload() which includes lock checking. 
+func (s *s3Store) UploadBlob(ctx context.Context, key string, data []byte) error { + fmt.Printf("[S3Store.UploadBlob] START - key=%s, dataLen=%d\n", key, len(data)) + + fullKey := s.objKey(key) + if _, err := s.client.PutObject(ctx, &s3.PutObjectInput{ + Bucket: &s.bucket, + Key: aws.String(fullKey), + Body: bytes.NewReader(data), + ContentType: aws.String("application/octet-stream"), + }); err != nil { + fmt.Printf("[S3Store.UploadBlob] Upload failed: %v\n", err) + return err + } + + fmt.Printf("[S3Store.UploadBlob] SUCCESS: Blob uploaded to %s\n", fullKey) + return nil +} + func (s *s3Store) Upload(ctx context.Context, id string, data []byte, lockID string) error { fmt.Printf("[S3Store.Upload] START - id=%s, dataLen=%d, lockID=%s\n", id, len(data), lockID) diff --git a/taco/internal/tfe/apply.go b/taco/internal/tfe/apply.go new file mode 100644 index 000000000..7c73035d1 --- /dev/null +++ b/taco/internal/tfe/apply.go @@ -0,0 +1,131 @@ +package tfe + +import ( + "fmt" + "net/http" + "os" + "strconv" + "time" + + "github.com/diggerhq/digger/opentaco/internal/auth" + "github.com/diggerhq/digger/opentaco/internal/domain/tfe" + "github.com/google/jsonapi" + "github.com/labstack/echo/v4" +) + +// GetApply returns details about a specific apply +func (h *TfeHandler) GetApply(c echo.Context) error { + ctx := c.Request().Context() + applyID := c.Param("id") + + // Get the apply from database (for now, we derive apply from run) + // In future, we could have a separate TFEApply table + run, err := h.runRepo.GetRun(ctx, applyID) // Using apply ID as run ID for simplicity + if err != nil { + return c.JSON(http.StatusNotFound, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "404", + "title": "not found", + "detail": fmt.Sprintf("Apply %s not found", applyID), + }}, + }) + } + + publicBase := os.Getenv("OPENTACO_PUBLIC_BASE_URL") + if publicBase == "" { + return fmt.Errorf("OPENTACO_PUBLIC_BASE_URL environment variable not set") + } + + // Generate signed token for apply log streaming + logToken, err := auth.GenerateLogStreamToken(applyID, 24*time.Hour) + if err != nil { + return c.JSON(http.StatusInternalServerError, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "500", + "title": "internal error", + "detail": "Failed to generate log token", + }}, + }) + } + logsurl := fmt.Sprintf("%s/tfe/api/v2/applies/%s/logs/%s", publicBase, applyID, logToken) + + // Determine apply status based on run status + applyStatus := "pending" + switch run.Status { + case "applying": + applyStatus = "running" + case "applied": + applyStatus = "finished" + case "errored": + applyStatus = "errored" + } + + response := tfe.ApplyRecord{ + ID: applyID, + Status: applyStatus, + LogReadURL: logsurl, + Run: &tfe.RunRef{ + ID: run.ID, + }, + } + + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + c.Response().WriteHeader(http.StatusOK) + + if err := jsonapi.MarshalPayload(c.Response().Writer, &response); err != nil { + fmt.Printf("error marshaling apply payload: %v\n", err) + return err + } + return nil +} + +// GetApplyLogs streams apply logs to the Terraform CLI +func (h *TfeHandler) GetApplyLogs(c echo.Context) error { + ctx := c.Request().Context() + applyID := c.Param("applyID") + logToken := c.Param("token") + + // Verify the log streaming token + if !auth.VerifyLogStreamToken(logToken, applyID) { + fmt.Printf("Invalid log stream token for apply %s\n", applyID) + return c.JSON(http.StatusUnauthorized, map[string]string{"error": "invalid or expired log 
token"}) + } + + offset := c.QueryParam("offset") + offsetInt, _ := strconv.ParseInt(offset, 10, 64) + + // Get run (apply ID is the same as run ID in our simplified model) + run, err := h.runRepo.GetRun(ctx, applyID) + if err != nil { + return c.JSON(http.StatusNotFound, map[string]string{"error": "apply not found"}) + } + + // Try to get apply logs from blob storage + var logText string + applyLogBlobID := fmt.Sprintf("runs/%s/apply-logs.txt", run.ID) + + logData, err := h.blobStore.Download(ctx, applyLogBlobID) + if err == nil { + logText = string(logData) + } else { + // If logs don't exist yet, return placeholder + if run.Status == "applying" || run.Status == "apply_queued" { + logText = "Waiting for apply to start...\n" + } else { + logText = "Apply logs not available\n" + } + } + + // Handle offset for streaming + if offsetInt > 0 && offsetInt < int64(len(logText)) { + logText = logText[offsetInt:] + } else if offsetInt >= int64(len(logText)) { + logText = "" + } + + c.Response().Header().Set("Content-Type", "text/plain") + c.Response().WriteHeader(http.StatusOK) + _, err = c.Response().Write([]byte(logText)) + return err +} + diff --git a/taco/internal/tfe/apply_executor.go b/taco/internal/tfe/apply_executor.go new file mode 100644 index 000000000..fad6c65c0 --- /dev/null +++ b/taco/internal/tfe/apply_executor.go @@ -0,0 +1,200 @@ +package tfe + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/diggerhq/digger/opentaco/internal/domain" + "github.com/diggerhq/digger/opentaco/internal/storage" +) + +// ApplyExecutor handles real Terraform apply execution +type ApplyExecutor struct { + runRepo domain.TFERunRepository + planRepo domain.TFEPlanRepository + configVerRepo domain.TFEConfigurationVersionRepository + blobStore storage.UnitStore +} + +// NewApplyExecutor creates a new apply executor +func NewApplyExecutor( + runRepo domain.TFERunRepository, + planRepo domain.TFEPlanRepository, + configVerRepo domain.TFEConfigurationVersionRepository, + blobStore storage.UnitStore, +) *ApplyExecutor { + return &ApplyExecutor{ + runRepo: runRepo, + planRepo: planRepo, + configVerRepo: configVerRepo, + blobStore: blobStore, + } +} + +// ExecuteApply executes a Terraform apply for a run +func (e *ApplyExecutor) ExecuteApply(ctx context.Context, runID string) error { + fmt.Printf("Starting apply execution for run %s\n", runID) + + // Get run + run, err := e.runRepo.GetRun(ctx, runID) + if err != nil { + return fmt.Errorf("failed to get run: %w", err) + } + + // Check if run can be applied + if run.Status != "planned_and_finished" && run.Status != "apply_queued" { + return fmt.Errorf("run cannot be applied in status: %s", run.Status) + } + + // Update run status to "applying" + if err := e.runRepo.UpdateRunStatus(ctx, runID, "applying"); err != nil { + return fmt.Errorf("failed to update run status: %w", err) + } + + // Get configuration version + configVer, err := e.configVerRepo.GetConfigurationVersion(ctx, run.ConfigurationVersionID) + if err != nil { + return fmt.Errorf("failed to get configuration version: %w", err) + } + + // Download configuration archive + archivePath := fmt.Sprintf("config-versions/%s/archive.tar.gz", configVer.ID) + archiveData, err := e.blobStore.Download(ctx, archivePath) + if err != nil { + return e.handleApplyError(ctx, run.ID, fmt.Sprintf("Failed to download archive: %v", err)) + } + + // Extract to temp directory + workDir, err := extractArchive(archiveData) + if err != nil { + return e.handleApplyError(ctx, run.ID, 
fmt.Sprintf("Failed to extract archive: %v", err)) + } + defer cleanupWorkDir(workDir) + + fmt.Printf("Extracted archive to %s for apply\n", workDir) + + // Remove cloud/backend configuration to prevent circular dependencies + if err := createBackendOverride(workDir); err != nil { + return e.handleApplyError(ctx, run.ID, fmt.Sprintf("Failed to remove backend configuration: %v", err)) + } + + // Download current state for this unit (must exist before apply) + // Construct org-scoped state ID: / + stateID := fmt.Sprintf("%s/%s", run.OrgID, run.UnitID) + stateData, err := e.blobStore.Download(ctx, stateID) + if err != nil { + fmt.Printf("Warning: Failed to download state for %s: %v\n", stateID, err) + // Continue anyway - might be a fresh deployment + } else { + // Write state to terraform.tfstate in the working directory + statePath := filepath.Join(workDir, "terraform.tfstate") + if err := os.WriteFile(statePath, stateData, 0644); err != nil { + return e.handleApplyError(ctx, run.ID, fmt.Sprintf("Failed to write state file: %v", err)) + } + fmt.Printf("Downloaded and wrote existing state for %s (%d bytes)\n", stateID, len(stateData)) + } + + // Run terraform apply + logs, err := e.runTerraformApply(ctx, workDir, run.IsDestroy) + + // Store apply logs in blob storage (use UploadBlob - no lock checks needed for logs) + applyLogBlobID := fmt.Sprintf("runs/%s/apply-logs.txt", run.ID) + if storeErr := e.blobStore.UploadBlob(ctx, applyLogBlobID, []byte(logs)); storeErr != nil { + fmt.Printf("Failed to store apply logs: %v\n", storeErr) + } + + // Update run status + runStatus := "applied" + if err != nil { + runStatus = "errored" + logs = logs + "\n\nError: " + err.Error() + // Store error logs even on failure + _ = e.blobStore.UploadBlob(ctx, applyLogBlobID, []byte(logs)) + } else { + // Upload the updated state back to storage after successful apply + // Construct org-scoped state ID: / + stateID := fmt.Sprintf("%s/%s", run.OrgID, run.UnitID) + statePath := filepath.Join(workDir, "terraform.tfstate") + newStateData, readErr := os.ReadFile(statePath) + if readErr != nil { + fmt.Printf("Warning: Failed to read updated state file: %v\n", readErr) + } else { + // Upload state with empty lock ID (state is already locked during apply) + if uploadErr := e.blobStore.Upload(ctx, stateID, newStateData, ""); uploadErr != nil { + fmt.Printf("ERROR: Failed to upload updated state for %s: %v\n", stateID, uploadErr) + // This is critical - mark as errored + runStatus = "errored" + logs = logs + fmt.Sprintf("\n\nCritical Error: Failed to upload state: %v\n", uploadErr) + } else { + fmt.Printf("Successfully uploaded updated state for %s (%d bytes)\n", stateID, len(newStateData)) + } + } + } + + if err := e.runRepo.UpdateRunStatus(ctx, run.ID, runStatus); err != nil { + return fmt.Errorf("failed to update run status: %w", err) + } + + fmt.Printf("Apply execution completed for run %s: status=%s\n", runID, runStatus) + + if err != nil { + return fmt.Errorf("apply failed: %w", err) + } + + return nil +} + +// runTerraformApply executes terraform init and apply +func (e *ApplyExecutor) runTerraformApply(ctx context.Context, workDir string, isDestroy bool) (logs string, err error) { + var allLogs strings.Builder + + // Run terraform init (cloud/backend config already removed by createBackendOverride) + fmt.Printf("Running terraform init in %s\n", workDir) + initCmd := exec.CommandContext(ctx, "terraform", "init", "-no-color", "-input=false") + initCmd.Dir = workDir + initCmd.Env = append(os.Environ(), 
"TF_IN_AUTOMATION=1") + initOutput, initErr := initCmd.CombinedOutput() + allLogs.WriteString("=== Terraform Init ===\n") + allLogs.Write(initOutput) + allLogs.WriteString("\n\n") + + if initErr != nil { + return allLogs.String(), fmt.Errorf("terraform init failed: %w", initErr) + } + + // Run terraform apply + fmt.Printf("Running terraform apply in %s\n", workDir) + applyArgs := []string{"apply", "-no-color", "-input=false", "-auto-approve"} + if isDestroy { + applyArgs = []string{"destroy", "-no-color", "-input=false", "-auto-approve"} + } + + applyCmd := exec.CommandContext(ctx, "terraform", applyArgs...) + applyCmd.Dir = workDir + applyCmd.Env = append(os.Environ(), "TF_IN_AUTOMATION=1") + applyOutput, applyErr := applyCmd.CombinedOutput() + allLogs.WriteString("=== Terraform Apply ===\n") + allLogs.Write(applyOutput) + allLogs.WriteString("\n") + + if applyErr != nil { + return allLogs.String(), fmt.Errorf("terraform apply failed: %w", applyErr) + } + + return allLogs.String(), nil +} + +// handleApplyError handles apply execution errors +func (e *ApplyExecutor) handleApplyError(ctx context.Context, runID string, errorMsg string) error { + fmt.Printf("Apply error for run %s: %s\n", runID, errorMsg) + + // Update run status + _ = e.runRepo.UpdateRunStatus(ctx, runID, "errored") + + return fmt.Errorf("apply execution failed: %s", errorMsg) +} + diff --git a/taco/internal/tfe/configuration-versions.go b/taco/internal/tfe/configuration-versions.go index c0e9d770c..ea32317ff 100644 --- a/taco/internal/tfe/configuration-versions.go +++ b/taco/internal/tfe/configuration-versions.go @@ -3,40 +3,51 @@ package tfe import ( "fmt" "io" - "log/slog" "net/http" "os" "time" "github.com/diggerhq/digger/opentaco/internal/auth" + "github.com/diggerhq/digger/opentaco/internal/domain" "github.com/diggerhq/digger/opentaco/internal/domain/tfe" "github.com/google/jsonapi" "github.com/labstack/echo/v4" ) func (h *TfeHandler) GetConfigurationVersion(c echo.Context) error { + ctx := c.Request().Context() cvID := c.Param("id") - // In a real impl you'd look this up from memory/db. For now assume we stored: - // - that cvID exists - // - upload already happened - // We'll fake a timestamp. 
- uploadedAt := time.Now().UTC().Format(time.RFC3339) + // Get configuration version from database + configVer, err := h.configVerRepo.GetConfigurationVersion(ctx, cvID) + if err != nil { + fmt.Printf("Failed to get configuration version %s: %v\n", cvID, err) + return c.JSON(http.StatusNotFound, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "404", + "title": "not found", + "detail": fmt.Sprintf("Configuration version %s not found", cvID), + }}, + }) + } + + // Parse status timestamps + statusTimestamps := make(map[string]string) + if configVer.UploadedAt != nil { + statusTimestamps["uploaded-at"] = configVer.UploadedAt.UTC().Format(time.RFC3339) + } cv := tfe.ConfigurationVersionRecord{ - ID: cvID, - AutoQueueRuns: false, - Error: nil, - ErrorMessage: nil, - Source: "cli", - Speculative: true, - Status: "uploaded", // <-- important change - StatusTimestamps: map[string]string{ - "uploaded-at": uploadedAt, - }, - UploadURL: nil, // <-- becomes null in JSON now - Provisional: false, - IngressAttributes: nil, // emit relationships.ingress-attributes.data = null + ID: cvID, + AutoQueueRuns: configVer.AutoQueueRuns, + Error: configVer.Error, + ErrorMessage: configVer.ErrorMessage, + Source: configVer.Source, + Speculative: configVer.Speculative, + Status: configVer.Status, + StatusTimestamps: statusTimestamps, + UploadURL: configVer.UploadURL, + Provisional: configVer.Provisional, } c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") @@ -49,28 +60,82 @@ func (h *TfeHandler) GetConfigurationVersion(c echo.Context) error { } func (h *TfeHandler) CreateConfigurationVersions(c echo.Context) error { - cvId := "cv-1234567890" + ctx := c.Request().Context() + workspaceName := c.Param("workspace_name") + + // Get org and user context + orgIdentifier, _ := c.Get("organization_id").(string) + userID, _ := c.Get("user_id").(string) + + if orgIdentifier == "" { + orgIdentifier = "default-org" + } + if userID == "" { + userID = "system" + } + + // Resolve external org ID to UUID (needed for S3 paths) + orgUUID, err := h.identifierResolver.ResolveOrganization(ctx, orgIdentifier) + if err != nil { + fmt.Printf("Failed to resolve organization %s: %v\n", orgIdentifier, err) + return c.JSON(http.StatusInternalServerError, map[string]string{ + "error": fmt.Sprintf("Failed to resolve organization: %v", err), + }) + } + + // Strip ws- prefix if present to get the actual unit ID + unitID := convertWorkspaceToStateID(workspaceName) + publicBase := os.Getenv("OPENTACO_PUBLIC_BASE_URL") if publicBase == "" { - slog.Error("OPENTACO_PUBLIC_BASE_URL environment variable not set") - return fmt.Errorf("OPENTACO_PUBLIC_BASE_URL environment variable not set") + publicBase = "http://localhost:8080" // Fallback for testing } - signedUploadUrl, err := auth.SignURL(publicBase, fmt.Sprintf("/tfe/api/v2/configuration-versions/%v/upload", cvId), time.Now().Add(2*time.Minute)) + + // Create configuration version in database + configVer := &domain.TFEConfigurationVersion{ + OrgID: orgUUID, // Store UUID, not external ID! 
+ UnitID: unitID, + Status: "pending", + Source: "cli", + Speculative: true, + AutoQueueRuns: false, + Provisional: false, + StatusTimestamps: "{}", + CreatedBy: userID, + } + + if err := h.configVerRepo.CreateConfigurationVersion(ctx, configVer); err != nil { + fmt.Printf("Failed to create configuration version: %v\n", err) + return c.JSON(http.StatusInternalServerError, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "500", + "title": "internal error", + "detail": "Failed to create configuration version", + }}, + }) + } + + fmt.Printf("Created configuration version %s for unit %s\n", configVer.ID, unitID) + + // Generate signed upload URL + signedUploadUrl, err := auth.SignURL(publicBase, fmt.Sprintf("/tfe/api/v2/configuration-versions/%v/upload", configVer.ID), time.Now().Add(2*time.Minute)) if err != nil { return err } + + fmt.Printf("DEBUG Generated upload URL: %s\n", signedUploadUrl) + cv := tfe.ConfigurationVersionRecord{ - ID: cvId, - AutoQueueRuns: false, // you can choose true/false; docs default true + ID: configVer.ID, + AutoQueueRuns: configVer.AutoQueueRuns, Error: nil, ErrorMessage: nil, - Source: "cli", // HashiCorp examples show "tfe-api" or "gitlab"; "cli" is fine for CLI-driven runs. - Speculative: true, // for terraform plan in remote mode - Status: "pending", // initial status according to docs + Source: configVer.Source, + Speculative: configVer.Speculative, + Status: configVer.Status, StatusTimestamps: map[string]string{}, UploadURL: &signedUploadUrl, - Provisional: false, - IngressAttributes: nil, + Provisional: configVer.Provisional, } c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") @@ -86,14 +151,41 @@ func (h *TfeHandler) CreateConfigurationVersions(c echo.Context) error { func (h *TfeHandler) UploadConfigurationArchive(c echo.Context) error { - configVersionID := c.Param("configVersionID") + ctx := c.Request().Context() + configVersionID := c.Param("id") body, err := io.ReadAll(c.Request().Body) if err != nil { return c.NoContent(http.StatusBadRequest) } - fmt.Printf("received %d bytes for %s\n", len(body), configVersionID) + if len(body) == 0 { + return c.JSON(http.StatusBadRequest, map[string]string{"error": "empty archive"}) + } + + fmt.Printf("Received %d bytes for configuration version %s\n", len(body), configVersionID) + + // Get configuration version from database + _, err = h.configVerRepo.GetConfigurationVersion(ctx, configVersionID) + if err != nil { + return c.JSON(http.StatusNotFound, map[string]string{"error": "configuration version not found"}) + } + + // Store archive in blob storage (use UploadBlob - no lock checks needed for archives) + archiveBlobID := fmt.Sprintf("config-versions/%s/archive.tar.gz", configVersionID) + if err := h.blobStore.UploadBlob(ctx, archiveBlobID, body); err != nil { + fmt.Printf("Failed to store archive: %v\n", err) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to store archive"}) + } + + // Update configuration version status to uploaded AND set the archive blob ID + uploadedAt := time.Now() + if err := h.configVerRepo.UpdateConfigurationVersionStatus(ctx, configVersionID, "uploaded", &uploadedAt, &archiveBlobID); err != nil { + fmt.Printf("Failed to update configuration version status: %v\n", err) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to update status"}) + } + + fmt.Printf("Successfully uploaded and stored archive for configuration version %s (blob: %s)\n", configVersionID, 
archiveBlobID) // 200 OK, empty body. Terraform does not expect JSON here. return c.NoContent(http.StatusOK) diff --git a/taco/internal/tfe/plan.go b/taco/internal/tfe/plan.go index f9fa29709..104c11400 100644 --- a/taco/internal/tfe/plan.go +++ b/taco/internal/tfe/plan.go @@ -6,17 +6,31 @@ import ( "net/http" "os" "strconv" + "time" + "github.com/diggerhq/digger/opentaco/internal/auth" + "github.com/diggerhq/digger/opentaco/internal/domain" "github.com/diggerhq/digger/opentaco/internal/domain/tfe" "github.com/google/jsonapi" "github.com/labstack/echo/v4" ) func (h *TfeHandler) GetPlan(c echo.Context) error { + ctx := c.Request().Context() planID := c.Param("id") - // This has to match whatever runID you created in POST /runs - runID := "run-abc123" + // Get plan from database + plan, err := h.planRepo.GetPlan(ctx, planID) + if err != nil { + fmt.Printf("Failed to get plan %s: %v\n", planID, err) + return c.JSON(http.StatusNotFound, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "404", + "title": "not found", + "detail": fmt.Sprintf("Plan %s not found", planID), + }}, + }) + } publicBase := os.Getenv("OPENTACO_PUBLIC_BASE_URL") if publicBase == "" { @@ -24,27 +38,40 @@ func (h *TfeHandler) GetPlan(c echo.Context) error { return fmt.Errorf("OPENTACO_PUBLIC_BASE_URL environment variable not set") } - - blobID := "a-really-long-string-for-blob-id-secret" - logsurl := fmt.Sprintf("%v/tfe/api/v2/plans/%s/logs/%s", publicBase, planID, blobID) - - plan := tfe.PlanRecord{ - ID: planID, - Status: "finished", - ResourceAdditions: 0, - ResourceChanges: 0, - ResourceDestructions: 0, - HasChanges: false, + // Generate signed token for log streaming (embedded in path, not query string) + // This is secure because: token is time-limited, HMAC-signed, and in the path + // (Terraform CLI strips query params but preserves path) + // 24-hour validity to support long-running plans + logToken, err := auth.GenerateLogStreamToken(planID, 24*time.Hour) + if err != nil { + fmt.Printf("Failed to generate log token: %v\n", err) + return c.JSON(http.StatusInternalServerError, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "500", + "title": "internal error", + "detail": "Failed to generate log token", + }}, + }) + } + logsurl := fmt.Sprintf("%s/tfe/api/v2/plans/%s/logs/%s", publicBase, planID, logToken) + + response := tfe.PlanRecord{ + ID: plan.ID, + Status: plan.Status, + ResourceAdditions: plan.ResourceAdditions, + ResourceChanges: plan.ResourceChanges, + ResourceDestructions: plan.ResourceDestructions, + HasChanges: plan.HasChanges, LogReadURL: logsurl, Run: &tfe.RunRef{ - ID: runID, + ID: plan.RunID, }, } c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") c.Response().WriteHeader(http.StatusOK) - if err := jsonapi.MarshalPayload(c.Response().Writer, &plan); err != nil { + if err := jsonapi.MarshalPayload(c.Response().Writer, &response); err != nil { fmt.Printf("error marshaling plan payload: %v\n", err) return err } @@ -52,37 +79,66 @@ func (h *TfeHandler) GetPlan(c echo.Context) error { } func (h *TfeHandler) GetPlanLogs(c echo.Context) error { - //TODO: verify the blobID from DB and ensure that it belongs to the right planID - //blobID := c.Param("blobID") + ctx := c.Request().Context() + planID := c.Param("planID") + logToken := c.Param("token") + + // Verify the log streaming token + if !auth.VerifyLogStreamToken(logToken, planID) { + fmt.Printf("Invalid log stream token for plan %s\n", planID) + return c.JSON(http.StatusUnauthorized, 
map[string]string{"error": "invalid or expired log token"}) + } offset := c.QueryParam("offset") - offsetInt , _ := strconv.ParseInt(offset, 10, 64) + offsetInt, _ := strconv.ParseInt(offset, 10, 64) + + // Get plan from database + plan, err := h.planRepo.GetPlan(ctx, planID) + if err != nil { + return c.JSON(http.StatusNotFound, map[string]string{"error": "plan not found"}) + } + + // Check if logs exist in blob storage + var logText string + if plan.LogBlobID != nil { + // Try to get logs from blob storage + logData, err := h.blobStore.Download(ctx, *plan.LogBlobID) + if err != nil { + fmt.Printf("Failed to get logs from blob storage: %v\n", err) + // Fall back to default logs + logText = generateDefaultPlanLogs(plan) + } else { + logText = string(logData) + } + } else { + // Generate default logs based on plan status + logText = generateDefaultPlanLogs(plan) + } + + // Handle offset for streaming (TFE streams logs incrementally) + if offsetInt > 0 && offsetInt < int64(len(logText)) { + logText = logText[offsetInt:] + } else if offsetInt >= int64(len(logText)) { + logText = "" // No new data + } c.Response().Header().Set(echo.HeaderContentType, "text/plain") c.Response().WriteHeader(http.StatusOK) - // Minimal realistic Terraform plan text for "no changes" - logText := `Terraform used the selected providers to generate the following execution plan. + _, err = c.Response().Writer.Write([]byte(logText)) + return err +} + +func generateDefaultPlanLogs(plan *domain.TFEPlan) string { + return fmt.Sprintf(`Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + create - destroy ~ update in-place -No changes. Your infrastructure matches the configuration. - -Plan: 0 to add, 0 to change, 0 to destroy. - -NOTE: This streamed logs are FAKEEEE -` +Plan: %d to add, %d to change, %d to destroy. 
- if offsetInt > 100 { - logText = "" - } - - _, err := c.Response().Writer.Write([]byte(logText)) - if err != nil { - return err - } - return nil +Status: %s +`, plan.ResourceAdditions, plan.ResourceChanges, plan.ResourceDestructions, plan.Status) } diff --git a/taco/internal/tfe/plan_executor.go b/taco/internal/tfe/plan_executor.go new file mode 100644 index 000000000..8bfedcf65 --- /dev/null +++ b/taco/internal/tfe/plan_executor.go @@ -0,0 +1,424 @@ +package tfe + +import ( + "archive/tar" + "compress/gzip" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/diggerhq/digger/opentaco/internal/domain" + "github.com/diggerhq/digger/opentaco/internal/storage" +) + +// PlanExecutor handles real Terraform plan execution +type PlanExecutor struct { + runRepo domain.TFERunRepository + planRepo domain.TFEPlanRepository + configVerRepo domain.TFEConfigurationVersionRepository + blobStore storage.UnitStore +} + +// NewPlanExecutor creates a new plan executor +func NewPlanExecutor( + runRepo domain.TFERunRepository, + planRepo domain.TFEPlanRepository, + configVerRepo domain.TFEConfigurationVersionRepository, + blobStore storage.UnitStore, +) *PlanExecutor { + return &PlanExecutor{ + runRepo: runRepo, + planRepo: planRepo, + configVerRepo: configVerRepo, + blobStore: blobStore, + } +} + +// ExecutePlan executes a Terraform plan for a run +func (e *PlanExecutor) ExecutePlan(ctx context.Context, runID string) error { + fmt.Printf("[ExecutePlan] === STARTING FOR RUN %s ===\n", runID) + + // Get run + run, err := e.runRepo.GetRun(ctx, runID) + if err != nil { + fmt.Printf("[ExecutePlan] ERROR: Failed to get run: %v\n", err) + return fmt.Errorf("failed to get run: %w", err) + } + fmt.Printf("[ExecutePlan] Got run, configVersionID=%s\n", run.ConfigurationVersionID) + + // Update run status to "planning" + if err := e.runRepo.UpdateRunStatus(ctx, runID, "planning"); err != nil { + fmt.Printf("[ExecutePlan] ERROR: Failed to update status to planning: %v\n", err) + return fmt.Errorf("failed to update run status: %w", err) + } + fmt.Printf("[ExecutePlan] Updated run status to 'planning'\n") + + // Get configuration version + configVer, err := e.configVerRepo.GetConfigurationVersion(ctx, run.ConfigurationVersionID) + if err != nil { + return fmt.Errorf("failed to get configuration version: %w", err) + } + + // Check if configuration was uploaded + if configVer.Status != "uploaded" || configVer.ArchiveBlobID == nil { + return e.handlePlanError(ctx, run.ID, run.PlanID, "Configuration not uploaded") + } + + // Download configuration archive from blob storage + archivePath := fmt.Sprintf("config-versions/%s/archive.tar.gz", configVer.ID) + archiveData, err := e.blobStore.Download(ctx, archivePath) + if err != nil { + return e.handlePlanError(ctx, run.ID, run.PlanID, fmt.Sprintf("Failed to download archive: %v", err)) + } + + fmt.Printf("Downloaded %d bytes for configuration version %s\n", len(archiveData), configVer.ID) + + // Extract to temp directory + workDir, err := extractArchive(archiveData) + if err != nil { + return e.handlePlanError(ctx, run.ID, run.PlanID, fmt.Sprintf("Failed to extract archive: %v", err)) + } + defer cleanupWorkDir(workDir) + + fmt.Printf("Extracted archive to %s\n", workDir) + + // Create an override file to disable cloud/remote backend + // This is required for server-side execution to prevent circular dependencies + if err := createBackendOverride(workDir); err != nil { + return e.handlePlanError(ctx, run.ID, run.PlanID, 
fmt.Sprintf("Failed to create backend override: %v", err)) + } + + // Download current state for this unit (if it exists) + // Construct org-scoped state ID: / + stateID := fmt.Sprintf("%s/%s", run.OrgID, run.UnitID) + stateData, err := e.blobStore.Download(ctx, stateID) + if err == nil { + // Write state to terraform.tfstate in the working directory + statePath := filepath.Join(workDir, "terraform.tfstate") + if err := os.WriteFile(statePath, stateData, 0644); err != nil { + fmt.Printf("Warning: Failed to write state file: %v\n", err) + } else { + fmt.Printf("Downloaded and wrote existing state for %s (%d bytes)\n", stateID, len(stateData)) + } + } else { + fmt.Printf("No existing state found for %s (will start fresh): %v\n", stateID, err) + } + + // Run terraform plan + planOutput, logs, hasChanges, adds, changes, destroys, err := e.runTerraformPlan(ctx, workDir, run.IsDestroy) + + // Store logs in blob storage (use UploadBlob - no lock checks needed for logs) + logBlobID := fmt.Sprintf("plans/%s/logs.txt", *run.PlanID) + if err := e.blobStore.UploadBlob(ctx, logBlobID, []byte(logs)); err != nil { + fmt.Printf("Failed to store logs: %v\n", err) + } + + // Generate signed log URL + logReadURL := fmt.Sprintf("/tfe/api/v2/plans/%s/logs/logs", *run.PlanID) + + // Update plan with results + planStatus := "finished" + if err != nil { + planStatus = "errored" + logs = logs + "\n\nError: " + err.Error() + } + + planUpdates := &domain.TFEPlanUpdate{ + Status: &planStatus, + ResourceAdditions: &adds, + ResourceChanges: &changes, + ResourceDestructions: &destroys, + HasChanges: &hasChanges, + LogBlobID: &logBlobID, + LogReadURL: &logReadURL, + } + + // Store plan output if not too large + if len(planOutput) < 1024*1024 { // < 1MB + planUpdates.PlanOutputJSON = &planOutput + } + + if err := e.planRepo.UpdatePlan(ctx, *run.PlanID, planUpdates); err != nil { + return fmt.Errorf("failed to update plan: %w", err) + } + + // Update run status and can_apply + runStatus := "planned_and_finished" + canApply := (err == nil) // Can apply if plan succeeded (regardless of whether there are changes) + + if err != nil { + runStatus = "errored" + } + + fmt.Printf("[ExecutePlan] Updating run %s status to '%s', canApply=%v\n", run.ID, runStatus, canApply) + if err := e.runRepo.UpdateRunStatusAndCanApply(ctx, run.ID, runStatus, canApply); err != nil { + fmt.Printf("[ExecutePlan] ERROR: Failed to update run: %v\n", err) + return fmt.Errorf("failed to update run: %w", err) + } + + fmt.Printf("[ExecutePlan] ✅ Plan execution completed for run %s: status=%s, canApply=%v, hasChanges=%v, adds=%d, changes=%d, destroys=%d\n", + runID, runStatus, canApply, hasChanges, adds, changes, destroys) + + // Only auto-trigger apply if AutoApply flag is true (i.e., terraform apply -auto-approve) + fmt.Printf("[ExecutePlan] Auto-apply check: run.AutoApply=%v, err=%v\n", run.AutoApply, err) + if run.AutoApply && err == nil { + fmt.Printf("[ExecutePlan] Auto-applying run %s (AutoApply=true)\n", runID) + + // Queue the apply by updating the run status + if err := e.runRepo.UpdateRunStatus(ctx, run.ID, "apply_queued"); err != nil { + fmt.Printf("[ExecutePlan] ERROR: Failed to queue apply: %v\n", err) + return nil // Don't fail the plan if we can't queue the apply + } + + // Trigger apply execution in background + go func() { + fmt.Printf("[ExecutePlan] Starting async apply execution for run %s\n", run.ID) + applyExecutor := NewApplyExecutor(e.runRepo, e.planRepo, e.configVerRepo, e.blobStore) + if err := 
applyExecutor.ExecuteApply(context.Background(), run.ID); err != nil { + fmt.Printf("[ExecutePlan] ❌ Apply execution failed for run %s: %v\n", run.ID, err) + } else { + fmt.Printf("[ExecutePlan] ✅ Apply execution completed successfully for run %s\n", run.ID) + } + }() + } + + return nil +} + +// runTerraformPlan executes terraform init and plan +func (e *PlanExecutor) runTerraformPlan(ctx context.Context, workDir string, isDestroy bool) (output string, logs string, hasChanges bool, adds, changes, destroys int, err error) { + var allLogs strings.Builder + + // Run terraform init (backend override file disables cloud/remote backend) + fmt.Printf("Running terraform init in %s\n", workDir) + initCmd := exec.CommandContext(ctx, "terraform", "init", "-no-color", "-input=false") + initCmd.Dir = workDir + initCmd.Env = append(os.Environ(), "TF_IN_AUTOMATION=1") // Tell Terraform it's running in automation + initOutput, initErr := initCmd.CombinedOutput() + allLogs.WriteString("=== Terraform Init ===\n") + allLogs.Write(initOutput) + allLogs.WriteString("\n\n") + + if initErr != nil { + return "", allLogs.String(), false, 0, 0, 0, fmt.Errorf("terraform init failed: %w", initErr) + } + + // Run terraform plan + fmt.Printf("Running terraform plan in %s\n", workDir) + planArgs := []string{"plan", "-no-color", "-input=false", "-detailed-exitcode"} + if isDestroy { + planArgs = append(planArgs, "-destroy") + } + + planCmd := exec.CommandContext(ctx, "terraform", planArgs...) + planCmd.Dir = workDir + planCmd.Env = append(os.Environ(), "TF_IN_AUTOMATION=1") + planOutput, planErr := planCmd.CombinedOutput() + allLogs.WriteString("=== Terraform Plan ===\n") + allLogs.Write(planOutput) + allLogs.WriteString("\n") + + // Parse output for resource counts + adds, changes, destroys = parsePlanOutput(string(planOutput)) + hasChanges = adds > 0 || changes > 0 || destroys > 0 + + // Exit code 2 means changes detected (not an error) + if exitErr, ok := planErr.(*exec.ExitError); ok { + if exitErr.ExitCode() == 2 { + // Changes detected - this is success + planErr = nil + } + } + + return string(planOutput), allLogs.String(), hasChanges, adds, changes, destroys, planErr +} + +// handlePlanError handles plan execution errors +func (e *PlanExecutor) handlePlanError(ctx context.Context, runID string, planID *string, errorMsg string) error { + fmt.Printf("Plan error for run %s: %s\n", runID, errorMsg) + + // Update plan status if we have a plan ID + if planID != nil { + errStatus := "errored" + planUpdates := &domain.TFEPlanUpdate{ + Status: &errStatus, + } + _ = e.planRepo.UpdatePlan(ctx, *planID, planUpdates) + } + + // Update run status + _ = e.runRepo.UpdateRunStatus(ctx, runID, "errored") + + return fmt.Errorf("plan execution failed: %s", errorMsg) +} + +// parsePlanOutput parses "Plan: X to add, Y to change, Z to destroy" from Terraform output +func parsePlanOutput(output string) (adds, changes, destroys int) { + // Look for "Plan: X to add, Y to change, Z to destroy" + planRegex := regexp.MustCompile(`Plan: (\d+) to add, (\d+) to change, (\d+) to destroy`) + matches := planRegex.FindStringSubmatch(output) + + if len(matches) == 4 { + fmt.Sscanf(matches[1], "%d", &adds) + fmt.Sscanf(matches[2], "%d", &changes) + fmt.Sscanf(matches[3], "%d", &destroys) + } + + return +} + +// extractArchive extracts a tar.gz archive to a temp directory +func extractArchive(data []byte) (string, error) { + // Create temp directory + tempDir, err := os.MkdirTemp("", "terraform-plan-*") + if err != nil { + return "", 
fmt.Errorf("failed to create temp dir: %w", err) + } + + // Create gzip reader + gzipReader, err := gzip.NewReader(strings.NewReader(string(data))) + if err != nil { + os.RemoveAll(tempDir) + return "", fmt.Errorf("failed to create gzip reader: %w", err) + } + defer gzipReader.Close() + + // Create tar reader + tarReader := tar.NewReader(gzipReader) + + // Extract all files + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + os.RemoveAll(tempDir) + return "", fmt.Errorf("failed to read tar: %w", err) + } + + // Construct target path + target := filepath.Join(tempDir, header.Name) + + // Ensure target is within tempDir (security check) + if !strings.HasPrefix(target, filepath.Clean(tempDir)+string(os.PathSeparator)) { + os.RemoveAll(tempDir) + return "", fmt.Errorf("illegal file path in archive: %s", header.Name) + } + + switch header.Typeflag { + case tar.TypeDir: + // Create directory + if err := os.MkdirAll(target, 0755); err != nil { + os.RemoveAll(tempDir) + return "", fmt.Errorf("failed to create directory: %w", err) + } + case tar.TypeReg: + // Create parent directory if needed + if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { + os.RemoveAll(tempDir) + return "", fmt.Errorf("failed to create parent directory: %w", err) + } + + // Create file + outFile, err := os.Create(target) + if err != nil { + os.RemoveAll(tempDir) + return "", fmt.Errorf("failed to create file: %w", err) + } + + // Copy contents + if _, err := io.Copy(outFile, tarReader); err != nil { + outFile.Close() + os.RemoveAll(tempDir) + return "", fmt.Errorf("failed to write file: %w", err) + } + outFile.Close() + } + } + + return tempDir, nil +} + +// cleanupWorkDir removes the temporary work directory +func cleanupWorkDir(dir string) { + if dir != "" { + if err := os.RemoveAll(dir); err != nil { + fmt.Printf("Warning: failed to cleanup work dir %s: %v\n", dir, err) + } + } +} + +// createBackendOverride removes cloud/backend configuration from Terraform files +// This is required for server-side execution to prevent circular dependencies +func createBackendOverride(workDir string) error { + // Walk through all .tf files and remove cloud{} and backend{} blocks from terraform{} blocks + err := filepath.Walk(workDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Only process .tf files + if !info.IsDir() && strings.HasSuffix(path, ".tf") { + content, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read %s: %w", path, err) + } + + contentStr := string(content) + + // Check if file contains terraform block with cloud or backend + if strings.Contains(contentStr, "cloud") || strings.Contains(contentStr, "backend") { + fmt.Printf("Removing cloud/backend configuration from %s\n", path) + + // Comment out cloud and backend blocks + // This is a simple approach - we comment out lines containing "cloud {" and "backend " + lines := strings.Split(contentStr, "\n") + var inBlock bool + var blockDepth int + + for i, line := range lines { + trimmed := strings.TrimSpace(line) + + // Start of cloud or backend block + if (strings.Contains(trimmed, "cloud {") || strings.Contains(trimmed, "backend ")) && !strings.HasPrefix(trimmed, "#") { + lines[i] = "# " + line + " # Disabled by TFE executor" + inBlock = true + blockDepth = strings.Count(line, "{") - strings.Count(line, "}") + continue + } + + // Inside block - comment out + if inBlock { + blockDepth += strings.Count(line, "{") - strings.Count(line, "}") + 
lines[i] = "# " + line + + if blockDepth <= 0 { + inBlock = false + } + } + } + + modifiedContent := strings.Join(lines, "\n") + if err := os.WriteFile(path, []byte(modifiedContent), info.Mode()); err != nil { + return fmt.Errorf("failed to write %s: %w", path, err) + } + } + } + + return nil + }) + + if err != nil { + return fmt.Errorf("failed to process terraform files: %w", err) + } + + fmt.Printf("Successfully removed cloud/backend configuration from Terraform files\n") + return nil +} + diff --git a/taco/internal/tfe/runs.go b/taco/internal/tfe/runs.go index c9bb8655d..f637f26a7 100644 --- a/taco/internal/tfe/runs.go +++ b/taco/internal/tfe/runs.go @@ -1,9 +1,12 @@ package tfe import ( + "context" + "encoding/json" "fmt" "net/http" + "github.com/diggerhq/digger/opentaco/internal/domain" "github.com/diggerhq/digger/opentaco/internal/domain/tfe" "github.com/google/jsonapi" "github.com/labstack/echo/v4" @@ -11,40 +14,68 @@ import ( func (h *TfeHandler) GetRun(c echo.Context) error { + ctx := c.Request().Context() runID := c.Param("id") - // You should look these up from storage in a real impl. - // For now we're going to hardcode stable IDs that match what you - // returned from POST /runs. - planID := "plan-xyz789" - workspaceID := "ws-0b44c5b1-8321-43e3-864d-a2921d004835" - cvID := "cv-1234567890" - - run := tfe.TFERun{ - ID: runID, - Status: "planned_and_finished", - IsDestroy: false, - Message: "Queued manually via Terraform CLI", - PlanOnly: true, + // Get run from database + run, err := h.runRepo.GetRun(ctx, runID) + if err != nil { + fmt.Printf("Failed to get run %s: %v\n", runID, err) + return c.JSON(http.StatusNotFound, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "404", + "title": "not found", + "detail": fmt.Sprintf("Run %s not found", runID), + }}, + }) + } + + // Determine if run is confirmable (waiting for user approval) + isConfirmable := run.Status == "planned_and_finished" && run.CanApply && !run.AutoApply + + fmt.Printf("[GetRun] 🔍 Poll: runID=%s, status=%s, canApply=%v, autoApply=%v, planOnly=%v, isConfirmable=%v\n", + run.ID, run.Status, run.CanApply, run.AutoApply, run.PlanOnly, isConfirmable) + + // Use unit ID as workspace ID (they're the same in our architecture) + workspaceID := run.UnitID + + // Build response + response := tfe.TFERun{ + ID: run.ID, + Status: run.Status, + IsDestroy: run.IsDestroy, + Message: run.Message, + PlanOnly: run.PlanOnly, Actions: &tfe.RunActions{ - IsCancelable: false, - CanApply: false, - }, - Plan: &tfe.PlanRef{ - ID: planID, + IsCancelable: run.IsCancelable, + IsConfirmable: isConfirmable, + CanApply: run.CanApply, }, Workspace: &tfe.WorkspaceRef{ ID: workspaceID, }, ConfigurationVersion: &tfe.ConfigurationVersionRef{ - ID: cvID, + ID: run.ConfigurationVersionID, }, } + if run.PlanID != nil { + response.Plan = &tfe.PlanRef{ID: *run.PlanID} + } + + // Include apply reference when run is applying or applied + // In our simplified model, apply ID is the same as run ID + if run.Status == "applying" || run.Status == "applied" || run.Status == "apply_queued" { + response.Apply = &tfe.ApplyRef{ID: run.ID} + fmt.Printf("[GetRun] Added Apply reference: applyID=%s for status=%s\n", run.ID, run.Status) + } else { + fmt.Printf("[GetRun] No Apply reference (status=%s)\n", run.Status) + } + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") c.Response().WriteHeader(http.StatusOK) - if err := jsonapi.MarshalPayload(c.Response().Writer, &run); err != nil { + if err := 
jsonapi.MarshalPayload(c.Response().Writer, &response); err != nil { fmt.Printf("error marshaling run payload: %v\n", err) return err } @@ -53,27 +84,167 @@ func (h *TfeHandler) GetRun(c echo.Context) error { func (h *TfeHandler) CreateRun(c echo.Context) error { - // You could decode the incoming JSON here to read workspace ID, - // configuration-version ID, message, plan-only, etc. - // For now we’ll just hardcode / stub. - workspaceID := "ws-0b44c5b1-8321-43e3-864d-a2921d004835" - cvID := "cv-1234567890" - - runID := "run-abc123" - planID := "plan-xyz789" - - run := tfe.TFERun{ - ID: runID, - Status: "planning", // Terraform will expect to poll until it becomes "planned_and_finished" - IsDestroy: false, - Message: "Queued manually via Terraform CLI", - PlanOnly: true, + ctx := c.Request().Context() + + // Decode the JSON:API request + var requestData struct { + Data struct { + Attributes struct { + Message string `json:"message"` + IsDestroy bool `json:"is-destroy"` + AutoApply bool `json:"auto-apply"` // Terraform CLI sends this with -auto-approve + } `json:"attributes"` + Relationships struct { + Workspace struct { + Data struct { + ID string `json:"id"` + } `json:"data"` + } `json:"workspace"` + ConfigurationVersion struct { + Data struct { + ID string `json:"id"` + } `json:"data"` + } `json:"configuration-version"` + } `json:"relationships"` + } `json:"data"` + } + + // Manually decode JSON since content-type is application/vnd.api+json + if err := json.NewDecoder(c.Request().Body).Decode(&requestData); err != nil { + fmt.Printf("Failed to decode request: %v\n", err) + return c.JSON(http.StatusBadRequest, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "400", + "title": "bad request", + "detail": "Invalid request format", + }}, + }) + } + + workspaceID := requestData.Data.Relationships.Workspace.Data.ID + cvID := requestData.Data.Relationships.ConfigurationVersion.Data.ID + message := requestData.Data.Attributes.Message + isDestroy := requestData.Data.Attributes.IsDestroy + autoApply := requestData.Data.Attributes.AutoApply + + // Log the full request for debugging + fmt.Printf("📥 CreateRun request: message=%q, isDestroy=%v, autoApply=%v, workspaceID=%s\n", + message, isDestroy, autoApply, workspaceID) + + // Get org and user context from middleware + orgIdentifier, _ := c.Get("organization_id").(string) + userID, _ := c.Get("user_id").(string) + + if orgIdentifier == "" { + orgIdentifier = "default-org" // Fallback for testing + } + if userID == "" { + userID = "system" + } + + // Resolve external org ID (e.g., "org_01K9X3...") to internal UUID + // This is critical for S3 path construction: //terraform.tfstate + orgUUID, err := h.identifierResolver.ResolveOrganization(ctx, orgIdentifier) + if err != nil { + fmt.Printf("Failed to resolve organization %s: %v\n", orgIdentifier, err) + return c.JSON(http.StatusInternalServerError, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "500", + "title": "internal error", + "detail": fmt.Sprintf("Failed to resolve organization: %v", err), + }}, + }) + } + fmt.Printf("Resolved org identifier '%s' to UUID '%s'\n", orgIdentifier, orgUUID) + + // Strip ws- prefix from workspace ID to get the actual unit ID + unitID := convertWorkspaceToStateID(workspaceID) + + // Create the run in database + run := &domain.TFERun{ + OrgID: orgUUID, // Store UUID, not external ID! 
+ UnitID: unitID, + Status: "pending", + IsDestroy: isDestroy, + Message: message, + PlanOnly: false, // Always false for apply operations (terraform apply with or without -auto-approve) + AutoApply: autoApply, // Only auto-trigger if -auto-approve was used + Source: "cli", + IsCancelable: true, + CanApply: false, + ConfigurationVersionID: cvID, + CreatedBy: userID, + } + + fmt.Printf("Creating run: autoApply=%v, planOnly=%v\n", autoApply, run.PlanOnly) + + if err := h.runRepo.CreateRun(ctx, run); err != nil { + fmt.Printf("Failed to create run: %v\n", err) + return c.JSON(http.StatusInternalServerError, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "500", + "title": "internal error", + "detail": "Failed to create run", + }}, + }) + } + + fmt.Printf("Created run %s for unit %s\n", run.ID, unitID) + + // Create a plan for this run + plan := &domain.TFEPlan{ + OrgID: orgUUID, + RunID: run.ID, + Status: "pending", + CreatedBy: userID, + } + + if err := h.planRepo.CreatePlan(ctx, plan); err != nil { + fmt.Printf("Failed to create plan: %v\n", err) + return c.JSON(http.StatusInternalServerError, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "500", + "title": "internal error", + "detail": "Failed to create plan", + }}, + }) + } + + fmt.Printf("Created plan %s for run %s\n", plan.ID, run.ID) + + // Update run with plan ID + if err := h.runRepo.UpdateRunPlanID(ctx, run.ID, plan.ID); err != nil { + fmt.Printf("Failed to update run with plan ID: %v\n", err) + // Non-fatal, continue + } + + // Trigger real plan execution asynchronously + go func() { + fmt.Printf("[CreateRun] Starting async plan execution for run %s\n", run.ID) + // Create plan executor + executor := NewPlanExecutor(h.runRepo, h.planRepo, h.configVerRepo, h.blobStore) + + // Execute the plan (this will run terraform plan) + if err := executor.ExecutePlan(context.Background(), run.ID); err != nil { + fmt.Printf("[CreateRun] ❌ Plan execution failed for run %s: %v\n", run.ID, err) + } else { + fmt.Printf("[CreateRun] ✅ Plan execution completed successfully for run %s\n", run.ID) + } + }() + + // Return JSON:API response + response := tfe.TFERun{ + ID: run.ID, + Status: "planning", // Return as planning immediately + IsDestroy: run.IsDestroy, + Message: run.Message, + PlanOnly: run.PlanOnly, Actions: &tfe.RunActions{ - IsCancelable: true, - CanApply: false, + IsCancelable: run.IsCancelable, + CanApply: run.CanApply, }, Plan: &tfe.PlanRef{ - ID: planID, + ID: plan.ID, }, Workspace: &tfe.WorkspaceRef{ ID: workspaceID, @@ -82,11 +253,19 @@ func (h *TfeHandler) CreateRun(c echo.Context) error { ID: cvID, }, } + + // For auto-apply runs, include Apply reference immediately so Terraform CLI knows to expect it + if run.AutoApply { + response.Apply = &tfe.ApplyRef{ID: run.ID} + fmt.Printf("[CreateRun] Added Apply reference for auto-apply run: applyID=%s\n", run.ID) + } else { + fmt.Printf("[CreateRun] No Apply reference (AutoApply=false, user will confirm apply manually)\n") + } c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") c.Response().WriteHeader(http.StatusCreated) - if err := jsonapi.MarshalPayload(c.Response().Writer, &run); err != nil { + if err := jsonapi.MarshalPayload(c.Response().Writer, &response); err != nil { fmt.Printf("error marshaling run payload: %v\n", err) return err } @@ -94,6 +273,187 @@ func (h *TfeHandler) CreateRun(c echo.Context) error { return nil } +// Helper function for pointer to string +func stringPtr(s string) *string { + return &s +} + 
+// ApplyRun handles POST /runs/:id/actions/apply +func (h *TfeHandler) ApplyRun(c echo.Context) error { + ctx := c.Request().Context() + runID := c.Param("id") + + // Get run from database + run, err := h.runRepo.GetRun(ctx, runID) + if err != nil { + fmt.Printf("Failed to get run %s: %v\n", runID, err) + return c.JSON(http.StatusNotFound, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "404", + "title": "not found", + "detail": fmt.Sprintf("Run %s not found", runID), + }}, + }) + } + + // Check if run can be applied + if run.Status != "planned_and_finished" { + return c.JSON(http.StatusConflict, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "409", + "title": "conflict", + "detail": fmt.Sprintf("Run cannot be applied in status %s", run.Status), + }}, + }) + } + + // Check if plan has changes + if run.PlanID != nil { + plan, err := h.planRepo.GetPlan(ctx, *run.PlanID) + if err == nil && !plan.HasChanges { + return c.JSON(http.StatusConflict, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "409", + "title": "conflict", + "detail": "Plan has no changes to apply", + }}, + }) + } + } + + fmt.Printf("Triggering apply for run %s\n", runID) + + // Update run status to apply_queued + if err := h.runRepo.UpdateRunStatus(ctx, runID, "apply_queued"); err != nil { + return c.JSON(http.StatusInternalServerError, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "500", + "title": "internal error", + "detail": "Failed to queue apply", + }}, + }) + } + + // Trigger real apply execution asynchronously + go func() { + // Create apply executor + executor := NewApplyExecutor(h.runRepo, h.planRepo, h.configVerRepo, h.blobStore) + + // Execute the apply (this will run terraform apply) + if err := executor.ExecuteApply(context.Background(), runID); err != nil { + fmt.Printf("Apply execution failed for run %s: %v\n", runID, err) + } + }() + + // Return updated run + run.Status = "apply_queued" + response := tfe.TFERun{ + ID: run.ID, + Status: run.Status, + IsDestroy: run.IsDestroy, + Message: run.Message, + PlanOnly: run.PlanOnly, + Actions: &tfe.RunActions{ + IsCancelable: false, + CanApply: false, + }, + Workspace: &tfe.WorkspaceRef{ + ID: run.UnitID, + }, + ConfigurationVersion: &tfe.ConfigurationVersionRef{ + ID: run.ConfigurationVersionID, + }, + } + + if run.PlanID != nil { + response.Plan = &tfe.PlanRef{ID: *run.PlanID} + } + + // Include Apply reference so Terraform CLI knows to fetch apply logs + response.Apply = &tfe.ApplyRef{ID: run.ID} + fmt.Printf("[ApplyRun] Added Apply reference: applyID=%s\n", run.ID) + + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + c.Response().WriteHeader(http.StatusOK) + + if err := jsonapi.MarshalPayload(c.Response().Writer, &response); err != nil { + fmt.Printf("error marshaling run payload: %v\n", err) + return err + } + return nil +} + + +// GetRunEvents returns timeline events for a run (used by Terraform CLI to track progress) +func (h *TfeHandler) GetRunEvents(c echo.Context) error { + ctx := c.Request().Context() + runID := c.Param("id") + + // Get run to generate events based on status + run, err := h.runRepo.GetRun(ctx, runID) + if err != nil { + return c.JSON(http.StatusNotFound, map[string]interface{}{ + "errors": []map[string]string{{ + "status": "404", + "title": "not found", + "detail": fmt.Sprintf("Run %s not found", runID), + }}, + }) + } + + // Generate events based on run status (JSON:API format with type field) + events := 
[]map[string]interface{}{} + eventCounter := 0 + + // Helper to create a properly formatted event + addEvent := func(action, description string) { + eventCounter++ + events = append(events, map[string]interface{}{ + "type": "run-events", + "id": fmt.Sprintf("%s-%d", runID, eventCounter), + "attributes": map[string]interface{}{ + "action": action, + "created-at": run.UpdatedAt.Format("2006-01-02T15:04:05Z"), + "description": description, + }, + }) + } + + // Always include "run created" event + addEvent("created", "Run was created") + + // Add status-specific events + switch run.Status { + case "planning", "planned", "planned_and_finished": + addEvent("planning", "Plan is running") + case "applying", "applied": + addEvent("planning", "Plan completed") + addEvent("applying", "Apply is running") + case "errored": + addEvent("errored", "Run encountered an error") + } + + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + return c.JSON(http.StatusOK, map[string]interface{}{"data": events}) +} + +// GetPolicyChecks returns Sentinel policy check results (enterprise feature - return empty) +func (h *TfeHandler) GetPolicyChecks(c echo.Context) error { + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + return c.JSON(http.StatusOK, map[string]interface{}{"data": []interface{}{}}) +} + +// GetTaskStages returns run task stages (newer feature - return empty) +func (h *TfeHandler) GetTaskStages(c echo.Context) error { + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + return c.JSON(http.StatusOK, map[string]interface{}{"data": []interface{}{}}) +} + +// GetCostEstimates returns cost estimation results (enterprise feature - return empty) +func (h *TfeHandler) GetCostEstimates(c echo.Context) error { + c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") + return c.JSON(http.StatusOK, map[string]interface{}{"data": []interface{}{}}) +} func (h *TfeHandler) EmptyListResponse(c echo.Context) error { c.Response().Header().Set(echo.HeaderContentType, "application/vnd.api+json") diff --git a/taco/internal/tfe/tfe.go b/taco/internal/tfe/tfe.go index 378ff00cb..988fb2ea4 100644 --- a/taco/internal/tfe/tfe.go +++ b/taco/internal/tfe/tfe.go @@ -16,18 +16,38 @@ type TfeHandler struct { rbacManager *rbac.RBACManager apiTokens *auth.APITokenManager identifierResolver domain.IdentifierResolver // For resolving org external IDs + + // TFE repositories for runs, plans, and configuration versions + runRepo domain.TFERunRepository + planRepo domain.TFEPlanRepository + configVerRepo domain.TFEConfigurationVersionRepository + blobStore storage.UnitStore } // NewTFETokenHandler creates a new TFE handler. // Accepts wrapped (RBAC-enforced) and unwrapped (direct) repositories. // The unwrapped repository is used for signed URL operations which are pre-authorized. 
-func NewTFETokenHandler(authHandler *auth.Handler, wrappedRepo domain.UnitRepository, unwrappedRepo domain.UnitRepository, blobStore storage.UnitStore, rbacManager *rbac.RBACManager, identifierResolver domain.IdentifierResolver) *TfeHandler {
+func NewTFETokenHandler(
+	authHandler *auth.Handler,
+	wrappedRepo domain.UnitRepository,
+	unwrappedRepo domain.UnitRepository,
+	blobStore storage.UnitStore,
+	rbacManager *rbac.RBACManager,
+	identifierResolver domain.IdentifierResolver,
+	runRepo domain.TFERunRepository,
+	planRepo domain.TFEPlanRepository,
+	configVerRepo domain.TFEConfigurationVersionRepository,
+) *TfeHandler {
 	return &TfeHandler{
 		authHandler:        authHandler,
-		stateStore:         domain.TFEOperations(wrappedRepo),   // Use RBAC wrapper for authenticated calls
-		directStateStore:   domain.TFEOperations(unwrappedRepo), // Bypass RBAC for signed URLs
+		stateStore:         domain.TFEOperations(wrappedRepo),
+		directStateStore:   domain.TFEOperations(unwrappedRepo),
 		rbacManager:        rbacManager,
 		apiTokens:          auth.NewAPITokenManagerFromStore(blobStore),
 		identifierResolver: identifierResolver,
+		runRepo:            runRepo,
+		planRepo:           planRepo,
+		configVerRepo:      configVerRepo,
+		blobStore:          blobStore,
 	}
 }
diff --git a/taco/internal/unit/handler.go b/taco/internal/unit/handler.go
index 3fbbfdd23..070ca14c4 100644
--- a/taco/internal/unit/handler.go
+++ b/taco/internal/unit/handler.go
@@ -80,7 +80,11 @@ func (h *Handler) resolveUnitIdentifier(ctx context.Context, identifier string)
 }
 
 type CreateUnitRequest struct {
-	Name string `json:"name"`
+	Name                string  `json:"name"`
+	TFEAutoApply        *bool   `json:"tfe_auto_apply"`
+	TFEExecutionMode    *string `json:"tfe_execution_mode"`
+	TFETerraformVersion *string `json:"tfe_terraform_version"`
+	TFEWorkingDirectory *string `json:"tfe_working_directory"`
 }
 
 type CreateUnitResponse struct {
@@ -127,6 +131,7 @@ func (h *Handler) CreateUnit(c echo.Context) error {
 		"operation", "create_unit",
 		"name", name,
 		"org_id", orgCtx.OrgID,
+		"tfe_execution_mode", req.TFEExecutionMode,
 	)
 
 	metadata, err := h.store.Create(ctx, orgCtx.OrgID, name)
@@ -156,6 +161,26 @@ func (h *Handler) CreateUnit(c echo.Context) error {
 		})
 	}
 
+	// Update TFE fields if provided (after unit creation)
+	if req.TFEAutoApply != nil || req.TFEExecutionMode != nil || req.TFETerraformVersion != nil || req.TFEWorkingDirectory != nil {
+		if h.queryStore != nil {
+			if err := h.queryStore.UpdateUnitTFESettings(ctx, metadata.ID, req.TFEAutoApply, req.TFEExecutionMode, req.TFETerraformVersion, req.TFEWorkingDirectory); err != nil {
+				logger.Warn("Failed to update TFE settings for unit",
+					"operation", "create_unit",
+					"unit_id", metadata.ID,
+					"error", err,
+				)
+				// Don't fail the request, just log the warning
+			} else {
+				logger.Info("Updated TFE settings for unit",
+					"operation", "create_unit",
+					"unit_id", metadata.ID,
+					"tfe_execution_mode", req.TFEExecutionMode,
+				)
+			}
+		}
+	}
+
 	logger.Info("Unit created successfully",
 		"operation", "create_unit",
 		"name", name,
diff --git a/taco/migrations/mysql/20251114000000_add_tfe_tables.sql b/taco/migrations/mysql/20251114000000_add_tfe_tables.sql
new file mode 100644
index 000000000..c74560edb
--- /dev/null
+++ b/taco/migrations/mysql/20251114000000_add_tfe_tables.sql
@@ -0,0 +1,97 @@
+-- Add TFE-specific fields to units table (MySQL does not support ADD COLUMN IF NOT EXISTS)
+ALTER TABLE `units`
+    ADD COLUMN `tfe_auto_apply` boolean DEFAULT NULL,
+    ADD COLUMN `tfe_terraform_version` varchar(50) DEFAULT NULL,
+    ADD COLUMN `tfe_working_directory` varchar(500) DEFAULT NULL,
+    ADD COLUMN
`tfe_execution_mode` varchar(50) DEFAULT NULL; + +-- Create tfe_runs table +CREATE TABLE IF NOT EXISTS `tfe_runs` ( + `id` varchar(36) NOT NULL PRIMARY KEY, + `org_id` varchar(36) NOT NULL, + `unit_id` varchar(36) NOT NULL, + `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `status` varchar(50) NOT NULL DEFAULT 'pending', + `is_destroy` boolean NOT NULL DEFAULT false, + `message` text, + `plan_only` boolean NOT NULL DEFAULT true, + `source` varchar(50) NOT NULL DEFAULT 'cli', + `is_cancelable` boolean NOT NULL DEFAULT true, + `can_apply` boolean NOT NULL DEFAULT false, + `configuration_version_id` varchar(36) NOT NULL, + `plan_id` varchar(36), + `apply_id` varchar(36), + `created_by` varchar(255), + INDEX `idx_tfe_runs_org_id` (`org_id`), + INDEX `idx_tfe_runs_unit_id` (`unit_id`), + INDEX `idx_tfe_runs_configuration_version_id` (`configuration_version_id`), + INDEX `idx_tfe_runs_plan_id` (`plan_id`), + INDEX `idx_tfe_runs_status` (`status`), + INDEX `idx_tfe_runs_created_at` (`created_at` DESC) +) CHARSET utf8mb4 COLLATE utf8mb4_0900_ai_ci; + +-- Create tfe_plans table +CREATE TABLE IF NOT EXISTS `tfe_plans` ( + `id` varchar(36) NOT NULL PRIMARY KEY, + `org_id` varchar(36) NOT NULL, + `run_id` varchar(36) NOT NULL, + `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `status` varchar(50) NOT NULL DEFAULT 'pending', + `resource_additions` int NOT NULL DEFAULT 0, + `resource_changes` int NOT NULL DEFAULT 0, + `resource_destructions` int NOT NULL DEFAULT 0, + `has_changes` boolean NOT NULL DEFAULT false, + `log_blob_id` varchar(255), + `log_read_url` text, + `plan_output_blob_id` varchar(255), + `plan_output_json` longtext, + `created_by` varchar(255), + INDEX `idx_tfe_plans_org_id` (`org_id`), + INDEX `idx_tfe_plans_run_id` (`run_id`), + INDEX `idx_tfe_plans_status` (`status`) +) CHARSET utf8mb4 COLLATE utf8mb4_0900_ai_ci; + +-- Create tfe_configuration_versions table +CREATE TABLE IF NOT EXISTS `tfe_configuration_versions` ( + `id` varchar(36) NOT NULL PRIMARY KEY, + `org_id` varchar(36) NOT NULL, + `unit_id` varchar(36) NOT NULL, + `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `status` varchar(50) NOT NULL DEFAULT 'pending', + `source` varchar(50) NOT NULL DEFAULT 'cli', + `speculative` boolean NOT NULL DEFAULT true, + `auto_queue_runs` boolean NOT NULL DEFAULT false, + `provisional` boolean NOT NULL DEFAULT false, + `error` text, + `error_message` text, + `upload_url` text, + `uploaded_at` datetime, + `archive_blob_id` varchar(255), + `status_timestamps` json NOT NULL, + `created_by` varchar(255), + INDEX `idx_tfe_configuration_versions_org_id` (`org_id`), + INDEX `idx_tfe_configuration_versions_unit_id` (`unit_id`), + INDEX `idx_tfe_configuration_versions_status` (`status`), + INDEX `idx_tfe_configuration_versions_created_at` (`created_at` DESC) +) CHARSET utf8mb4 COLLATE utf8mb4_0900_ai_ci; + +-- Add foreign key constraints +ALTER TABLE `tfe_runs` + ADD CONSTRAINT `fk_tfe_runs_unit` + FOREIGN KEY (`unit_id`) REFERENCES `units` (`id`) ON DELETE CASCADE; + +ALTER TABLE `tfe_runs` + ADD CONSTRAINT `fk_tfe_runs_configuration_version` + FOREIGN KEY (`configuration_version_id`) REFERENCES `tfe_configuration_versions` (`id`) ON DELETE CASCADE; + +ALTER TABLE `tfe_plans` + ADD CONSTRAINT 
`fk_tfe_plans_run` + FOREIGN KEY (`run_id`) REFERENCES `tfe_runs` (`id`) ON DELETE CASCADE; + +ALTER TABLE `tfe_configuration_versions` + ADD CONSTRAINT `fk_tfe_configuration_versions_unit` + FOREIGN KEY (`unit_id`) REFERENCES `units` (`id`) ON DELETE CASCADE; + diff --git a/taco/migrations/postgres/20251114000000_add_tfe_tables.sql b/taco/migrations/postgres/20251114000000_add_tfe_tables.sql new file mode 100644 index 000000000..75fd4c4d4 --- /dev/null +++ b/taco/migrations/postgres/20251114000000_add_tfe_tables.sql @@ -0,0 +1,103 @@ +-- Add TFE-specific fields to units table +ALTER TABLE "public"."units" + ADD COLUMN IF NOT EXISTS "tfe_auto_apply" boolean DEFAULT NULL, + ADD COLUMN IF NOT EXISTS "tfe_terraform_version" varchar(50) DEFAULT NULL, + ADD COLUMN IF NOT EXISTS "tfe_working_directory" varchar(500) DEFAULT NULL, + ADD COLUMN IF NOT EXISTS "tfe_execution_mode" varchar(50) DEFAULT NULL; + +-- Create tfe_runs table +CREATE TABLE IF NOT EXISTS "public"."tfe_runs" ( + "id" varchar(36) NOT NULL PRIMARY KEY, + "org_id" varchar(36) NOT NULL, + "unit_id" varchar(36) NOT NULL, + "created_at" timestamp NOT NULL DEFAULT now(), + "updated_at" timestamp NOT NULL DEFAULT now(), + "status" varchar(50) NOT NULL DEFAULT 'pending', + "is_destroy" boolean NOT NULL DEFAULT false, + "message" text, + "plan_only" boolean NOT NULL DEFAULT true, + "source" varchar(50) NOT NULL DEFAULT 'cli', + "is_cancelable" boolean NOT NULL DEFAULT true, + "can_apply" boolean NOT NULL DEFAULT false, + "configuration_version_id" varchar(36) NOT NULL, + "plan_id" varchar(36), + "apply_id" varchar(36), + "created_by" varchar(255) +); + +-- Create indexes for tfe_runs +CREATE INDEX IF NOT EXISTS "idx_tfe_runs_org_id" ON "public"."tfe_runs" ("org_id"); +CREATE INDEX IF NOT EXISTS "idx_tfe_runs_unit_id" ON "public"."tfe_runs" ("unit_id"); +CREATE INDEX IF NOT EXISTS "idx_tfe_runs_configuration_version_id" ON "public"."tfe_runs" ("configuration_version_id"); +CREATE INDEX IF NOT EXISTS "idx_tfe_runs_plan_id" ON "public"."tfe_runs" ("plan_id"); +CREATE INDEX IF NOT EXISTS "idx_tfe_runs_status" ON "public"."tfe_runs" ("status"); +CREATE INDEX IF NOT EXISTS "idx_tfe_runs_created_at" ON "public"."tfe_runs" ("created_at" DESC); + +-- Create tfe_plans table +CREATE TABLE IF NOT EXISTS "public"."tfe_plans" ( + "id" varchar(36) NOT NULL PRIMARY KEY, + "org_id" varchar(36) NOT NULL, + "run_id" varchar(36) NOT NULL, + "created_at" timestamp NOT NULL DEFAULT now(), + "updated_at" timestamp NOT NULL DEFAULT now(), + "status" varchar(50) NOT NULL DEFAULT 'pending', + "resource_additions" integer NOT NULL DEFAULT 0, + "resource_changes" integer NOT NULL DEFAULT 0, + "resource_destructions" integer NOT NULL DEFAULT 0, + "has_changes" boolean NOT NULL DEFAULT false, + "log_blob_id" varchar(255), + "log_read_url" text, + "plan_output_blob_id" varchar(255), + "plan_output_json" text, + "created_by" varchar(255) +); + +-- Create indexes for tfe_plans +CREATE INDEX IF NOT EXISTS "idx_tfe_plans_org_id" ON "public"."tfe_plans" ("org_id"); +CREATE INDEX IF NOT EXISTS "idx_tfe_plans_run_id" ON "public"."tfe_plans" ("run_id"); +CREATE INDEX IF NOT EXISTS "idx_tfe_plans_status" ON "public"."tfe_plans" ("status"); + +-- Create tfe_configuration_versions table +CREATE TABLE IF NOT EXISTS "public"."tfe_configuration_versions" ( + "id" varchar(36) NOT NULL PRIMARY KEY, + "org_id" varchar(36) NOT NULL, + "unit_id" varchar(36) NOT NULL, + "created_at" timestamp NOT NULL DEFAULT now(), + "updated_at" timestamp NOT NULL DEFAULT now(), + "status" varchar(50) 
NOT NULL DEFAULT 'pending',
+    "source" varchar(50) NOT NULL DEFAULT 'cli',
+    "speculative" boolean NOT NULL DEFAULT true,
+    "auto_queue_runs" boolean NOT NULL DEFAULT false,
+    "provisional" boolean NOT NULL DEFAULT false,
+    "error" text,
+    "error_message" text,
+    "upload_url" text,
+    "uploaded_at" timestamp,
+    "archive_blob_id" varchar(255),
+    "status_timestamps" json NOT NULL DEFAULT '{}',
+    "created_by" varchar(255)
+);
+
+-- Create indexes for tfe_configuration_versions
+CREATE INDEX IF NOT EXISTS "idx_tfe_configuration_versions_org_id" ON "public"."tfe_configuration_versions" ("org_id");
+CREATE INDEX IF NOT EXISTS "idx_tfe_configuration_versions_unit_id" ON "public"."tfe_configuration_versions" ("unit_id");
+CREATE INDEX IF NOT EXISTS "idx_tfe_configuration_versions_status" ON "public"."tfe_configuration_versions" ("status");
+CREATE INDEX IF NOT EXISTS "idx_tfe_configuration_versions_created_at" ON "public"."tfe_configuration_versions" ("created_at" DESC);
+
+-- Add foreign key constraints (PostgreSQL does not support ADD CONSTRAINT IF NOT EXISTS)
+ALTER TABLE "public"."tfe_runs"
+    ADD CONSTRAINT "fk_tfe_runs_unit"
+    FOREIGN KEY ("unit_id") REFERENCES "public"."units" ("id") ON DELETE CASCADE;
+
+ALTER TABLE "public"."tfe_runs"
+    ADD CONSTRAINT "fk_tfe_runs_configuration_version"
+    FOREIGN KEY ("configuration_version_id") REFERENCES "public"."tfe_configuration_versions" ("id") ON DELETE CASCADE;
+
+ALTER TABLE "public"."tfe_plans"
+    ADD CONSTRAINT "fk_tfe_plans_run"
+    FOREIGN KEY ("run_id") REFERENCES "public"."tfe_runs" ("id") ON DELETE CASCADE;
+
+ALTER TABLE "public"."tfe_configuration_versions"
+    ADD CONSTRAINT "fk_tfe_configuration_versions_unit"
+    FOREIGN KEY ("unit_id") REFERENCES "public"."units" ("id") ON DELETE CASCADE;
+
diff --git a/taco/migrations/sqlite/20251114000000_add_tfe_tables.sql b/taco/migrations/sqlite/20251114000000_add_tfe_tables.sql
new file mode 100644
index 000000000..c68319a46
--- /dev/null
+++ b/taco/migrations/sqlite/20251114000000_add_tfe_tables.sql
@@ -0,0 +1,89 @@
+-- Add TFE-specific fields to units table
+ALTER TABLE `units` ADD COLUMN `tfe_auto_apply` integer DEFAULT NULL;
+ALTER TABLE `units` ADD COLUMN `tfe_terraform_version` varchar(50) DEFAULT NULL;
+ALTER TABLE `units` ADD COLUMN `tfe_working_directory` varchar(500) DEFAULT NULL;
+ALTER TABLE `units` ADD COLUMN `tfe_execution_mode` varchar(50) DEFAULT NULL;
+
+-- Create tfe_runs table
+CREATE TABLE IF NOT EXISTS `tfe_runs` (
+    `id` varchar(36) NOT NULL PRIMARY KEY,
+    `org_id` varchar(36) NOT NULL,
+    `unit_id` varchar(36) NOT NULL,
+    `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    `status` varchar(50) NOT NULL DEFAULT 'pending',
+    `is_destroy` integer NOT NULL DEFAULT 0,
+    `message` text,
+    `plan_only` integer NOT NULL DEFAULT 1,
+    `source` varchar(50) NOT NULL DEFAULT 'cli',
+    `is_cancelable` integer NOT NULL DEFAULT 1,
+    `can_apply` integer NOT NULL DEFAULT 0,
+    `configuration_version_id` varchar(36) NOT NULL,
+    `plan_id` varchar(36),
+    `apply_id` varchar(36),
+    `created_by` varchar(255),
+    FOREIGN KEY (`unit_id`) REFERENCES `units` (`id`) ON DELETE CASCADE,
+    FOREIGN KEY (`configuration_version_id`) REFERENCES `tfe_configuration_versions` (`id`) ON DELETE CASCADE
+);
+
+-- Create indexes for tfe_runs
+CREATE INDEX IF NOT EXISTS `idx_tfe_runs_org_id` ON `tfe_runs` (`org_id`);
+CREATE INDEX IF NOT EXISTS `idx_tfe_runs_unit_id` ON `tfe_runs` (`unit_id`);
+CREATE INDEX IF NOT EXISTS
`idx_tfe_runs_configuration_version_id` ON `tfe_runs` (`configuration_version_id`); +CREATE INDEX IF NOT EXISTS `idx_tfe_runs_plan_id` ON `tfe_runs` (`plan_id`); +CREATE INDEX IF NOT EXISTS `idx_tfe_runs_status` ON `tfe_runs` (`status`); +CREATE INDEX IF NOT EXISTS `idx_tfe_runs_created_at` ON `tfe_runs` (`created_at` DESC); + +-- Create tfe_plans table +CREATE TABLE IF NOT EXISTS `tfe_plans` ( + `id` varchar(36) NOT NULL PRIMARY KEY, + `org_id` varchar(36) NOT NULL, + `run_id` varchar(36) NOT NULL, + `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `status` varchar(50) NOT NULL DEFAULT 'pending', + `resource_additions` integer NOT NULL DEFAULT 0, + `resource_changes` integer NOT NULL DEFAULT 0, + `resource_destructions` integer NOT NULL DEFAULT 0, + `has_changes` integer NOT NULL DEFAULT 0, + `log_blob_id` varchar(255), + `log_read_url` text, + `plan_output_blob_id` varchar(255), + `plan_output_json` text, + `created_by` varchar(255), + FOREIGN KEY (`run_id`) REFERENCES `tfe_runs` (`id`) ON DELETE CASCADE +); + +-- Create indexes for tfe_plans +CREATE INDEX IF NOT EXISTS `idx_tfe_plans_org_id` ON `tfe_plans` (`org_id`); +CREATE INDEX IF NOT EXISTS `idx_tfe_plans_run_id` ON `tfe_plans` (`run_id`); +CREATE INDEX IF NOT EXISTS `idx_tfe_plans_status` ON `tfe_plans` (`status`); + +-- Create tfe_configuration_versions table +CREATE TABLE IF NOT EXISTS `tfe_configuration_versions` ( + `id` varchar(36) NOT NULL PRIMARY KEY, + `org_id` varchar(36) NOT NULL, + `unit_id` varchar(36) NOT NULL, + `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `status` varchar(50) NOT NULL DEFAULT 'pending', + `source` varchar(50) NOT NULL DEFAULT 'cli', + `speculative` integer NOT NULL DEFAULT 1, + `auto_queue_runs` integer NOT NULL DEFAULT 0, + `provisional` integer NOT NULL DEFAULT 0, + `error` text, + `error_message` text, + `upload_url` text, + `uploaded_at` datetime, + `archive_blob_id` varchar(255), + `status_timestamps` text NOT NULL DEFAULT '{}', + `created_by` varchar(255), + FOREIGN KEY (`unit_id`) REFERENCES `units` (`id`) ON DELETE CASCADE +); + +-- Create indexes for tfe_configuration_versions +CREATE INDEX IF NOT EXISTS `idx_tfe_configuration_versions_org_id` ON `tfe_configuration_versions` (`org_id`); +CREATE INDEX IF NOT EXISTS `idx_tfe_configuration_versions_unit_id` ON `tfe_configuration_versions` (`unit_id`); +CREATE INDEX IF NOT EXISTS `idx_tfe_configuration_versions_status` ON `tfe_configuration_versions` (`status`); +CREATE INDEX IF NOT EXISTS `idx_tfe_configuration_versions_created_at` ON `tfe_configuration_versions` (`created_at` DESC); + diff --git a/taco/migrations/sqlite/20251114000001_add_auto_apply_to_runs.sql b/taco/migrations/sqlite/20251114000001_add_auto_apply_to_runs.sql new file mode 100644 index 000000000..229f61219 --- /dev/null +++ b/taco/migrations/sqlite/20251114000001_add_auto_apply_to_runs.sql @@ -0,0 +1,5 @@ +-- Add auto_apply and apply_log_blob_id columns to tfe_runs table + +ALTER TABLE tfe_runs ADD COLUMN auto_apply BOOLEAN DEFAULT FALSE; +ALTER TABLE tfe_runs ADD COLUMN apply_log_blob_id VARCHAR(255); + diff --git a/ui/src/api/statesman_serverFunctions.ts b/ui/src/api/statesman_serverFunctions.ts index 32d3a1381..76dd3adfc 100644 --- a/ui/src/api/statesman_serverFunctions.ts +++ b/ui/src/api/statesman_serverFunctions.ts @@ -65,9 +65,28 @@ export const getUnitStatusFn = createServerFn({method: 'GET'}) }) export const createUnitFn = 
createServerFn({method: 'POST'}) - .inputValidator((data : {userId: string, organisationId: string, email: string, name: string, requestId?: string}) => data) + .inputValidator((data : { + userId: string, + organisationId: string, + email: string, + name: string, + requestId?: string, + tfeAutoApply?: boolean, + tfeExecutionMode?: string, + tfeTerraformVersion?: string, + tfeWorkingDirectory?: string + }) => data) .handler(async ({ data }) => { - const unit : any = await createUnit(data.organisationId, data.userId, data.email, data.name); + const unit : any = await createUnit( + data.organisationId, + data.userId, + data.email, + data.name, + data.tfeAutoApply, + data.tfeExecutionMode, + data.tfeTerraformVersion, + data.tfeWorkingDirectory + ); return unit; }) diff --git a/ui/src/api/statesman_units.ts b/ui/src/api/statesman_units.ts index 0b53cbe81..a37cb72d1 100644 --- a/ui/src/api/statesman_units.ts +++ b/ui/src/api/statesman_units.ts @@ -170,7 +170,16 @@ export async function getUnitStatus(orgId: string, userId: string, email: string return response.json(); } -export async function createUnit(orgId: string, userId: string, email: string, name: string) { +export async function createUnit( + orgId: string, + userId: string, + email: string, + name: string, + tfeAutoApply?: boolean, + tfeExecutionMode?: string, + tfeTerraformVersion?: string, + tfeWorkingDirectory?: string +) { const response = await fetch(`${process.env.STATESMAN_BACKEND_URL}/internal/api/units`, { method: 'POST', headers: { @@ -183,6 +192,10 @@ export async function createUnit(orgId: string, userId: string, email: string, n }, body: JSON.stringify({ name: name, + tfe_auto_apply: tfeAutoApply, + tfe_execution_mode: tfeExecutionMode, + tfe_terraform_version: tfeTerraformVersion, + tfe_working_directory: tfeWorkingDirectory, }), }); diff --git a/ui/src/components/UnitCreateForm.tsx b/ui/src/components/UnitCreateForm.tsx index 966eec316..357e9cc52 100644 --- a/ui/src/components/UnitCreateForm.tsx +++ b/ui/src/components/UnitCreateForm.tsx @@ -53,6 +53,9 @@ export default function UnitCreateForm({ organisationId, email, name: unitName.trim(), + // Enable TFE remote execution for remote type + tfeAutoApply: unitType === 'remote', + tfeExecutionMode: unitType === 'remote' ? 'remote' : 'local', }, }) // analytics: track unit creation @@ -120,21 +123,22 @@ export default function UnitCreateForm({