diff --git a/changelog/pending/20230128--backend-filestate--the-filestate-backend-now-supports-project-scoped-stacks.yaml b/changelog/pending/20230128--backend-filestate--the-filestate-backend-now-supports-project-scoped-stacks.yaml new file mode 100644 index 000000000000..86e610a68e6f --- /dev/null +++ b/changelog/pending/20230128--backend-filestate--the-filestate-backend-now-supports-project-scoped-stacks.yaml @@ -0,0 +1,10 @@ +changes: +- type: feat + scope: backend/filestate + description: | + The filestate backend now supports project-scoped stacks. + Newly initialized storage will automatically use this mode. + You can migrate your old state store to enable project support by running `pulumi state upgrade`. + Note that this will make the state incompatible with old CLI versions. + Old CLI versions will not understand new stacks, but may write stack files to the old locations; + new CLIs will warn if they see those files and suggest running `pulumi state upgrade` again. diff --git a/pkg/backend/backend.go b/pkg/backend/backend.go index 4edfc9ebfd9e..083045e566af 100644 --- a/pkg/backend/backend.go +++ b/pkg/backend/backend.go @@ -74,7 +74,7 @@ type StackReference interface { // but that information is not part of the StackName() we pass to the engine. Name() tokens.Name - // Fully qualified name of the stack. + // Fully qualified name of the stack, including any organization, project, or other information. FullyQualifiedName() tokens.QName } diff --git a/pkg/backend/filestate/backend.go b/pkg/backend/filestate/backend.go index e9e5d67e0a2c..c3a33e59589b 100644 --- a/pkg/backend/filestate/backend.go +++ b/pkg/backend/filestate/backend.go @@ -1,4 +1,4 @@ -// Copyright 2016-2022, Pulumi Corporation. +// Copyright 2016-2023, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,9 +23,9 @@ import ( "os" "path" "path/filepath" - "regexp" "strings" "sync" + "sync/atomic" "time" "github.com/gofrs/uuid" @@ -59,6 +59,7 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v3/go/common/util/result" "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" + "gopkg.in/yaml.v3" ) // PulumiFilestateGzipEnvVar is an env var that must be truthy @@ -69,6 +70,9 @@ const PulumiFilestateGzipEnvVar = "PULUMI_SELF_MANAGED_STATE_GZIP" type Backend interface { backend.Backend local() // at the moment, no local specific info, so just use a marker function. + + // Upgrade to the latest state store version. + Upgrade(ctx context.Context) error } type localBackend struct { @@ -88,25 +92,67 @@ type localBackend struct { gzip bool // The current project, if any. - currentProject *workspace.Project + currentProject atomic.Pointer[workspace.Project] + + // The store controls the layout of stacks in the backend. + // We use different layouts based on the version of the backend + // specified in the metadata file. + // If the metadata file is missing, we use the legacy layout. + store referenceStore } type localBackendReference struct { - name tokens.Name + name tokens.Name + project tokens.Name + + // Backend that created this reference. + b *localBackend + + // referenceStore that created this reference. + // + // This is necessary because the referenceStore for a backend + // may change over time.
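+ // For example, Upgrade replaces the backend's store with the project-scoped layout, while references created earlier keep resolving their paths through the store that produced them.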
+ store referenceStore } -func (r localBackendReference) String() string { - return string(r.name) +func (r *localBackendReference) String() string { + // If project is blank this is a legacy non-project scoped stack reference, just return the name. + if r.project == "" { + return string(r.name) + } + + // For project scoped references when stringifying backend references, + // we take the current project (if present) into account. + // If the project names match, we can elide them. + if proj := r.b.currentProject.Load(); proj != nil && string(r.project) == string(proj.Name) { + return string(r.name) + } + + // Else return a new style fully qualified reference. + return fmt.Sprintf("organization/%s/%s", r.project, r.name) } -func (r localBackendReference) Name() tokens.Name { +func (r *localBackendReference) Name() tokens.Name { return r.name } -func (r localBackendReference) FullyQualifiedName() tokens.QName { - return r.Name().Q() +func (r *localBackendReference) Project() tokens.Name { + return r.project } +func (r *localBackendReference) FullyQualifiedName() tokens.QName { + if r.project == "" { + return r.name.Q() + } + return tokens.QName(fmt.Sprintf("organization/%s/%s", r.project, r.name)) +} + +// Helper methods that delegate to the underlying referenceStore. +func (r *localBackendReference) Validate() error { return r.store.ValidateReference(r) } +func (r *localBackendReference) StackBasePath() string { return r.store.StackBasePath(r) } +func (r *localBackendReference) HistoryDir() string { return r.store.HistoryDir(r) } +func (r *localBackendReference) BackupDir() string { return r.store.BackupDir(r) } + func IsFileStateBackendURL(urlstr string) bool { u, err := url.Parse(urlstr) if err != nil { @@ -161,12 +207,11 @@ func New(ctx context.Context, d diag.Sink, originalURL string, project *workspac } } - isAcc, err := bucket.IsAccessible(ctx) + // Check if there is a .pulumi/Pulumi.yaml file in the bucket + b := &wrappedBucket{bucket: bucket} + pulumiState, err := ensurePulumiMeta(ctx, b) if err != nil { - return nil, fmt.Errorf("unable to check if bucket %s is accessible: %w", u, err) - } - if !isAcc { - return nil, fmt.Errorf("bucket %s is not accessible", u) + return nil, err } // Allocate a unique lock ID for this backend instance. @@ -177,15 +222,90 @@ func New(ctx context.Context, d diag.Sink, originalURL string, project *workspac gzipCompression := cmdutil.IsTruthy(os.Getenv(PulumiFilestateGzipEnvVar)) - return &localBackend{ - d: d, - originalURL: originalURL, - url: u, - bucket: &wrappedBucket{bucket: bucket}, - lockID: lockID.String(), - gzip: gzipCompression, - currentProject: project, - }, nil + backend := &localBackend{ + d: d, + originalURL: originalURL, + url: u, + bucket: b, + lockID: lockID.String(), + gzip: gzipCompression, + } + backend.currentProject.Store(project) + + projectMode := true + switch v := pulumiState.Version; v { + case 0: + backend.store = &legacyReferenceStore{b: backend} + projectMode = false + case 1: + backend.store = &projectReferenceStore{b: backend} + default: + return nil, fmt.Errorf( + "state store unsupported: 'Pulumi.yaml' version (%d) is not supported "+ + "by this version of the Pulumi CLI", v) + } + + if !projectMode { + return backend, nil + } + + // If we're in project mode warn about any old stack files. 
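+ // Older CLI versions that do not understand project-scoped stacks may still write stack files to the legacy location, so surface them and point the user at 'pulumi state upgrade'.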
+ refs, err := (&legacyReferenceStore{b: backend}).ListReferences() + if err != nil { + // If there's an error listing don't fail, just don't print the warnings + return backend, nil + } + + for _, ref := range refs { + d.Warningf(&diag.Diag{ + Message: "Found legacy stack file '%s', you should run 'pulumi state upgrade'", + }, ref.Name()) + } + + return backend, nil +} + +func (b *localBackend) Upgrade(ctx context.Context) error { + // We don't use the existing b.store because + // this may already be a projectReferenceStore + // with new legacy files introduced to it accidentally. + olds, err := (&legacyReferenceStore{b: b}).ListReferences() + if err != nil { + return fmt.Errorf("read old references: %w", err) + } + + newStore := &projectReferenceStore{b: b} + for _, old := range olds { + chk, err := b.getCheckpoint(old) + if err != nil { + return err + } + // Try and find the project name from _any_ resource URN + var project tokens.Name + if chk.Latest != nil { + for _, res := range chk.Latest.Resources { + project = tokens.Name(res.URN.Project()) + break + } + } + if project == "" { + return fmt.Errorf("no project found for stack %v", old) + } + + new := newStore.newReference(project, old.Name()) + if err := b.renameStack(ctx, old, new); err != nil { + return fmt.Errorf("upgrade stack %v to %v: %w", old, new, err) + } + } + + pulumiYaml, err := yaml.Marshal(&pulumiMeta{Version: 1}) + contract.AssertNoErrorf(err, "Could not marshal filestate.pulumiState to yaml") + if err = b.bucket.WriteAll(ctx, "Pulumi.yaml", pulumiYaml, nil); err != nil { + return fmt.Errorf("could not write 'Pulumi.yaml': %w", err) + } + b.store = newStore + + return nil } // massageBlobPath takes the path the user provided and converts it to an appropriate form go-cloud @@ -241,6 +361,14 @@ func Login(ctx context.Context, d diag.Sink, url string, project *workspace.Proj return be, workspace.StoreAccount(be.URL(), workspace.Account{}, true) } +func (b *localBackend) getReference(ref backend.StackReference) (*localBackendReference, error) { + stackRef, ok := ref.(*localBackendReference) + if !ok { + return nil, fmt.Errorf("bad stack reference type") + } + return stackRef, stackRef.Validate() +} + func (b *localBackend) local() {} func (b *localBackend) Name() string { @@ -261,7 +389,7 @@ func (b *localBackend) StateDir() string { } func (b *localBackend) SetCurrentProject(project *workspace.Project) { - b.currentProject = project + b.currentProject.Store(project) } func (b *localBackend) GetPolicyPack(ctx context.Context, policyPack string, @@ -290,74 +418,122 @@ func (b *localBackend) SupportsOrganizations() bool { return false } -func (b *localBackend) ParseStackReference(stackRefName string) (backend.StackReference, error) { - if err := b.ValidateStackName(stackRefName); err != nil { - return nil, err - } - return localBackendReference{name: tokens.Name(stackRefName)}, nil +func (b *localBackend) ParseStackReference(stackRef string) (backend.StackReference, error) { + return b.parseStackReference(stackRef) +} + +func (b *localBackend) parseStackReference(stackRef string) (*localBackendReference, error) { + return b.store.ParseReference(stackRef) } -// ValidateStackName verifies the stack name is valid for the local backend. We use the same rules as the -// httpstate backend. 
-func (b *localBackend) ValidateStackName(stackName string) error { - if strings.Contains(stackName, "/") { - return errors.New("stack names may not contain slashes") +// ValidateStackName verifies the stack name is valid for the local backend. +func (b *localBackend) ValidateStackName(stackRef string) error { + _, err := b.ParseStackReference(stackRef) + return err +} + +func (b *localBackend) DoesProjectExist(ctx context.Context, projectName string) (bool, error) { + projStore, ok := b.store.(*projectReferenceStore) + if !ok { + // Legacy stores don't have projects + // so the project does not exist. + return false, nil } - validNameRegex := regexp.MustCompile("^[A-Za-z0-9_.-]{1,100}$") - if !validNameRegex.MatchString(stackName) { - return errors.New( - "stack names are limited to 100 characters and may only contain alphanumeric, hyphens, underscores, or periods") + projects, err := projStore.ListProjects() + if err != nil { + return false, err } - return nil -} + for _, project := range projects { + if string(project) == projectName { + return true, nil + } + } -func (b *localBackend) DoesProjectExist(ctx context.Context, projectName string) (bool, error) { - // Local backends don't really have multiple projects, so just return false here. return false, nil } +// Confirm the specified stack's project doesn't contradict the Pulumi.yaml of the current project. If the CWD +// is not in a Pulumi project, does not contradict. If the project name in Pulumi.yaml is "foo", a stack with a +// name of bar/foo should not work. +func currentProjectContradictsWorkspace(stack *localBackendReference) bool { + contract.Requiref(stack != nil, "stack", "is nil") + + if stack.project == "" { + return false + } + + projPath, err := workspace.DetectProjectPath() + if err != nil { + return false + } + + if projPath == "" { + return false + } + + proj, err := workspace.LoadProject(projPath) + if err != nil { + return false + } + + return proj.Name.String() != stack.project.String() +} + func (b *localBackend) CreateStack(ctx context.Context, stackRef backend.StackReference, root string, opts interface{}, ) (backend.Stack, error) { - err := b.Lock(ctx, stackRef) + localStackRef, err := b.getReference(stackRef) + if err != nil { + return nil, err + } + + err = b.Lock(ctx, stackRef) if err != nil { return nil, err } defer b.Unlock(ctx, stackRef) + if currentProjectContradictsWorkspace(localStackRef) { + return nil, fmt.Errorf("provided project name %q doesn't match Pulumi.yaml", localStackRef.project) + } + contract.Requiref(opts == nil, "opts", "local stacks do not support any options") - stackName := stackRef.Name() + stackName := localStackRef.FullyQualifiedName() if stackName == "" { return nil, errors.New("invalid empty stack name") } - if _, _, err := b.getStack(ctx, stackName); err == nil { + if _, _, err := b.getStack(ctx, localStackRef); err == nil { return nil, &backend.StackAlreadyExistsError{StackName: string(stackName)} } - tags := backend.GetEnvironmentTagsForCurrentStack(root, b.currentProject) + tags := backend.GetEnvironmentTagsForCurrentStack(root, b.currentProject.Load()) - if err = validation.ValidateStackProperties(string(stackName), tags); err != nil { + if err = validation.ValidateStackProperties(stackName.Name().String(), tags); err != nil { return nil, fmt.Errorf("validating stack properties: %w", err) } - file, err := b.saveStack(stackName, nil, nil) + file, err := b.saveStack(localStackRef, nil, nil) if err != nil { return nil, err } - stack := newStack(stackRef, file, nil, b) + stack 
:= newStack(localStackRef, file, nil, b) b.d.Infof(diag.Message("", "Created stack '%s'"), stack.Ref()) return stack, nil } func (b *localBackend) GetStack(ctx context.Context, stackRef backend.StackReference) (backend.Stack, error) { - stackName := stackRef.Name() - snapshot, path, err := b.getStack(ctx, stackName) + localStackRef, err := b.getReference(stackRef) + if err != nil { + return nil, err + } + + snapshot, path, err := b.getStack(ctx, localStackRef) switch { case gcerrors.Code(err) == gcerrors.NotFound: @@ -365,7 +541,7 @@ func (b *localBackend) GetStack(ctx context.Context, stackRef backend.StackRefer case err != nil: return nil, err default: - return newStack(stackRef, path, snapshot, b), nil + return newStack(localStackRef, path, snapshot, b), nil } } @@ -381,12 +557,8 @@ func (b *localBackend) ListStacks( // Note that the provided stack filter is not honored, since fields like // organizations and tags aren't persisted in the local backend. results := make([]backend.StackSummary, 0, len(stacks)) - for _, stackName := range stacks { - chk, err := b.getCheckpoint(stackName) - if err != nil { - return nil, nil, err - } - stackRef, err := b.ParseStackReference(string(stackName)) + for _, stackRef := range stacks { + chk, err := b.getCheckpoint(stackRef) if err != nil { return nil, nil, err } @@ -397,14 +569,18 @@ func (b *localBackend) ListStacks( } func (b *localBackend) RemoveStack(ctx context.Context, stack backend.Stack, force bool) (bool, error) { - err := b.Lock(ctx, stack.Ref()) + localStackRef, err := b.getReference(stack.Ref()) if err != nil { return false, err } - defer b.Unlock(ctx, stack.Ref()) - stackName := stack.Ref().Name() - snapshot, _, err := b.getStack(ctx, stackName) + err = b.Lock(ctx, localStackRef) + if err != nil { + return false, err + } + defer b.Unlock(ctx, localStackRef) + + snapshot, _, err := b.getStack(ctx, localStackRef) if err != nil { return false, err } @@ -414,63 +590,76 @@ func (b *localBackend) RemoveStack(ctx context.Context, stack backend.Stack, for return true, errors.New("refusing to remove stack because it still contains resources") } - return false, b.removeStack(stackName) + return false, b.removeStack(localStackRef) } func (b *localBackend) RenameStack(ctx context.Context, stack backend.Stack, newName tokens.QName, ) (backend.StackReference, error) { - err := b.Lock(ctx, stack.Ref()) + localStackRef, err := b.getReference(stack.Ref()) if err != nil { return nil, err } - defer b.Unlock(ctx, stack.Ref()) - // Get the current state from the stack to be renamed. - stackName := stack.Ref().Name() - snap, _, err := b.getStack(ctx, stackName) + // Ensure the new stack name is valid. + newRef, err := b.parseStackReference(string(newName)) if err != nil { return nil, err } - // Ensure the new stack name is valid. - newRef, err := b.ParseStackReference(string(newName)) + err = b.renameStack(ctx, localStackRef, newRef) if err != nil { return nil, err } - newStackName := newRef.Name() + return newRef, nil +} + +func (b *localBackend) renameStack(ctx context.Context, oldRef *localBackendReference, + newRef *localBackendReference, +) error { + err := b.Lock(ctx, oldRef) + if err != nil { + return err + } + defer b.Unlock(ctx, oldRef) + + // Get the current state from the stack to be renamed. + snap, _, err := b.getStack(ctx, oldRef) + if err != nil { + return err + } // Ensure the destination stack does not already exist. 
- hasExisting, err := b.bucket.Exists(ctx, b.stackPath(newStackName)) + hasExisting, err := b.bucket.Exists(ctx, b.stackPath(newRef)) if err != nil { - return nil, err + return err } if hasExisting { - return nil, fmt.Errorf("a stack named %s already exists", newName) + return fmt.Errorf("a stack named %s already exists", newRef.String()) } // If we have a snapshot, we need to rename the URNs inside it to use the new stack name. if snap != nil { - if err = edit.RenameStack(snap, newStackName, ""); err != nil { - return nil, err + if err = edit.RenameStack(snap, newRef.name, ""); err != nil { + return err } } // Now save the snapshot with a new name (we pass nil to re-use the existing secrets manager from the snapshot). - if _, err = b.saveStack(newStackName, snap, nil); err != nil { - return nil, err + if _, err = b.saveStack(newRef, snap, nil); err != nil { + return err } // To remove the old stack, just make a backup of the file and don't write out anything new. - file := b.stackPath(stackName) + file := b.stackPath(oldRef) backupTarget(b.bucket, file, false) // And rename the history folder as well. - if err = b.renameHistory(stackName, newStackName); err != nil { - return nil, err + if err = b.renameHistory(oldRef, newRef); err != nil { + return err } - return newRef, err + return err } func (b *localBackend) GetLatestConfiguration(ctx context.Context, @@ -572,7 +761,16 @@ func (b *localBackend) apply( events chan<- engine.Event, ) (*deploy.Plan, sdkDisplay.ResourceChanges, result.Result) { stackRef := stack.Ref() - stackName := stackRef.Name() + localStackRef, err := b.getReference(stackRef) + if err != nil { + return nil, nil, result.FromError(err) + } + + if currentProjectContradictsWorkspace(localStackRef) { + return nil, nil, result.Errorf("provided project name %q doesn't match Pulumi.yaml", localStackRef.project) + } + + stackName := stackRef.FullyQualifiedName() actionLabel := backend.ActionLabel(kind, opts.DryRun) if !(op.Opts.Display.JSONDisplay || op.Opts.Display.Type == display.DisplayWatch) { @@ -582,7 +780,7 @@ func (b *localBackend) apply( } // Start the update. - update, err := b.newUpdate(ctx, stackName, op) + update, err := b.newUpdate(ctx, localStackRef, op) if err != nil { return nil, nil, result.FromError(err) } @@ -591,7 +789,7 @@ func (b *localBackend) apply( displayEvents := make(chan engine.Event) displayDone := make(chan bool) go display.ShowEvents( - strings.ToLower(actionLabel), kind, stackName, op.Proj.Name, "", + strings.ToLower(actionLabel), kind, stackName.Name(), op.Proj.Name, "", displayEvents, displayDone, op.Opts.Display, opts.DryRun) // Create a separate event channel for engine events that we'll pipe to both listening streams. @@ -614,7 +812,7 @@ func (b *localBackend) apply( }() // Create the management machinery. 
- persister := b.newSnapshotPersister(stackName, op.SecretsManager) + persister := b.newSnapshotPersister(localStackRef, op.SecretsManager) manager := backend.NewSnapshotManager(persister, update.GetTarget().Snapshot) engineCtx := &engine.Context{ Cancel: scope.Context(), @@ -676,8 +874,8 @@ func (b *localBackend) apply( var saveErr error var backupErr error if !opts.DryRun { - saveErr = b.addToHistory(stackName, info) - backupErr = b.backupStack(stackName) + saveErr = b.addToHistory(localStackRef, info) + backupErr = b.backupStack(localStackRef) } if updateRes != nil { @@ -701,10 +899,10 @@ func (b *localBackend) apply( var link string if strings.HasPrefix(b.url, FilePathPrefix) { u, _ := url.Parse(b.url) - u.Path = filepath.ToSlash(path.Join(u.Path, b.stackPath(stackName))) + u.Path = filepath.ToSlash(path.Join(u.Path, b.stackPath(localStackRef))) link = u.String() } else { - link, err = b.bucket.SignedURL(ctx, b.stackPath(stackName), nil) + link, err = b.bucket.SignedURL(ctx, b.stackPath(localStackRef), nil) if err != nil { // set link to be empty to when there is an error to hide use of Permalinks link = "" @@ -743,8 +941,11 @@ func (b *localBackend) GetHistory( pageSize int, page int, ) ([]backend.UpdateInfo, error) { - stackName := stackRef.Name() - updates, err := b.getHistory(stackName, pageSize, page) + localStackRef, err := b.getReference(stackRef) + if err != nil { + return nil, err + } + updates, err := b.getHistory(localStackRef, pageSize, page) if err != nil { return nil, err } @@ -755,8 +956,12 @@ func (b *localBackend) GetLogs(ctx context.Context, secretsProvider secrets.Provider, stack backend.Stack, cfg backend.StackConfiguration, query operations.LogQuery, ) ([]operations.LogEntry, error) { - stackName := stack.Ref().Name() - target, err := b.getTarget(ctx, stackName, cfg.Config, cfg.Decrypter) + localStackRef, err := b.getReference(stack.Ref()) + if err != nil { + return nil, err + } + + target, err := b.getTarget(ctx, localStackRef, cfg.Config, cfg.Decrypter) if err != nil { return nil, err } @@ -790,8 +995,12 @@ func GetLogsForTarget(target *deploy.Target, query operations.LogQuery) ([]opera func (b *localBackend) ExportDeployment(ctx context.Context, stk backend.Stack, ) (*apitype.UntypedDeployment, error) { - stackName := stk.Ref().Name() - chk, err := b.getCheckpoint(stackName) + localStackRef, err := b.getReference(stk.Ref()) + if err != nil { + return nil, err + } + + chk, err := b.getCheckpoint(localStackRef) if err != nil { return nil, fmt.Errorf("failed to load checkpoint: %w", err) } @@ -810,19 +1019,24 @@ func (b *localBackend) ExportDeployment(ctx context.Context, func (b *localBackend) ImportDeployment(ctx context.Context, stk backend.Stack, deployment *apitype.UntypedDeployment, ) error { - err := b.Lock(ctx, stk.Ref()) + localStackRef, err := b.getReference(stk.Ref()) + if err != nil { + return err + } + + err = b.Lock(ctx, localStackRef) if err != nil { return err } - defer b.Unlock(ctx, stk.Ref()) + defer b.Unlock(ctx, localStackRef) - stackName := stk.Ref().Name() + stackName := localStackRef.FullyQualifiedName() chk, err := stack.MarshalUntypedDeploymentToVersionedCheckpoint(stackName, deployment) if err != nil { return err } - _, _, err = b.saveCheckpoint(stackName, chk) + _, _, err = b.saveCheckpoint(localStackRef, chk) return err } @@ -842,42 +1056,16 @@ func (b *localBackend) CurrentUser() (string, []string, error) { return user.Username, nil, nil } -func (b *localBackend) getLocalStacks() ([]tokens.Name, error) { - // Read the stack directory. 
- path := b.stackPath("") - - files, err := listBucket(b.bucket, path) - if err != nil { - return nil, fmt.Errorf("error listing stacks: %w", err) - } - stacks := make([]tokens.Name, 0, len(files)) - - for _, file := range files { - // Ignore directories. - if file.IsDir { - continue - } - - // Skip files without valid extensions (e.g., *.bak files). - stackfn := objectName(file) - ext := filepath.Ext(stackfn) - // But accept gzip compression - if ext == encoding.GZIPExt { - stackfn = strings.TrimSuffix(stackfn, encoding.GZIPExt) - ext = filepath.Ext(stackfn) - } - - if _, has := encoding.Marshalers[ext]; !has { - continue - } - - // Read in this stack's information. - name := tokens.Name(stackfn[:len(stackfn)-len(ext)]) - - stacks = append(stacks, name) - } +func (b *localBackend) getLocalStacks() ([]*localBackendReference, error) { + return b.store.ListReferences() +} - return stacks, nil +// GetStackTags fetches the stack's existing tags. +func (b *localBackend) GetStackTags(ctx context.Context, + stack backend.Stack, +) (map[apitype.StackTagName]string, error) { + // The local backend does not currently persist tags. + return nil, errors.New("stack tags not supported in --local mode") } // UpdateStackTags updates the stacks's tags, replacing all existing tags. @@ -890,7 +1078,7 @@ func (b *localBackend) UpdateStackTags(ctx context.Context, func (b *localBackend) CancelCurrentUpdate(ctx context.Context, stackRef backend.StackReference) error { // Try to delete ALL the lock files - allFiles, err := listBucket(b.bucket, stackLockDir(stackRef.Name())) + allFiles, err := listBucket(b.bucket, stackLockDir(stackRef.FullyQualifiedName())) if err != nil { // Don't error if it just wasn't found if gcerrors.Code(err) == gcerrors.NotFound { diff --git a/pkg/backend/filestate/backend_legacy_test.go b/pkg/backend/filestate/backend_legacy_test.go new file mode 100644 index 000000000000..7d173f878321 --- /dev/null +++ b/pkg/backend/filestate/backend_legacy_test.go @@ -0,0 +1,361 @@ +package filestate + +import ( + "context" + "encoding/json" + "os" + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pulumi/pulumi/pkg/v3/backend" + "github.com/pulumi/pulumi/pkg/v3/resource/deploy" + "github.com/pulumi/pulumi/pkg/v3/resource/stack" + "github.com/pulumi/pulumi/pkg/v3/secrets/b64" + "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" + "github.com/pulumi/pulumi/sdk/v3/go/common/encoding" + "github.com/pulumi/pulumi/sdk/v3/go/common/resource" + "github.com/pulumi/pulumi/sdk/v3/go/common/testing/diagtest" + "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" +) + +// This file contains copies of old backend tests +// that were upgraded to run with project support. +// This duplicates those tests to run with legacy, non-project state, +// validating that the legacy behavior is preserved. 
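+// Each test marks the temporary store as legacy via markLegacyStore (defined at the bottom of this file), so ensurePulumiMeta reports version 0 and the backend uses the legacy, non-project layout.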
+ +//nolint:paralleltest // mutates environment variables +func TestListStacksWithMultiplePassphrases_legacy(t *testing.T) { + // Login to a temp dir filestate backend + tmpDir := t.TempDir() + + markLegacyStore(t, tmpDir) + + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + + // Create stack "a" and import a checkpoint with a secret + aStackRef, err := b.ParseStackReference("a") + assert.NoError(t, err) + aStack, err := b.CreateStack(ctx, aStackRef, "", nil) + assert.NoError(t, err) + assert.NotNil(t, aStack) + defer func() { + t.Setenv("PULUMI_CONFIG_PASSPHRASE", "abc123") + _, err := b.RemoveStack(ctx, aStack, true) + assert.NoError(t, err) + }() + deployment, err := makeUntypedDeployment("a", "abc123", + "v1:4iF78gb0nF0=:v1:Co6IbTWYs/UdrjgY:FSrAWOFZnj9ealCUDdJL7LrUKXX9BA==") + assert.NoError(t, err) + t.Setenv("PULUMI_CONFIG_PASSPHRASE", "abc123") + err = b.ImportDeployment(ctx, aStack, deployment) + assert.NoError(t, err) + + // Create stack "b" and import a checkpoint with a secret + bStackRef, err := b.ParseStackReference("b") + assert.NoError(t, err) + bStack, err := b.CreateStack(ctx, bStackRef, "", nil) + assert.NoError(t, err) + assert.NotNil(t, bStack) + defer func() { + t.Setenv("PULUMI_CONFIG_PASSPHRASE", "123abc") + _, err := b.RemoveStack(ctx, bStack, true) + assert.NoError(t, err) + }() + deployment, err = makeUntypedDeployment("b", "123abc", + "v1:C7H2a7/Ietk=:v1:yfAd1zOi6iY9DRIB:dumdsr+H89VpHIQWdB01XEFqYaYjAg==") + assert.NoError(t, err) + t.Setenv("PULUMI_CONFIG_PASSPHRASE", "123abc") + err = b.ImportDeployment(ctx, bStack, deployment) + assert.NoError(t, err) + + // Remove the config passphrase so that we can no longer deserialize the checkpoints + err = os.Unsetenv("PULUMI_CONFIG_PASSPHRASE") + assert.NoError(t, err) + + // Ensure that we can list the stacks we created even without a passphrase + stacks, outContToken, err := b.ListStacks(ctx, backend.ListStacksFilter{}, nil /* inContToken */) + assert.NoError(t, err) + assert.Nil(t, outContToken) + assert.Len(t, stacks, 2) + for _, stack := range stacks { + assert.NotNil(t, stack.ResourceCount()) + assert.Equal(t, 1, *stack.ResourceCount()) + } +} + +func TestDrillError_legacy(t *testing.T) { + t.Parallel() + + // Login to a temp dir filestate backend + tmpDir := t.TempDir() + markLegacyStore(t, tmpDir) + + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + + // Get a non-existent stack and expect a nil error because it won't be found. 
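+ // (GetStack returns a nil error, rather than a NotFound error, for a stack that does not exist.)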
+ stackRef, err := b.ParseStackReference("dev") + if err != nil { + t.Fatalf("unexpected error %v when parsing stack reference", err) + } + _, err = b.GetStack(ctx, stackRef) + assert.Nil(t, err) +} + +func TestCancel_legacy(t *testing.T) { + t.Parallel() + + // Login to a temp dir filestate backend + tmpDir := t.TempDir() + markLegacyStore(t, tmpDir) + + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + + // Check that trying to cancel a stack that isn't created yet doesn't error + aStackRef, err := b.ParseStackReference("a") + assert.NoError(t, err) + err = b.CancelCurrentUpdate(ctx, aStackRef) + assert.NoError(t, err) + + // Check that trying to cancel a stack that isn't locked doesn't error + aStack, err := b.CreateStack(ctx, aStackRef, "", nil) + assert.NoError(t, err) + assert.NotNil(t, aStack) + err = b.CancelCurrentUpdate(ctx, aStackRef) + assert.NoError(t, err) + + // Locking and lock checks are only part of the internal interface + lb, ok := b.(*localBackend) + assert.True(t, ok) + assert.NotNil(t, lb) + + // Lock the stack and check CancelCurrentUpdate deletes the lock file + err = lb.Lock(ctx, aStackRef) + assert.NoError(t, err) + // check the lock file exists + lockExists, err := lb.bucket.Exists(ctx, lb.lockPath(aStackRef)) + assert.NoError(t, err) + assert.True(t, lockExists) + // Call CancelCurrentUpdate + err = lb.CancelCurrentUpdate(ctx, aStackRef) + assert.NoError(t, err) + // Now check the lock file no longer exists + lockExists, err = lb.bucket.Exists(ctx, lb.lockPath(aStackRef)) + assert.NoError(t, err) + assert.False(t, lockExists) + + // Make another filestate backend which will have a different lockId + ob, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + otherBackend, ok := ob.(*localBackend) + assert.True(t, ok) + assert.NotNil(t, lb) + + // Lock the stack with this new backend, then check that checkForLocks on the first backend now errors + err = otherBackend.Lock(ctx, aStackRef) + assert.NoError(t, err) + err = lb.checkForLock(ctx, aStackRef) + assert.Error(t, err) + // Now call CancelCurrentUpdate and check that checkForLocks no longer errors + err = lb.CancelCurrentUpdate(ctx, aStackRef) + assert.NoError(t, err) + err = lb.checkForLock(ctx, aStackRef) + assert.NoError(t, err) +} + +func TestRemoveMakesBackups_legacy(t *testing.T) { + t.Parallel() + + // Login to a temp dir filestate backend + tmpDir := t.TempDir() + markLegacyStore(t, tmpDir) + + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + + // Grab the bucket interface to test with + lb, ok := b.(*localBackend) + assert.True(t, ok) + assert.NotNil(t, lb) + + // Check that creating a new stack doesn't make a backup file + aStackRef, err := lb.parseStackReference("a") + assert.NoError(t, err) + aStack, err := b.CreateStack(ctx, aStackRef, "", nil) + assert.NoError(t, err) + assert.NotNil(t, aStack) + + // Check the stack file now exists, but the backup file doesn't + stackFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) + assert.NoError(t, err) + assert.True(t, stackFileExists) + backupFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef)+".bak") + assert.NoError(t, err) + assert.False(t, backupFileExists) + + // Now remove the stack + removed, err := b.RemoveStack(ctx, aStack, false) + assert.NoError(t, err) + assert.False(t, removed) + + // Check 
the stack file is now gone, but the backup file exists + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) + assert.NoError(t, err) + assert.False(t, stackFileExists) + backupFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef)+".bak") + assert.NoError(t, err) + assert.True(t, backupFileExists) +} + +func TestRenameWorks_legacy(t *testing.T) { + t.Parallel() + + // Login to a temp dir filestate backend + tmpDir := t.TempDir() + markLegacyStore(t, tmpDir) + + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + + // Grab the bucket interface to test with + lb, ok := b.(*localBackend) + assert.True(t, ok) + assert.NotNil(t, lb) + + // Create a new stack + aStackRef, err := lb.parseStackReference("a") + assert.NoError(t, err) + aStack, err := b.CreateStack(ctx, aStackRef, "", nil) + assert.NoError(t, err) + assert.NotNil(t, aStack) + + // Check the stack file now exists + stackFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) + assert.NoError(t, err) + assert.True(t, stackFileExists) + + // Fake up some history + err = lb.addToHistory(aStackRef, backend.UpdateInfo{Kind: apitype.DestroyUpdate}) + assert.NoError(t, err) + // And pollute the history folder + err = lb.bucket.WriteAll(ctx, path.Join(aStackRef.HistoryDir(), "randomfile.txt"), []byte{0, 13}, nil) + assert.NoError(t, err) + + // Rename the stack + bStackRefI, err := b.RenameStack(ctx, aStack, "b") + assert.NoError(t, err) + assert.Equal(t, "b", bStackRefI.String()) + bStackRef := bStackRefI.(*localBackendReference) + + // Check the new stack file now exists and the old one is gone + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(bStackRef)) + assert.NoError(t, err) + assert.True(t, stackFileExists) + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) + assert.NoError(t, err) + assert.False(t, stackFileExists) + + // Rename again + bStack, err := b.GetStack(ctx, bStackRef) + assert.NoError(t, err) + cStackRefI, err := b.RenameStack(ctx, bStack, "c") + assert.NoError(t, err) + assert.Equal(t, "c", cStackRefI.String()) + cStackRef := cStackRefI.(*localBackendReference) + + // Check the new stack file now exists and the old one is gone + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(cStackRef)) + assert.NoError(t, err) + assert.True(t, stackFileExists) + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(bStackRef)) + assert.NoError(t, err) + assert.False(t, stackFileExists) + + // Check we can still get the history + history, err := b.GetHistory(ctx, cStackRef, 10, 0) + assert.NoError(t, err) + assert.Len(t, history, 1) + assert.Equal(t, apitype.DestroyUpdate, history[0].Kind) +} + +// Regression test for https://github.com/pulumi/pulumi/issues/10439 +func TestHtmlEscaping_legacy(t *testing.T) { + t.Parallel() + + sm := b64.NewBase64SecretsManager() + resources := []*resource.State{ + { + URN: resource.NewURN("a", "proj", "d:e:f", "a:b:c", "name"), + Type: "a:b:c", + Inputs: resource.PropertyMap{ + resource.PropertyKey("html"): resource.NewStringProperty(""), + }, + }, + } + + snap := deploy.NewSnapshot(deploy.Manifest{}, sm, resources, nil) + + sdep, err := stack.SerializeDeployment(snap, snap.SecretsManager, false /* showSecrsts */) + assert.NoError(t, err) + + data, err := encoding.JSON.Marshal(sdep) + assert.NoError(t, err) + + // Ensure data has the string contents """, not "\u003chtml\u0026tags\u003e" + // ImportDeployment below should not modify the 
data + assert.Contains(t, string(data), "") + + udep := &apitype.UntypedDeployment{ + Version: 3, + Deployment: json.RawMessage(data), + } + + // Login to a temp dir filestate backend + tmpDir := t.TempDir() + markLegacyStore(t, tmpDir) + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + + // Create stack "a" and import a checkpoint with a secret + aStackRef, err := b.ParseStackReference("a") + assert.NoError(t, err) + aStack, err := b.CreateStack(ctx, aStackRef, "", nil) + assert.NoError(t, err) + assert.NotNil(t, aStack) + err = b.ImportDeployment(ctx, aStack, udep) + assert.NoError(t, err) + + // Ensure the file has the string contents """, not "\u003chtml\u0026tags\u003e" + + // Grab the bucket interface to read the file with + lb, ok := b.(*localBackend) + assert.True(t, ok) + assert.NotNil(t, lb) + + chkpath := lb.stackPath(aStackRef.(*localBackendReference)) + bytes, err := lb.bucket.ReadAll(context.Background(), chkpath) + assert.NoError(t, err) + state := string(bytes) + assert.Contains(t, state, "") +} + +// markLegacyStore marks the given directory as a legacy store. +// This is done by dropping a single file into the bookkeeping directory. +// ensurePulumiMeta will treat this as a legacy store if the directory exists. +func markLegacyStore(t *testing.T, dir string) { + marker := filepath.Join(dir, workspace.BookkeepingDir, ".legacy") + require.NoError(t, os.MkdirAll(filepath.Dir(marker), 0o755)) + require.NoError(t, os.WriteFile(marker, []byte(nil), 0o600)) +} diff --git a/pkg/backend/filestate/backend_test.go b/pkg/backend/filestate/backend_test.go index 1108823712a2..160fe8f6eef3 100644 --- a/pkg/backend/filestate/backend_test.go +++ b/pkg/backend/filestate/backend_test.go @@ -1,16 +1,22 @@ package filestate import ( + "bytes" "context" "encoding/json" + "fmt" + "io" "os" "path" "path/filepath" "runtime" + "sync" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" user "github.com/tweekmonster/luser" + "gocloud.dev/blob/fileblob" "github.com/pulumi/pulumi/pkg/v3/backend" "github.com/pulumi/pulumi/pkg/v3/operations" @@ -19,11 +25,14 @@ import ( "github.com/pulumi/pulumi/pkg/v3/secrets/b64" "github.com/pulumi/pulumi/pkg/v3/secrets/passphrase" "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" + "github.com/pulumi/pulumi/sdk/v3/go/common/diag" + "github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors" "github.com/pulumi/pulumi/sdk/v3/go/common/encoding" "github.com/pulumi/pulumi/sdk/v3/go/common/resource" "github.com/pulumi/pulumi/sdk/v3/go/common/resource/config" "github.com/pulumi/pulumi/sdk/v3/go/common/testing/diagtest" "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" + "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" ) func TestMassageBlobPath(t *testing.T) { @@ -149,7 +158,7 @@ func TestListStacksWithMultiplePassphrases(t *testing.T) { assert.NoError(t, err) // Create stack "a" and import a checkpoint with a secret - aStackRef, err := b.ParseStackReference("a") + aStackRef, err := b.ParseStackReference("organization/project/a") assert.NoError(t, err) aStack, err := b.CreateStack(ctx, aStackRef, "", nil) assert.NoError(t, err) @@ -167,7 +176,7 @@ func TestListStacksWithMultiplePassphrases(t *testing.T) { assert.NoError(t, err) // Create stack "b" and import a checkpoint with a secret - bStackRef, err := b.ParseStackReference("b") + bStackRef, err := b.ParseStackReference("organization/project/b") assert.NoError(t, err) bStack, err := 
b.CreateStack(ctx, bStackRef, "", nil) assert.NoError(t, err) @@ -209,7 +218,7 @@ func TestDrillError(t *testing.T) { assert.NoError(t, err) // Get a non-existent stack and expect a nil error because it won't be found. - stackRef, err := b.ParseStackReference("dev") + stackRef, err := b.ParseStackReference("organization/project/dev") if err != nil { t.Fatalf("unexpected error %v when parsing stack reference", err) } @@ -227,7 +236,7 @@ func TestCancel(t *testing.T) { assert.NoError(t, err) // Check that trying to cancel a stack that isn't created yet doesn't error - aStackRef, err := b.ParseStackReference("a") + aStackRef, err := b.ParseStackReference("organization/project/a") assert.NoError(t, err) err = b.CancelCurrentUpdate(ctx, aStackRef) assert.NoError(t, err) @@ -248,14 +257,14 @@ func TestCancel(t *testing.T) { err = lb.Lock(ctx, aStackRef) assert.NoError(t, err) // check the lock file exists - lockExists, err := lb.bucket.Exists(ctx, lb.lockPath(aStackRef.Name())) + lockExists, err := lb.bucket.Exists(ctx, lb.lockPath(aStackRef)) assert.NoError(t, err) assert.True(t, lockExists) // Call CancelCurrentUpdate err = lb.CancelCurrentUpdate(ctx, aStackRef) assert.NoError(t, err) // Now check the lock file no longer exists - lockExists, err = lb.bucket.Exists(ctx, lb.lockPath(aStackRef.Name())) + lockExists, err = lb.bucket.Exists(ctx, lb.lockPath(aStackRef)) assert.NoError(t, err) assert.False(t, lockExists) @@ -293,17 +302,17 @@ func TestRemoveMakesBackups(t *testing.T) { assert.NotNil(t, lb) // Check that creating a new stack doesn't make a backup file - aStackRef, err := b.ParseStackReference("a") + aStackRef, err := lb.parseStackReference("organization/project/a") assert.NoError(t, err) aStack, err := b.CreateStack(ctx, aStackRef, "", nil) assert.NoError(t, err) assert.NotNil(t, aStack) // Check the stack file now exists, but the backup file doesn't - stackFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef.Name())) + stackFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) assert.NoError(t, err) assert.True(t, stackFileExists) - backupFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef.Name())+".bak") + backupFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef)+".bak") assert.NoError(t, err) assert.False(t, backupFileExists) @@ -313,10 +322,10 @@ func TestRemoveMakesBackups(t *testing.T) { assert.False(t, removed) // Check the stack file is now gone, but the backup file exists - stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef.Name())) + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) assert.NoError(t, err) assert.False(t, stackFileExists) - backupFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef.Name())+".bak") + backupFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef)+".bak") assert.NoError(t, err) assert.True(t, backupFileExists) } @@ -336,49 +345,51 @@ func TestRenameWorks(t *testing.T) { assert.NotNil(t, lb) // Create a new stack - aStackRef, err := b.ParseStackReference("a") + aStackRef, err := lb.parseStackReference("organization/project/a") assert.NoError(t, err) aStack, err := b.CreateStack(ctx, aStackRef, "", nil) assert.NoError(t, err) assert.NotNil(t, aStack) // Check the stack file now exists - stackFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef.Name())) + stackFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) assert.NoError(t, err) assert.True(t, stackFileExists) // Fake up some history - err = lb.addToHistory("a", 
backend.UpdateInfo{Kind: apitype.DestroyUpdate}) + err = lb.addToHistory(aStackRef, backend.UpdateInfo{Kind: apitype.DestroyUpdate}) assert.NoError(t, err) // And pollute the history folder - err = lb.bucket.WriteAll(ctx, path.Join(lb.historyDirectory("a"), "randomfile.txt"), []byte{0, 13}, nil) + err = lb.bucket.WriteAll(ctx, path.Join(aStackRef.HistoryDir(), "randomfile.txt"), []byte{0, 13}, nil) assert.NoError(t, err) // Rename the stack - bStackRef, err := b.RenameStack(ctx, aStack, "b") + bStackRefI, err := b.RenameStack(ctx, aStack, "organization/project/b") assert.NoError(t, err) - assert.Equal(t, "b", bStackRef.String()) + assert.Equal(t, "organization/project/b", bStackRefI.String()) + bStackRef := bStackRefI.(*localBackendReference) // Check the new stack file now exists and the old one is gone - stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(bStackRef.Name())) + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(bStackRef)) assert.NoError(t, err) assert.True(t, stackFileExists) - stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef.Name())) + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) assert.NoError(t, err) assert.False(t, stackFileExists) // Rename again bStack, err := b.GetStack(ctx, bStackRef) assert.NoError(t, err) - cStackRef, err := b.RenameStack(ctx, bStack, "c") + cStackRefI, err := b.RenameStack(ctx, bStack, "organization/project/c") assert.NoError(t, err) - assert.Equal(t, "c", cStackRef.String()) + assert.Equal(t, "organization/project/c", cStackRefI.String()) + cStackRef := cStackRefI.(*localBackendReference) // Check the new stack file now exists and the old one is gone - stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(cStackRef.Name())) + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(cStackRef)) assert.NoError(t, err) assert.True(t, stackFileExists) - stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(bStackRef.Name())) + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(bStackRef)) assert.NoError(t, err) assert.False(t, stackFileExists) @@ -403,11 +414,12 @@ func TestLoginToNonExistingFolderFails(t *testing.T) { // an error when the stack name is the empty string.TestParseEmptyStackFails func TestParseEmptyStackFails(t *testing.T) { t.Parallel() - // ParseStackReference does use the method receiver - // (it is a total function disguised as a method.) 
- var b *localBackend - stackName := "" - _, err := b.ParseStackReference(stackName) + tmpDir := t.TempDir() + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + + _, err = b.ParseStackReference("") assert.Error(t, err) } @@ -450,7 +462,7 @@ func TestHtmlEscaping(t *testing.T) { assert.NoError(t, err) // Create stack "a" and import a checkpoint with a secret - aStackRef, err := b.ParseStackReference("a") + aStackRef, err := b.ParseStackReference("organization/project/a") assert.NoError(t, err) aStack, err := b.CreateStack(ctx, aStackRef, "", nil) assert.NoError(t, err) @@ -465,9 +477,375 @@ func TestHtmlEscaping(t *testing.T) { assert.True(t, ok) assert.NotNil(t, lb) - chkpath := lb.stackPath("a") + chkpath := lb.stackPath(aStackRef.(*localBackendReference)) bytes, err := lb.bucket.ReadAll(context.Background(), chkpath) assert.NoError(t, err) state := string(bytes) assert.Contains(t, state, "") } + +func TestLegacyFolderStructure(t *testing.T) { + t.Parallel() + + // Make a dummy stack file in the legacy location + tmpDir := t.TempDir() + err := os.MkdirAll(path.Join(tmpDir, ".pulumi", "stacks"), os.ModePerm) + require.NoError(t, err) + err = os.WriteFile(path.Join(tmpDir, ".pulumi", "stacks", "a.json"), []byte("{}"), os.ModePerm) + require.NoError(t, err) + + // Login to a temp dir filestate backend + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + require.NoError(t, err) + // Check the backend says it's NOT in project mode + lb, ok := b.(*localBackend) + assert.True(t, ok) + assert.NotNil(t, lb) + assert.IsType(t, &legacyReferenceStore{}, lb.store) + + // Check that list stack shows that stack + stacks, token, err := b.ListStacks(ctx, backend.ListStacksFilter{}, nil /* inContToken */) + assert.NoError(t, err) + assert.Nil(t, token) + assert.Len(t, stacks, 1) + assert.Equal(t, "a", stacks[0].Name().String()) + + // Create a new non-project stack + bRef, err := b.ParseStackReference("b") + assert.NoError(t, err) + assert.Equal(t, "b", bRef.String()) + bStack, err := b.CreateStack(ctx, bRef, "", nil) + assert.NoError(t, err) + assert.Equal(t, "b", bStack.Ref().String()) + assert.FileExists(t, path.Join(tmpDir, ".pulumi", "stacks", "b.json")) +} + +func TestInvalidStateFile(t *testing.T) { + t.Parallel() + + // Make a bad version file + tmpDir := t.TempDir() + err := os.Mkdir(path.Join(tmpDir, ".pulumi"), os.ModePerm) + require.NoError(t, err) + err = os.WriteFile(path.Join(tmpDir, ".pulumi", "Pulumi.yaml"), []byte("version: 0"), os.ModePerm) + require.NoError(t, err) + + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.Nil(t, b) + assert.Error(t, err) +} + +// Verifies that the StackReference.String method +// takes the current project name into account, +// even if the current project name changes +// after the stack reference is created. 
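+// String elides the "organization/<project>/" prefix only when the reference's project matches the backend's current project.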
+func TestStackReferenceString_currentProjectChange(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + ctx := context.Background() + + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(dir), nil) + require.NoError(t, err) + + foo, err := b.ParseStackReference("organization/proj1/foo") + require.NoError(t, err) + + bar, err := b.ParseStackReference("organization/proj2/bar") + require.NoError(t, err) + + assert.Equal(t, "organization/proj1/foo", foo.String()) + assert.Equal(t, "organization/proj2/bar", bar.String()) + + // Change the current project name + b.SetCurrentProject(&workspace.Project{Name: "proj1"}) + + assert.Equal(t, "foo", foo.String()) + assert.Equal(t, "organization/proj2/bar", bar.String()) +} + +// Verifies that there's no data race in calling StackReference.String +// and localBackend.SetCurrentProject concurrently. +func TestStackReferenceString_currentProjectChange_race(t *testing.T) { + t.Parallel() + + const N = 1000 + + dir := t.TempDir() + ctx := context.Background() + + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(dir), nil) + require.NoError(t, err) + + projects := make([]*workspace.Project, N) + refs := make([]backend.StackReference, N) + for i := 0; i < N; i++ { + name := fmt.Sprintf("proj%d", i) + projects[i] = &workspace.Project{Name: tokens.PackageName(name)} + refs[i], err = b.ParseStackReference(fmt.Sprintf("organization/%v/foo", name)) + require.NoError(t, err) + } + + // To exercise this data race, we'll have two goroutines. + // One goroutine will call StackReference.String repeatedly + // on all the stack references, + // and the other goroutine will call localBackend.SetCurrentProject + // with all the projects. + + var wg sync.WaitGroup + ready := make(chan struct{}) // both goroutines wait on this + + wg.Add(1) + go func() { + defer wg.Done() + <-ready + for i := 0; i < N; i++ { + _ = refs[i].String() + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + <-ready + for i := 0; i < N; i++ { + b.SetCurrentProject(projects[i]) + } + }() + + close(ready) // start racing + wg.Wait() +} + +func TestUnsupportedStateFile(t *testing.T) { + t.Parallel() + + // Make a bad version file + tmpDir := t.TempDir() + err := os.Mkdir(path.Join(tmpDir, ".pulumi"), os.ModePerm) + require.NoError(t, err) + err = os.WriteFile(path.Join(tmpDir, ".pulumi", "Pulumi.yaml"), []byte("version: 10"), os.ModePerm) + require.NoError(t, err) + + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.Nil(t, b) + assert.Error(t, err) +} + +func TestProjectFolderStructure(t *testing.T) { + t.Parallel() + + // Login to a temp dir filestate backend + tmpDir := t.TempDir() + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + assert.NoError(t, err) + + // Check the backend says it's in project mode + lb, ok := b.(*localBackend) + assert.True(t, ok) + assert.NotNil(t, lb) + assert.IsType(t, &projectReferenceStore{}, lb.store) + + // Make a dummy stack file in the new project location + err = os.MkdirAll(path.Join(tmpDir, ".pulumi", "stacks", "testproj"), os.ModePerm) + assert.NoError(t, err) + err = os.WriteFile(path.Join(tmpDir, ".pulumi", "stacks", "testproj", "a.json"), []byte("{}"), os.ModePerm) + assert.NoError(t, err) + + // Check that testproj is reported as existing + exists, err := b.DoesProjectExist(ctx, "testproj") + assert.NoError(t, err) + assert.True(t, exists) + + // Check that list stack shows that stack + 
stacks, token, err := b.ListStacks(ctx, backend.ListStacksFilter{}, nil /* inContToken */) + assert.NoError(t, err) + assert.Nil(t, token) + assert.Len(t, stacks, 1) + assert.Equal(t, "organization/testproj/a", stacks[0].Name().String()) + + // Create a new project stack + bRef, err := b.ParseStackReference("organization/testproj/b") + assert.NoError(t, err) + assert.Equal(t, "organization/testproj/b", bRef.String()) + bStack, err := b.CreateStack(ctx, bRef, "", nil) + assert.NoError(t, err) + assert.Equal(t, "organization/testproj/b", bStack.Ref().String()) + assert.FileExists(t, path.Join(tmpDir, ".pulumi", "stacks", "testproj", "b.json")) +} + +func chdir(t *testing.T, dir string) { + cwd, err := os.Getwd() + require.NoError(t, err) + require.NoError(t, os.Chdir(dir)) // Set directory + t.Cleanup(func() { + require.NoError(t, os.Chdir(cwd)) // Restore directory + restoredDir, err := os.Getwd() + require.NoError(t, err) + require.Equal(t, cwd, restoredDir) + }) +} + +//nolint:paralleltest // mutates cwd +func TestProjectNameMustMatch(t *testing.T) { + // Create a new project + projectDir := t.TempDir() + pyaml := filepath.Join(projectDir, "Pulumi.yaml") + err := os.WriteFile(pyaml, []byte("name: my-project\nruntime: test"), 0o600) + require.NoError(t, err) + proj, err := workspace.LoadProject(pyaml) + require.NoError(t, err) + + chdir(t, projectDir) + + // Login to a temp dir filestate backend + tmpDir := t.TempDir() + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), proj) + require.NoError(t, err) + + // Create a new implicit-project stack + aRef, err := b.ParseStackReference("a") + assert.NoError(t, err) + assert.Equal(t, "a", aRef.String()) + aStack, err := b.CreateStack(ctx, aRef, "", nil) + assert.NoError(t, err) + assert.Equal(t, "a", aStack.Ref().String()) + assert.FileExists(t, path.Join(tmpDir, ".pulumi", "stacks", "my-project", "a.json")) + + // Create a new project stack with the wrong project name + bRef, err := b.ParseStackReference("organization/not-my-project/b") + assert.NoError(t, err) + assert.Equal(t, "organization/not-my-project/b", bRef.String()) + bStack, err := b.CreateStack(ctx, bRef, "", nil) + assert.Error(t, err) + assert.Nil(t, bStack) + + // Create a new project stack with the right project name + cRef, err := b.ParseStackReference("organization/my-project/c") + assert.NoError(t, err) + assert.Equal(t, "c", cRef.String()) + cStack, err := b.CreateStack(ctx, cRef, "", nil) + assert.NoError(t, err) + assert.Equal(t, "c", cStack.Ref().String()) + assert.FileExists(t, path.Join(tmpDir, ".pulumi", "stacks", "my-project", "c.json")) +} + +func TestLegacyUpgrade(t *testing.T) { + t.Parallel() + + // Make a dummy stack file in the legacy location + tmpDir := t.TempDir() + err := os.MkdirAll(path.Join(tmpDir, ".pulumi", "stacks"), os.ModePerm) + require.NoError(t, err) + err = os.WriteFile(path.Join(tmpDir, ".pulumi", "stacks", "a.json"), []byte(`{ + "latest": { + "resources": [ + { + "type": "package:module:resource", + "urn": "urn:pulumi:stack::project::package:module:resource::name" + } + ] + } + }`), os.ModePerm) + require.NoError(t, err) + + // Login to a temp dir filestate backend + ctx := context.Background() + b, err := New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(tmpDir), nil) + require.NoError(t, err) + // Check the backend says it's NOT in project mode + lb, ok := b.(*localBackend) + assert.True(t, ok) + assert.NotNil(t, lb) + assert.IsType(t, &legacyReferenceStore{}, lb.store) + + err = 
lb.Upgrade(ctx) + require.NoError(t, err) + assert.IsType(t, &projectReferenceStore{}, lb.store) + + // Check that a has been moved + aStackRef, err := lb.parseStackReference("organization/project/a") + require.NoError(t, err) + stackFileExists, err := lb.bucket.Exists(ctx, lb.stackPath(aStackRef)) + require.NoError(t, err) + assert.True(t, stackFileExists) + + // Write b.json and upgrade again + err = os.WriteFile(path.Join(tmpDir, ".pulumi", "stacks", "b.json"), []byte(`{ + "latest": { + "resources": [ + { + "type": "package:module:resource", + "urn": "urn:pulumi:stack::other-project::package:module:resource::name" + } + ] + } + }`), os.ModePerm) + require.NoError(t, err) + + err = lb.Upgrade(ctx) + require.NoError(t, err) + + // Check that b has been moved + bStackRef, err := lb.parseStackReference("organization/other-project/b") + require.NoError(t, err) + stackFileExists, err = lb.bucket.Exists(ctx, lb.stackPath(bStackRef)) + require.NoError(t, err) + assert.True(t, stackFileExists) +} + +func TestNew_legacyFileWarning(t *testing.T) { + t.Parallel() + + // Verifies the names of files printed in warnings + // when legacy files are found while running in project mode. + + stateDir := t.TempDir() + bucket, err := fileblob.OpenBucket(stateDir, nil) + require.NoError(t, err) + + // Set up a legacy stack file with a newer version file. + ctx := context.Background() + require.NoError(t, + bucket.WriteAll(ctx, ".pulumi/Pulumi.yaml", []byte("version: 1"), nil)) + require.NoError(t, + bucket.WriteAll(ctx, ".pulumi/stacks/a.json", []byte(`{}`), nil)) + require.NoError(t, + bucket.WriteAll(ctx, ".pulumi/stacks/b.json.gz", []byte(`{}`), nil)) + require.NoError(t, + bucket.WriteAll(ctx, ".pulumi/stacks/c.json.bak", []byte(`{}`), nil)) // should ignore + + var buff bytes.Buffer + sink := diag.DefaultSink(io.Discard, &buff, diag.FormatOptions{Color: colors.Never}) + _, err = New(ctx, sink, "file://"+filepath.ToSlash(stateDir), nil) + require.NoError(t, err) + + stderr := buff.String() + assert.Contains(t, stderr, "Found legacy stack file 'a', you should run 'pulumi state upgrade'") + assert.Contains(t, stderr, "Found legacy stack file 'b', you should run 'pulumi state upgrade'") +} + +func TestNew_unsupportedStoreVersion(t *testing.T) { + t.Parallel() + + // Verifies that we fail to initialize a backend if the store version is + // newer than the CLI version. + + stateDir := t.TempDir() + bucket, err := fileblob.OpenBucket(stateDir, nil) + require.NoError(t, err) + + // Set up a Pulumi.yaml "from the future". + ctx := context.Background() + require.NoError(t, + bucket.WriteAll(ctx, ".pulumi/Pulumi.yaml", []byte("version: 999999999"), nil)) + + _, err = New(ctx, diagtest.LogSink(t), "file://"+filepath.ToSlash(stateDir), nil) + assert.ErrorContains(t, err, "state store unsupported") + assert.ErrorContains(t, err, "'Pulumi.yaml' version (999999999) is not supported") +} diff --git a/pkg/backend/filestate/bucket.go b/pkg/backend/filestate/bucket.go index d33303bdb70a..6adf42eff852 100644 --- a/pkg/backend/filestate/bucket.go +++ b/pkg/backend/filestate/bucket.go @@ -87,7 +87,12 @@ func listBucket(bucket Bucket, dir string) ([]*blob.ListObject, error) { // objectName returns the filename of a ListObject (an object from a bucket). 
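+// Keys that end in "/" (for example, directory placeholders) have the trailing slash trimmed so the final path segment is returned.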
func objectName(obj *blob.ListObject) string { - _, filename := path.Split(obj.Key) + // If obj.Key ends in "/" we want to trim that to get the name just before + key := obj.Key + if key[len(key)-1] == '/' { + key = key[0 : len(key)-1] + } + _, filename := path.Split(key) return filename } diff --git a/pkg/backend/filestate/lock.go b/pkg/backend/filestate/lock.go index 187ba27edd5e..582788e1b7dc 100644 --- a/pkg/backend/filestate/lock.go +++ b/pkg/backend/filestate/lock.go @@ -58,7 +58,8 @@ func newLockContent() (*lockContent, error) { // checkForLock looks for any existing locks for this stack, and returns a helpful diagnostic if there is one. func (b *localBackend) checkForLock(ctx context.Context, stackRef backend.StackReference) error { - allFiles, err := listBucket(b.bucket, stackLockDir(stackRef.Name())) + stackName := stackRef.FullyQualifiedName() + allFiles, err := listBucket(b.bucket, stackLockDir(stackName)) if err != nil { return err } @@ -68,7 +69,7 @@ func (b *localBackend) checkForLock(ctx context.Context, stackRef backend.StackR if file.IsDir { continue } - if file.Key != b.lockPath(stackRef.Name()) { + if file.Key != b.lockPath(stackRef) { lockKeys = append(lockKeys, file.Key) } } @@ -116,7 +117,7 @@ func (b *localBackend) Lock(ctx context.Context, stackRef backend.StackReference if err != nil { return err } - err = b.bucket.WriteAll(ctx, b.lockPath(stackRef.Name()), content, nil) + err = b.bucket.WriteAll(ctx, b.lockPath(stackRef), content, nil) if err != nil { return err } @@ -129,11 +130,11 @@ func (b *localBackend) Lock(ctx context.Context, stackRef backend.StackReference } func (b *localBackend) Unlock(ctx context.Context, stackRef backend.StackReference) { - err := b.bucket.Delete(ctx, b.lockPath(stackRef.Name())) + err := b.bucket.Delete(ctx, b.lockPath(stackRef)) if err != nil { b.d.Errorf( diag.Message("", "there was a problem deleting the lock at %v, manual clean up may be required: %v"), - path.Join(b.url, b.lockPath(stackRef.Name())), + path.Join(b.url, b.lockPath(stackRef)), err) } } @@ -142,12 +143,12 @@ func lockDir() string { return path.Join(workspace.BookkeepingDir, workspace.LockDir) } -func stackLockDir(stack tokens.Name) string { +func stackLockDir(stack tokens.QName) string { contract.Requiref(stack != "", "stack", "must not be empty") - return path.Join(lockDir(), fsutil.NamePath(stack)) + return path.Join(lockDir(), fsutil.QnamePath(stack)) } -func (b *localBackend) lockPath(stack tokens.Name) string { - contract.Requiref(stack != "", "stack", "must not be empty") - return path.Join(stackLockDir(stack), b.lockID+".json") +func (b *localBackend) lockPath(stackRef backend.StackReference) string { + contract.Requiref(stackRef != nil, "stack", "must not be nil") + return path.Join(stackLockDir(stackRef.FullyQualifiedName()), b.lockID+".json") } diff --git a/pkg/backend/filestate/meta.go b/pkg/backend/filestate/meta.go new file mode 100644 index 000000000000..c9228284f3aa --- /dev/null +++ b/pkg/backend/filestate/meta.go @@ -0,0 +1,92 @@ +// Copyright 2016-2023, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package filestate + +import ( + "context" + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" + "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" + "gocloud.dev/blob" + "gocloud.dev/gcerrors" + "gopkg.in/yaml.v3" +) + +// pulumiMeta holds the contents of the .pulumi/Pulumi.yaml file +// in a filestate backend. +// +// This file holds metadata for the backend, +// including a version number that the backend can use +// to maintain compatibility with older versions of the CLI. +type pulumiMeta struct { + // Version is the current version of the state store + Version int `json:"version,omitempty" yaml:"version,omitempty"` +} + +// ensurePulumiMeta loads the .pulumi/Pulumi.yaml file from the bucket, +// creating it if the bucket is new. +// +// If the bucket is not new, and the file does not exist, +// it returns a Version of 0 to indicate that the bucket is in legacy mode (no project). +func ensurePulumiMeta(ctx context.Context, b Bucket) (*pulumiMeta, error) { + statePath := filepath.Join(workspace.BookkeepingDir, "Pulumi.yaml") + stateBody, err := b.ReadAll(ctx, statePath) + if err != nil { + if gcerrors.Code(err) != gcerrors.NotFound { + return nil, fmt.Errorf("could not read 'Pulumi.yaml': %w", err) + } + } + + if err == nil { + // File exists. Load and validate it. + var state pulumiMeta + if err := yaml.Unmarshal(stateBody, &state); err != nil { + return nil, fmt.Errorf("state store corrupted, could not unmarshal 'Pulumi.yaml': %w", err) + } + if state.Version < 1 { + return nil, fmt.Errorf("state store corrupted, 'Pulumi.yaml' reports an invalid version of %d", state.Version) + } + return &state, nil + } + + // We'll only get here if err is NotFound, at this point we want to see if this is a fresh new store, + // in which case we'll write the new Pulumi.yaml, or if there's existing data here we'll fallback to + // non-project mode. + bucketIter := b.List(&blob.ListOptions{ + Delimiter: "/", + Prefix: workspace.BookkeepingDir, + }) + if _, err := bucketIter.Next(ctx); err == nil { + // Already exists. We're in legacy mode. + return &pulumiMeta{Version: 0}, nil + } else if !errors.Is(err, io.EOF) { + // io.EOF is expected, but any other error is not. + return nil, fmt.Errorf("could not examine bucket: %w", err) + } + + // Empty bucket. Turn on project mode. + state := pulumiMeta{Version: 1} + stateBody, err = yaml.Marshal(state) + contract.AssertNoErrorf(err, "Could not marshal filestate.pulumiMeta to yaml") + if err := b.WriteAll(ctx, statePath, stateBody, nil); err != nil { + return nil, fmt.Errorf("could not write 'Pulumi.yaml': %w", err) + } + + return &state, nil +} diff --git a/pkg/backend/filestate/meta_test.go b/pkg/backend/filestate/meta_test.go new file mode 100644 index 000000000000..30a77a1a015c --- /dev/null +++ b/pkg/backend/filestate/meta_test.go @@ -0,0 +1,109 @@ +// Copyright 2016-2023, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package filestate + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gocloud.dev/blob/memblob" +) + +func TestEnsurePulumiMeta(t *testing.T) { + t.Parallel() + + tests := []struct { + desc string + give map[string]string // files in the bucket + want pulumiMeta + }{ + { + // Empty bucket should be initialized to + // the current version. + desc: "empty", + want: pulumiMeta{Version: 1}, + }, + { + // Non-empty bucket without a version file + // should get version 0 for legacy mode. + desc: "legacy", + give: map[string]string{ + ".pulumi/stacks/a.json": `{}`, + }, + want: pulumiMeta{Version: 0}, + }, + { + desc: "version 1", + give: map[string]string{ + ".pulumi/Pulumi.yaml": `version: 1`, + }, + want: pulumiMeta{Version: 1}, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.desc, func(t *testing.T) { + t.Parallel() + + b := memblob.OpenBucket(nil) + ctx := context.Background() + for name, body := range tt.give { + require.NoError(t, b.WriteAll(ctx, name, []byte(body), nil)) + } + + state, err := ensurePulumiMeta(ctx, b) + require.NoError(t, err) + assert.Equal(t, &tt.want, state) + }) + } +} + +func TestEnsurePulumiMeta_corruption(t *testing.T) { + t.Parallel() + + tests := []struct { + desc string + give string // contents of Pulumi.yaml + wantErr string + }{ + { + desc: "empty", + give: ``, // no YAML will get zero value + wantErr: "reports an invalid version of 0", + }, + { + desc: "corrupt version", + give: `version: foo`, + wantErr: "could not unmarshal 'Pulumi.yaml'", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.desc, func(t *testing.T) { + t.Parallel() + + b := memblob.OpenBucket(nil) + ctx := context.Background() + require.NoError(t, b.WriteAll(ctx, ".pulumi/Pulumi.yaml", []byte(tt.give), nil)) + + _, err := ensurePulumiMeta(context.Background(), b) + assert.ErrorContains(t, err, tt.wantErr) + }) + } +} diff --git a/pkg/backend/filestate/snapshot.go b/pkg/backend/filestate/snapshot.go index 1bedb325e584..8beaf89306d9 100644 --- a/pkg/backend/filestate/snapshot.go +++ b/pkg/backend/filestate/snapshot.go @@ -17,13 +17,12 @@ package filestate import ( "github.com/pulumi/pulumi/pkg/v3/resource/deploy" "github.com/pulumi/pulumi/pkg/v3/secrets" - "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" ) // localSnapshotManager is a simple SnapshotManager implementation that persists snapshots // to disk on the local machine. 
type localSnapshotPersister struct { - name tokens.Name + ref *localBackendReference backend *localBackend sm secrets.Manager } @@ -33,10 +32,10 @@ func (sp *localSnapshotPersister) SecretsManager() secrets.Manager { } func (sp *localSnapshotPersister) Save(snapshot *deploy.Snapshot) error { - _, err := sp.backend.saveStack(sp.name, snapshot, sp.sm) + _, err := sp.backend.saveStack(sp.ref, snapshot, sp.sm) return err } -func (b *localBackend) newSnapshotPersister(stackName tokens.Name, sm secrets.Manager) *localSnapshotPersister { - return &localSnapshotPersister{name: stackName, backend: b, sm: sm} +func (b *localBackend) newSnapshotPersister(ref *localBackendReference, sm secrets.Manager) *localSnapshotPersister { + return &localSnapshotPersister{ref: ref, backend: b, sm: sm} } diff --git a/pkg/backend/filestate/stack.go b/pkg/backend/filestate/stack.go index 93bb811db496..05d044932381 100644 --- a/pkg/backend/filestate/stack.go +++ b/pkg/backend/filestate/stack.go @@ -28,6 +28,7 @@ import ( "github.com/pulumi/pulumi/pkg/v3/secrets/passphrase" "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" "github.com/pulumi/pulumi/sdk/v3/go/common/display" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v3/go/common/util/result" ) @@ -39,13 +40,15 @@ type Stack interface { // localStack is a local stack descriptor. type localStack struct { - ref backend.StackReference // the stack's reference (qualified name). + ref *localBackendReference // the stack's reference (qualified name). path string // a path to the stack's checkpoint file on disk. snapshot *deploy.Snapshot // a snapshot representing the latest deployment state. b *localBackend // a pointer to the backend this stack belongs to. } -func newStack(ref backend.StackReference, path string, snapshot *deploy.Snapshot, b *localBackend) Stack { +func newStack(ref *localBackendReference, path string, snapshot *deploy.Snapshot, b *localBackend) Stack { + contract.Requiref(ref != nil, "ref", "ref was nil") + return &localStack{ ref: ref, path: path, diff --git a/pkg/backend/filestate/state.go b/pkg/backend/filestate/state.go index d5116375233d..61a07c72a098 100644 --- a/pkg/backend/filestate/state.go +++ b/pkg/backend/filestate/state.go @@ -16,7 +16,6 @@ package filestate import ( "context" - "errors" "fmt" "io" "os" @@ -39,10 +38,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" "github.com/pulumi/pulumi/sdk/v3/go/common/encoding" "github.com/pulumi/pulumi/sdk/v3/go/common/resource/config" - "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" - "github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging" "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" ) @@ -96,13 +93,13 @@ func (b *localBackend) newQuery( func (b *localBackend) newUpdate( ctx context.Context, - stackName tokens.Name, + ref *localBackendReference, op backend.UpdateOperation, ) (*update, error) { - contract.Requiref(stackName != "", "stackName", "must not be empty") + contract.Requiref(ref != nil, "ref", "must not be nil") // Construct the deployment target. 
- target, err := b.getTarget(ctx, stackName, + target, err := b.getTarget(ctx, ref, op.StackConfiguration.Config, op.StackConfiguration.Decrypter) if err != nil { return nil, err @@ -119,17 +116,18 @@ func (b *localBackend) newUpdate( func (b *localBackend) getTarget( ctx context.Context, - stackName tokens.Name, + stack *localBackendReference, cfg config.Map, dec config.Decrypter, ) (*deploy.Target, error) { - snapshot, _, err := b.getStack(ctx, stackName) + contract.Requiref(stack != nil, "stack", "must not be nil") + snapshot, _, err := b.getStack(ctx, stack) if err != nil { return nil, err } return &deploy.Target{ - Name: stackName, - Organization: "", // filestate has no organizations + Name: stack.Name(), + Organization: "organization", // filestate has no organizations really, but we just always say it's "organization" Config: cfg, Decrypter: dec, Snapshot: snapshot, @@ -138,15 +136,13 @@ func (b *localBackend) getTarget( func (b *localBackend) getStack( ctx context.Context, - name tokens.Name, + ref *localBackendReference, ) (*deploy.Snapshot, string, error) { - if name == "" { - return nil, "", errors.New("invalid empty stack name") - } + contract.Requiref(ref != nil, "ref", "must not be nil") - file := b.stackPath(name) + file := b.stackPath(ref) - chk, err := b.getCheckpoint(name) + chk, err := b.getCheckpoint(ref) if err != nil { return nil, file, fmt.Errorf("failed to load checkpoint: %w", err) } @@ -168,8 +164,8 @@ func (b *localBackend) getStack( } // GetCheckpoint loads a checkpoint file for the given stack in this project, from the current project workspace. -func (b *localBackend) getCheckpoint(stackName tokens.Name) (*apitype.CheckpointV3, error) { - chkpath := b.stackPath(stackName) +func (b *localBackend) getCheckpoint(ref *localBackendReference) (*apitype.CheckpointV3, error) { + chkpath := b.stackPath(ref) bytes, err := b.bucket.ReadAll(context.TODO(), chkpath) if err != nil { return nil, err @@ -183,10 +179,10 @@ func (b *localBackend) getCheckpoint(stackName tokens.Name) (*apitype.Checkpoint } func (b *localBackend) saveCheckpoint( - name tokens.Name, checkpoint *apitype.VersionedCheckpoint, + ref *localBackendReference, checkpoint *apitype.VersionedCheckpoint, ) (backupFile string, file string, _ error) { // Make a serializable stack and then use the encoder to encode it. 
- file = b.stackPath(name) + file = b.stackPath(ref) m, ext := encoding.Detect(strings.TrimSuffix(file, ".gz")) if m == nil { return "", "", fmt.Errorf("resource serialization failed; illegal markup extension: '%v'", ext) @@ -258,7 +254,7 @@ func (b *localBackend) saveCheckpoint( } } - logging.V(7).Infof("Saved stack %s checkpoint to: %s (backup=%s)", name, file, backupFile) + logging.V(7).Infof("Saved stack %s checkpoint to: %s (backup=%s)", ref.FullyQualifiedName(), file, backupFile) // And if we are retaining historical checkpoint information, write it out again if cmdutil.IsTruthy(os.Getenv("PULUMI_RETAIN_CHECKPOINTS")) { @@ -270,13 +266,17 @@ func (b *localBackend) saveCheckpoint( return backupFile, file, nil } -func (b *localBackend) saveStack(name tokens.Name, snap *deploy.Snapshot, sm secrets.Manager) (string, error) { - chk, err := stack.SerializeCheckpoint(name, snap, sm, false /* showSecrets */) +func (b *localBackend) saveStack( + ref *localBackendReference, snap *deploy.Snapshot, + sm secrets.Manager, +) (string, error) { + contract.Requiref(ref != nil, "ref", "ref was nil") + chk, err := stack.SerializeCheckpoint(ref.FullyQualifiedName(), snap, sm, false /* showSecrets */) if err != nil { return "", fmt.Errorf("serializaing checkpoint: %w", err) } - backup, file, err := b.saveCheckpoint(name, chk) + backup, file, err := b.saveCheckpoint(ref, chk) if err != nil { return "", err } @@ -296,14 +296,14 @@ func (b *localBackend) saveStack(name tokens.Name, snap *deploy.Snapshot, sm sec } // removeStack removes information about a stack from the current workspace. -func (b *localBackend) removeStack(name tokens.Name) error { - contract.Requiref(name != "", "name", "must not be empty") +func (b *localBackend) removeStack(ref *localBackendReference) error { + contract.Requiref(ref != nil, "ref", "must not be nil") // Just make a backup of the file and don't write out anything new. - file := b.stackPath(name) + file := b.stackPath(ref) backupTarget(b.bucket, file, false) - historyDir := b.historyDirectory(name) + historyDir := ref.HistoryDir() return removeAllByPrefix(b.bucket, historyDir) } @@ -329,8 +329,8 @@ func backupTarget(bucket Bucket, file string, keepOriginal bool) string { } // backupStack copies the current Checkpoint file to ~/.pulumi/backups. -func (b *localBackend) backupStack(name tokens.Name) error { - contract.Requiref(name != "", "name", "must not be empty") +func (b *localBackend) backupStack(ref *localBackendReference) error { + contract.Requiref(ref != nil, "ref", "must not be nil") // Exit early if backups are disabled. if cmdutil.IsTruthy(os.Getenv(DisableCheckpointBackupsEnvVar)) { @@ -338,14 +338,14 @@ func (b *localBackend) backupStack(name tokens.Name) error { } // Read the current checkpoint file. (Assuming it aleady exists.) - stackPath := b.stackPath(name) + stackPath := b.stackPath(ref) byts, err := b.bucket.ReadAll(context.TODO(), stackPath) if err != nil { return err } // Get the backup directory. - backupDir := b.backupDirectory(name) + backupDir := ref.BackupDir() // Write out the new backup checkpoint file. 
stackFile := filepath.Base(stackPath) @@ -362,16 +362,15 @@ func (b *localBackend) backupStack(name tokens.Name) error { return b.bucket.WriteAll(context.TODO(), filepath.Join(backupDir, backupFile), byts, nil) } -func (b *localBackend) stackPath(stack tokens.Name) string { - path := filepath.Join(b.StateDir(), workspace.StackDir) - if stack == "" { - return path +func (b *localBackend) stackPath(ref *localBackendReference) string { + if ref == nil { + return StacksDir } // We can't use listBucket here for as we need to do a partial prefix match on filename, while the // "dir" option to listBucket is always suffixed with "/". Also means we don't need to save any // results in a slice. - plainPath := filepath.ToSlash(filepath.Join(path, fsutil.NamePath(stack)) + ".json") + plainPath := ref.StackBasePath() + ".json" gzipedPath := plainPath + ".gz" bucketIter := b.bucket.List(&blob.ListOptions{ @@ -407,22 +406,12 @@ func (b *localBackend) stackPath(stack tokens.Name) string { return plainPath } -func (b *localBackend) historyDirectory(stack tokens.Name) string { - contract.Requiref(stack != "", "stack", "must not be empty") - return filepath.Join(b.StateDir(), workspace.HistoryDir, fsutil.NamePath(stack)) -} - -func (b *localBackend) backupDirectory(stack tokens.Name) string { - contract.Requiref(stack != "", "stack", "must not be empty") - return filepath.Join(b.StateDir(), workspace.BackupDir, fsutil.NamePath(stack)) -} - // getHistory returns locally stored update history. The first element of the result will be // the most recent update record. -func (b *localBackend) getHistory(name tokens.Name, pageSize int, page int) ([]backend.UpdateInfo, error) { - contract.Requiref(name != "", "name", "must not be empty") +func (b *localBackend) getHistory(stack *localBackendReference, pageSize int, page int) ([]backend.UpdateInfo, error) { + contract.Requiref(stack != nil, "stack", "must not be nil") - dir := b.historyDirectory(name) + dir := stack.HistoryDir() // TODO: we could consider optimizing the list operation using `page` and `pageSize`. // Unfortunately, this is mildly invasive given the gocloud List API. allFiles, err := listBucket(b.bucket, dir) @@ -491,12 +480,12 @@ func (b *localBackend) getHistory(name tokens.Name, pageSize int, page int) ([]b return updates, nil } -func (b *localBackend) renameHistory(oldName tokens.Name, newName tokens.Name) error { - contract.Requiref(oldName != "", "oldName", "must not be empty") - contract.Requiref(newName != "", "newName", "must not be empty") +func (b *localBackend) renameHistory(oldName *localBackendReference, newName *localBackendReference) error { + contract.Requiref(oldName != nil, "oldName", "must not be nil") + contract.Requiref(newName != nil, "newName", "must not be nil") - oldHistory := b.historyDirectory(oldName) - newHistory := b.historyDirectory(newName) + oldHistory := oldName.HistoryDir() + newHistory := newName.HistoryDir() allFiles, err := listBucket(b.bucket, oldHistory) if err != nil { @@ -515,12 +504,12 @@ func (b *localBackend) renameHistory(oldName tokens.Name, newName tokens.Name) e // the stack name part but retain the other parts. If we find files that don't match this format // ignore them. 
dashIndex := strings.LastIndex(fileName, "-") - if dashIndex == -1 || (fileName[:dashIndex] != oldName.String()) { + if dashIndex == -1 || (fileName[:dashIndex] != oldName.name.String()) { // No dash or the string up to the dash isn't the old name continue } - newFileName := string(newName) + fileName[dashIndex:] + newFileName := newName.name.String() + fileName[dashIndex:] newBlob := path.Join(newHistory, newFileName) if err := b.bucket.Copy(context.TODO(), newBlob, oldBlob, nil); err != nil { @@ -535,13 +524,13 @@ func (b *localBackend) renameHistory(oldName tokens.Name, newName tokens.Name) e } // addToHistory saves the UpdateInfo and makes a copy of the current Checkpoint file. -func (b *localBackend) addToHistory(name tokens.Name, update backend.UpdateInfo) error { - contract.Requiref(name != "", "name", "must not be empty") +func (b *localBackend) addToHistory(ref *localBackendReference, update backend.UpdateInfo) error { + contract.Requiref(ref != nil, "ref", "must not be nil") - dir := b.historyDirectory(name) + dir := ref.HistoryDir() // Prefix for the update and checkpoint files. - pathPrefix := path.Join(dir, fmt.Sprintf("%s-%d", name, time.Now().UnixNano())) + pathPrefix := path.Join(dir, fmt.Sprintf("%s-%d", ref.name, time.Now().UnixNano())) m, ext := encoding.JSON, "json" if b.gzip { @@ -562,5 +551,5 @@ func (b *localBackend) addToHistory(name tokens.Name, update backend.UpdateInfo) // Make a copy of the checkpoint file. (Assuming it already exists.) checkpointFile := fmt.Sprintf("%s.checkpoint.%s", pathPrefix, ext) - return b.bucket.Copy(context.TODO(), checkpointFile, b.stackPath(name), nil) + return b.bucket.Copy(context.TODO(), checkpointFile, b.stackPath(ref), nil) } diff --git a/pkg/backend/filestate/store.go b/pkg/backend/filestate/store.go new file mode 100644 index 000000000000..375e32705166 --- /dev/null +++ b/pkg/backend/filestate/store.go @@ -0,0 +1,343 @@ +// Copyright 2016-2023, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filestate + +import ( + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/pulumi/pulumi/sdk/v3/go/common/encoding" + "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil" + "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" +) + +// These should be constants +// but we can't make a constant from filepath.Join. +var ( + // StacksDir is a path under the state's root directory + // where the filestate backend stores stack information. + StacksDir = filepath.Join(workspace.BookkeepingDir, workspace.StackDir) + + // HistoriesDir is a path under the state's root directory + // where the filestate backend stores histories for all stacks. + HistoriesDir = filepath.Join(workspace.BookkeepingDir, workspace.HistoryDir) + + // BackupsDir is a path under the state's root directory + // where the filestate backend stores backups of stacks. 
+ BackupsDir = filepath.Join(workspace.BookkeepingDir, workspace.BackupDir) +) + +// referenceStore stores and provides access to stack information. +// +// Each implementation of referenceStore is a different version of the stack +// storage format. +type referenceStore interface { + // StackBasePath returns the base path for the file + // where snapshots of this stack are stored. + // + // This must be under StacksDir. + // + // This is the path to the file without the extension. + // The real file path is StackBasePath + ".json" + // or StackBasePath + ".json.gz". + StackBasePath(*localBackendReference) string + + // HistoryDir returns the path to the directory + // where history for this stack is stored. + // + // This must be under HistoriesDir. + HistoryDir(*localBackendReference) string + + // BackupDir returns the path to the directory + // where backups for this stack are stored. + // + // This must be under BackupsDir. + BackupDir(*localBackendReference) string + + // ListReferences lists all stack references in the store. + ListReferences() ([]*localBackendReference, error) + + // ParseReference parses a localBackendReference from a string. + ParseReference(ref string) (*localBackendReference, error) + + // ValidateReference verifies that the provided reference is valid, + // returning an error if it is not. + ValidateReference(*localBackendReference) error +} + +// projectReferenceStore is a referenceStore that stores stack +// information with the new project-based layout. +// +// This is version 1 of the stack storage format. +type projectReferenceStore struct { + b *localBackend +} + +var _ referenceStore = (*projectReferenceStore)(nil) + +// newReference builds a new localBackendReference with the provided arguments. +// This DOES NOT modify the underlying storage. +func (p *projectReferenceStore) newReference(project, name tokens.Name) *localBackendReference { + return &localBackendReference{ + name: name, + project: project, + store: p, + b: p.b, + } +} + +func (p *projectReferenceStore) StackBasePath(ref *localBackendReference) string { + contract.Requiref(ref.project != "", "ref.project", "must not be empty") + return filepath.Join(StacksDir, fsutil.NamePath(ref.project), fsutil.NamePath(ref.name)) +} + +func (p *projectReferenceStore) HistoryDir(stack *localBackendReference) string { + contract.Requiref(stack.project != "", "ref.project", "must not be empty") + return filepath.Join(HistoriesDir, fsutil.NamePath(stack.project), fsutil.NamePath(stack.name)) +} + +func (p *projectReferenceStore) BackupDir(stack *localBackendReference) string { + contract.Requiref(stack.project != "", "ref.project", "must not be empty") + return filepath.Join(BackupsDir, fsutil.NamePath(stack.project), fsutil.NamePath(stack.name)) +} + +func (p *projectReferenceStore) ParseReference(stackRef string) (*localBackendReference, error) { + var name, project, org string + split := strings.Split(stackRef, "/") + switch len(split) { + case 1: + name = split[0] + case 2: + org = split[0] + name = split[1] + case 3: + org = split[0] + project = split[1] + name = split[2] + default: + return nil, fmt.Errorf("could not parse stack reference '%s'", stackRef) + } + + // If the provided stack name didn't include the org or project, infer them from the local + // environment.
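+ // The filestate backend recognizes a single implicit organization named "organization"; any other organization segment is rejected below. + // A missing project segment falls back to the current workspace's project, when one is available.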
+ if org == "" { + // Filestate organization MUST always be "organization" + org = "organization" + } + + if org != "organization" { + return nil, errors.New("organization name must be 'organization'") + } + + if project == "" { + currentProject := p.b.currentProject.Load() + if currentProject == nil { + return nil, fmt.Errorf("if you're using the --stack flag, " + + "pass the fully qualified name (organization/project/stack)") + } + + project = currentProject.Name.String() + } + + if len(project) > 100 { + return nil, errors.New("project names must be less than 100 characters") + } + + if project != "" && !tokens.IsName(project) { + return nil, fmt.Errorf( + "project names may only contain alphanumerics, hyphens, underscores, and periods: %s", + project) + } + + if !tokens.IsName(name) || len(name) > 100 { + return nil, fmt.Errorf( + "stack names are limited to 100 characters and may only contain alphanumeric, hyphens, underscores, or periods: %s", + name) + } + + return p.newReference(tokens.Name(project), tokens.Name(name)), nil +} + +func (p *projectReferenceStore) ValidateReference(ref *localBackendReference) error { + if ref.project == "" { + return fmt.Errorf("bad stack reference, project was not set") + } + return nil +} + +func (p *projectReferenceStore) ListProjects() ([]tokens.Name, error) { + path := StacksDir + + files, err := listBucket(p.b.bucket, path) + if err != nil { + return nil, fmt.Errorf("error listing stacks: %w", err) + } + + projects := make([]tokens.Name, 0, len(files)) + for _, file := range files { + if !file.IsDir { + continue // ignore files + } + + projName := objectName(file) + if !tokens.IsName(projName) { + // If this isn't a valid Name + // it won't be a project directory, + // so skip it. + continue + } + + projects = append(projects, tokens.Name(projName)) + } + + return projects, nil +} + +func (p *projectReferenceStore) ListReferences() ([]*localBackendReference, error) { + // The first level of the bucket is the project name. + // The second level of the bucket is the stack name. + path := StacksDir + + projects, err := p.ListProjects() + if err != nil { + return nil, err + } + + var stacks []*localBackendReference + for _, projName := range projects { + // TODO: Could we improve the efficiency here by firstly making listBucket return an enumerator not + // eagerly collecting all keys into a slice, and secondly by getting listBucket to return all + // descendent items not just the immediate children. We could then do the necessary splitting by + // file paths here to work out project names. + projectFiles, err := listBucket(p.b.bucket, filepath.Join(path, projName.String())) + if err != nil { + return nil, fmt.Errorf("error listing stacks: %w", err) + } + + for _, projectFile := range projectFiles { + // Can ignore directories at this level + if projectFile.IsDir { + continue + } + + objName := objectName(projectFile) + // Skip files without valid extensions (e.g., *.bak files). + ext := filepath.Ext(objName) + // But accept gzip compression + if ext == encoding.GZIPExt { + objName = strings.TrimSuffix(objName, encoding.GZIPExt) + ext = filepath.Ext(objName) + } + + if _, has := encoding.Marshalers[ext]; !has { + continue + } + + // Read in this stack's information. + name := objName[:len(objName)-len(ext)] + stacks = append(stacks, p.newReference(projName, tokens.Name(name))) + } + } + + return stacks, nil +} + +// legacyReferenceStore is a referenceStore that stores stack +// information with the legacy layout that did not support projects. 
+// +// This is the format we used before we introduced versioning. +type legacyReferenceStore struct { + b *localBackend +} + +var _ referenceStore = (*legacyReferenceStore)(nil) + +// newReference builds a new localBackendReference with the provided arguments. +// This DOES NOT modify the underlying storage. +func (p *legacyReferenceStore) newReference(name tokens.Name) *localBackendReference { + return &localBackendReference{ + name: name, + store: p, + b: p.b, + } +} + +func (p *legacyReferenceStore) StackBasePath(ref *localBackendReference) string { + contract.Requiref(ref.project == "", "ref.project", "must be empty") + return filepath.Join(StacksDir, fsutil.NamePath(ref.name)) +} + +func (p *legacyReferenceStore) HistoryDir(stack *localBackendReference) string { + contract.Requiref(stack.project == "", "ref.project", "must be empty") + return filepath.Join(HistoriesDir, fsutil.NamePath(stack.name)) +} + +func (p *legacyReferenceStore) BackupDir(stack *localBackendReference) string { + contract.Requiref(stack.project == "", "ref.project", "must be empty") + return filepath.Join(BackupsDir, fsutil.NamePath(stack.name)) +} + +func (p *legacyReferenceStore) ParseReference(stackRef string) (*localBackendReference, error) { + if !tokens.IsName(stackRef) || len(stackRef) > 100 { + return nil, fmt.Errorf( + "stack names are limited to 100 characters and may only contain alphanumeric, hyphens, underscores, or periods: %q", + stackRef) + } + return p.newReference(tokens.Name(stackRef)), nil +} + +func (p *legacyReferenceStore) ValidateReference(ref *localBackendReference) error { + if ref.project != "" { + return fmt.Errorf("bad stack reference, project was set") + } + return nil +} + +func (p *legacyReferenceStore) ListReferences() ([]*localBackendReference, error) { + files, err := listBucket(p.b.bucket, StacksDir) + if err != nil { + return nil, fmt.Errorf("error listing stacks: %w", err) + } + stacks := make([]*localBackendReference, 0, len(files)) + + for _, file := range files { + if file.IsDir { + continue + } + + objName := objectName(file) + // Skip files without valid extensions (e.g., *.bak files). + ext := filepath.Ext(objName) + // But accept gzip compression + if ext == encoding.GZIPExt { + objName = strings.TrimSuffix(objName, encoding.GZIPExt) + ext = filepath.Ext(objName) + } + + if _, has := encoding.Marshalers[ext]; !has { + continue + } + + // Read in this stack's information. + name := objName[:len(objName)-len(ext)] + stacks = append(stacks, p.newReference(tokens.Name(name))) + } + + return stacks, nil +} diff --git a/pkg/cmd/pulumi/new_acceptance_test.go b/pkg/cmd/pulumi/new_acceptance_test.go index 9897f6f10cfe..64fcdab6640d 100644 --- a/pkg/cmd/pulumi/new_acceptance_test.go +++ b/pkg/cmd/pulumi/new_acceptance_test.go @@ -149,7 +149,8 @@ func TestCreatingProjectWithPulumiBackendURL(t *testing.T) { proj := loadProject(t, tempdir) assert.Equal(t, defaultProjectName, proj.Name.String()) // Expect the stack directory to have a checkpoint file for the stack. 
- _, err = os.Stat(filepath.Join(fileStateDir, workspace.BookkeepingDir, workspace.StackDir, stackName+".json")) + _, err = os.Stat(filepath.Join( + fileStateDir, workspace.BookkeepingDir, workspace.StackDir, defaultProjectName, stackName+".json")) assert.NoError(t, err) b, err = currentBackend(ctx, nil, display.Options{}) diff --git a/pkg/cmd/pulumi/state.go b/pkg/cmd/pulumi/state.go index 21dfc66526ac..b05f47d271b3 100644 --- a/pkg/cmd/pulumi/state.go +++ b/pkg/cmd/pulumi/state.go @@ -1,4 +1,4 @@ -// Copyright 2016-2022, Pulumi Corporation. +// Copyright 2016-2023, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -50,6 +50,7 @@ troubleshooting a stack or when performing specific edits that otherwise would r cmd.AddCommand(newStateDeleteCommand()) cmd.AddCommand(newStateUnprotectCommand()) cmd.AddCommand(newStateRenameCommand()) + cmd.AddCommand(newStateUpgradeCommand()) return cmd } diff --git a/pkg/cmd/pulumi/state_upgrade.go b/pkg/cmd/pulumi/state_upgrade.go new file mode 100644 index 000000000000..ecf184bb0f09 --- /dev/null +++ b/pkg/cmd/pulumi/state_upgrade.go @@ -0,0 +1,76 @@ +// Copyright 2016-2023, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + + "github.com/pulumi/pulumi/pkg/v3/backend" + "github.com/pulumi/pulumi/pkg/v3/backend/display" + "github.com/pulumi/pulumi/pkg/v3/backend/filestate" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/result" + "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" + + "github.com/spf13/cobra" +) + +func newStateUpgradeCommand() *cobra.Command { + var sucmd stateUpgradeCmd + + cmd := &cobra.Command{ + Use: "upgrade", + Short: "Migrates the current backend to the latest supported version", + Long: `Migrates the current backend to the latest supported version + +This only has an effect on the filestate backend. +`, + Args: cmdutil.NoArgs, + Run: cmdutil.RunResultFunc(func(cmd *cobra.Command, args []string) result.Result { + if err := sucmd.Run(commandContext()); err != nil { + return result.FromError(err) + } + return nil + }), + } + return cmd +} + +// stateUpgradeCmd implements the 'pulumi state upgrade' command. +type stateUpgradeCmd struct { + // Used to mock out the currentBackend function for testing. + // Defaults to currentBackend function. 
+ currentBackend func(context.Context, *workspace.Project, display.Options) (backend.Backend, error) +} + +func (cmd *stateUpgradeCmd) Run(ctx context.Context) error { + if cmd.currentBackend == nil { + cmd.currentBackend = currentBackend + } + currentBackend := cmd.currentBackend // shadow top-level currentBackend + + b, err := currentBackend(ctx, nil, display.Options{Color: cmdutil.GetGlobalColorization()}) + if err != nil { + return err + } + + if lb, is := b.(filestate.Backend); is { + if err := lb.Upgrade(ctx); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/cmd/pulumi/state_upgrade_test.go b/pkg/cmd/pulumi/state_upgrade_test.go new file mode 100644 index 000000000000..46d0268e3870 --- /dev/null +++ b/pkg/cmd/pulumi/state_upgrade_test.go @@ -0,0 +1,138 @@ +package main + +import ( + "context" + "errors" + "testing" + + "github.com/pulumi/pulumi/pkg/v3/backend" + "github.com/pulumi/pulumi/pkg/v3/backend/display" + "github.com/pulumi/pulumi/pkg/v3/backend/filestate" + "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStateUpgradeCommand_parseArgs(t *testing.T) { + t.Parallel() + + // Parsing flags with a cobra.Command without running the command + // is a bit verbose. + // You have to run ParseFlags to parse the flags, + // then extract non-flag arguments with cmd.Flags().Args(), + // then run ValidateArgs to validate the positional arguments. + + cmd := newStateUpgradeCommand() + args := []string{} // no arguments + + require.NoError(t, cmd.ParseFlags(args)) + args = cmd.Flags().Args() // non flag args + require.NoError(t, cmd.ValidateArgs(args)) +} + +func TestStateUpgradeCommand_parseArgsErrors(t *testing.T) { + t.Parallel() + + tests := []struct { + desc string + give []string + wantErr string + }{ + { + desc: "unknown flag", + give: []string{"--unknown"}, + wantErr: "unknown flag: --unknown", + }, + // Unfortunately, + // our cmdutil.NoArgs validator exits the program, + // causing the test to fail. + // Until we resolve this, we'll skip this test + // and rely on the positive test case + // to validate the arguments instead. + // { + // desc: "unexpected argument", + // give: []string{"arg"}, + // wantErr: `unknown command "arg" for "upgrade"`, + // }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.desc, func(t *testing.T) { + t.Parallel() + + cmd := newStateUpgradeCommand() + args := tt.give + + // Errors can occur during flag parsing + // or argument validation. + // If there's no error on ParseFlags, + // expect one on ValidateArgs.
+ if err := cmd.ParseFlags(args); err != nil { + assert.ErrorContains(t, err, tt.wantErr) + return + } + args = cmd.Flags().Args() // non flag args + assert.ErrorContains(t, cmd.ValidateArgs(args), tt.wantErr) + }) + } +} + +func TestStateUpgradeCommand_Run_upgrade(t *testing.T) { + t.Parallel() + + var called bool + cmd := stateUpgradeCmd{ + currentBackend: func(context.Context, *workspace.Project, display.Options) (backend.Backend, error) { + return &stubFileBackend{ + UpgradeF: func(context.Context) error { + called = true + return nil + }, + }, nil + }, + } + + err := cmd.Run(context.Background()) + require.NoError(t, err) + + assert.True(t, called, "Upgrade was never called") +} + +func TestStateUpgradeCommand_Run_unsupportedBackend(t *testing.T) { + t.Parallel() + + cmd := stateUpgradeCmd{ + currentBackend: func(context.Context, *workspace.Project, display.Options) (backend.Backend, error) { + return &backend.MockBackend{}, nil + }, + } + + // Non-filestate backend is already up-to-date. + err := cmd.Run(context.Background()) + require.NoError(t, err) +} + +func TestStateUpgradeCmd_Run_backendError(t *testing.T) { + t.Parallel() + + giveErr := errors.New("great sadness") + cmd := stateUpgradeCmd{ + currentBackend: func(context.Context, *workspace.Project, display.Options) (backend.Backend, error) { + return nil, giveErr + }, + } + + err := cmd.Run(context.Background()) + assert.ErrorIs(t, err, giveErr) +} + +type stubFileBackend struct { + filestate.Backend + + UpgradeF func(context.Context) error +} + +func (f *stubFileBackend) Upgrade(ctx context.Context) error { + return f.UpgradeF(ctx) +} diff --git a/pkg/resource/stack/checkpoint.go b/pkg/resource/stack/checkpoint.go index c2913be5b0b6..7408d0fa89d3 100644 --- a/pkg/resource/stack/checkpoint.go +++ b/pkg/resource/stack/checkpoint.go @@ -80,13 +80,13 @@ func UnmarshalVersionedCheckpointToLatestCheckpoint(m encoding.Marshaler, bytes } func MarshalUntypedDeploymentToVersionedCheckpoint( - stack tokens.Name, deployment *apitype.UntypedDeployment, + stack tokens.QName, deployment *apitype.UntypedDeployment, ) (*apitype.VersionedCheckpoint, error) { chk := struct { Stack tokens.QName Latest json.RawMessage }{ - Stack: stack.Q(), + Stack: stack, Latest: deployment.Deployment, } @@ -102,7 +102,7 @@ func MarshalUntypedDeploymentToVersionedCheckpoint( } // SerializeCheckpoint turns a snapshot into a data structure suitable for serialization. -func SerializeCheckpoint(stack tokens.Name, snap *deploy.Snapshot, +func SerializeCheckpoint(stack tokens.QName, snap *deploy.Snapshot, sm secrets.Manager, showSecrets bool, ) (*apitype.VersionedCheckpoint, error) { // If snap is nil, that's okay, we will just create an empty deployment; otherwise, serialize the whole snapshot. 
@@ -116,7 +116,7 @@ func SerializeCheckpoint(stack tokens.Name, snap *deploy.Snapshot, } b, err := encoding.JSON.Marshal(apitype.CheckpointV3{ - Stack: stack.Q(), + Stack: stack, Latest: latest, }) if err != nil { diff --git a/tests/config_test.go b/tests/config_test.go index ab9278a79da7..a5f9ba571488 100644 --- a/tests/config_test.go +++ b/tests/config_test.go @@ -71,10 +71,14 @@ func TestConfigCommands(t *testing.T) { // check that the nested config does not exist because we didn't use path _, stderr := e.RunCommandExpectError("pulumi", "config", "get", "outer") - assert.Equal(t, "error: configuration key 'outer' not found for stack 'test'", strings.Trim(stderr, "\r\n")) + assert.Equal(t, + "error: configuration key 'outer' not found for stack 'test'", + strings.Trim(stderr, "\r\n")) _, stderr = e.RunCommandExpectError("pulumi", "config", "get", "myList") - assert.Equal(t, "error: configuration key 'myList' not found for stack 'test'", strings.Trim(stderr, "\r\n")) + assert.Equal(t, + "error: configuration key 'myList' not found for stack 'test'", + strings.Trim(stderr, "\r\n")) // set the nested config using --path e.RunCommand("pulumi", "config", "set-all", "--path", diff --git a/tests/integration/integration_test.go b/tests/integration/integration_test.go index a6b48d9978ae..5074ffc3f871 100644 --- a/tests/integration/integration_test.go +++ b/tests/integration/integration_test.go @@ -531,7 +531,7 @@ func TestDestroyStackRef(t *testing.T) { e.RunCommand("pulumi", "up", "--skip-preview", "--yes") e.CWD = os.TempDir() - e.RunCommand("pulumi", "destroy", "--skip-preview", "--yes", "-s", "dev") + e.RunCommand("pulumi", "destroy", "--skip-preview", "--yes", "-s", "organization/large_resource_js/dev") } //nolint:paralleltest // uses parallel programtest diff --git a/tests/stack_test.go b/tests/stack_test.go index 77dd04dda2b6..f29e0bc43436 100644 --- a/tests/stack_test.go +++ b/tests/stack_test.go @@ -367,7 +367,7 @@ func TestStackBackups(t *testing.T) { const stackName = "imulup" // Get the path to the backup directory for this project. - backupDir, err := getStackProjectBackupDir(e, stackName) + backupDir, err := getStackProjectBackupDir(e, "stack_outputs", stackName) assert.NoError(t, err, "getting stack project backup path") defer func() { if !t.Failed() { @@ -560,8 +560,8 @@ func TestLocalStateLocking(t *testing.T) { // stackFileFormatAsserters returns a function to assert that the current file // format is for gzip and plain formats respectively. 
-func stackFileFormatAsserters(t *testing.T, e *ptesting.Environment, stackName string) (func(), func()) { - stacksDir := filepath.Join(".pulumi", "stacks") +func stackFileFormatAsserters(t *testing.T, e *ptesting.Environment, projectName, stackName string) (func(), func()) { + stacksDir := filepath.Join(".pulumi", "stacks", projectName) pathStack := filepath.Join(stacksDir, stackName+".json") pathStackGzip := pathStack + ".gz" pathStackBak := pathStack + ".bak" @@ -622,7 +622,7 @@ func TestLocalStateGzip(t *testing.T) { //nolint:paralleltest e.RunCommand("yarn", "install") e.RunCommand("pulumi", "up", "--non-interactive", "--yes", "--skip-preview") - assertGzipFileFormat, assertPlainFileFormat := stackFileFormatAsserters(t, e, stackName) + assertGzipFileFormat, assertPlainFileFormat := stackFileFormatAsserters(t, e, "stack_dependencies", stackName) switchGzipOff := func() { e.Setenv(filestate.PulumiFilestateGzipEnvVar, "0") } switchGzipOn := func() { e.Setenv(filestate.PulumiFilestateGzipEnvVar, "1") } pulumiUp := func() { e.RunCommand("pulumi", "up", "--non-interactive", "--yes", "--skip-preview") } @@ -691,10 +691,11 @@ func assertBackupStackFile(t *testing.T, stackName string, file os.DirEntry, bef assert.True(t, parsedTime < after, "False: %v < %v", parsedTime, after) } -func getStackProjectBackupDir(e *ptesting.Environment, stackName string) (string, error) { +func getStackProjectBackupDir(e *ptesting.Environment, projectName, stackName string) (string, error) { return filepath.Join(e.RootPath, workspace.BookkeepingDir, workspace.BackupDir, + projectName, stackName, ), nil }
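Reviewer note: the sketch below is illustrative only and is not part of this diff. Assuming it lived in the filestate package next to meta_test.go, it shows how ensurePulumiMeta distinguishes a fresh store (initialized to version 1, with .pulumi/Pulumi.yaml written back) from a legacy store (existing data, no metadata file, version 0). The test name and the dev.json stack file are made up for the example.

package filestate

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gocloud.dev/blob/memblob"
)

// TestEnsurePulumiMeta_sketch exercises the fresh and legacy paths of
// ensurePulumiMeta against in-memory buckets.
func TestEnsurePulumiMeta_sketch(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	// A brand-new, empty bucket is put into project mode (version 1),
	// and the metadata file is written back for other CLIs to read.
	fresh := memblob.OpenBucket(nil)
	meta, err := ensurePulumiMeta(ctx, fresh)
	require.NoError(t, err)
	assert.Equal(t, 1, meta.Version)
	ok, err := fresh.Exists(ctx, ".pulumi/Pulumi.yaml")
	require.NoError(t, err)
	assert.True(t, ok)

	// A bucket that already holds stacks but has no Pulumi.yaml is treated
	// as the legacy, non-project layout (version 0) until
	// 'pulumi state upgrade' is run.
	legacy := memblob.OpenBucket(nil)
	require.NoError(t, legacy.WriteAll(ctx, ".pulumi/stacks/dev.json", []byte(`{}`), nil))
	meta, err = ensurePulumiMeta(ctx, legacy)
	require.NoError(t, err)
	assert.Equal(t, 0, meta.Version)
}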