From 52fd18e9db3451e4ef35f6fff7cca6ca54b9b18a Mon Sep 17 00:00:00 2001 From: lyndon <98304688+Lyndon-Li@users.noreply.github.com> Date: Fri, 29 Jul 2022 16:06:18 +0800 Subject: [PATCH] Kopia Integration Change - Storage Configuration (#5142) * unified repo storge config Signed-off-by: Lyndon-Li * add UT Signed-off-by: Lyndon-Li --- changelogs/unreleased/5142-lyndon | 4 + .../restic_repository_controller.go | 3 +- pkg/repository/config/aws.go | 99 +++++ pkg/{restic => repository/config}/aws_test.go | 4 +- pkg/{restic => repository/config}/azure.go | 28 +- .../config}/azure_test.go | 2 +- pkg/{restic => repository/config}/config.go | 49 +-- .../config}/config_test.go | 2 +- pkg/{restic => repository/config}/gcp.go | 20 +- pkg/{restic => repository/config}/gcp_test.go | 4 +- pkg/repository/provider/provider.go | 56 +++ pkg/repository/provider/unified_repo.go | 292 ++++++++++++ pkg/repository/provider/unified_repo_test.go | 414 ++++++++++++++++++ pkg/repository/udmrepo/repo-option-consts.go | 58 +++ pkg/repository/udmrepo/repo.go | 171 ++++++++ pkg/restic/aws.go | 41 -- pkg/restic/common.go | 21 +- pkg/util/ownership/backup_owner.go | 42 ++ 18 files changed, 1208 insertions(+), 102 deletions(-) create mode 100644 changelogs/unreleased/5142-lyndon create mode 100644 pkg/repository/config/aws.go rename pkg/{restic => repository/config}/aws_test.go (96%) rename pkg/{restic => repository/config}/azure.go (88%) rename pkg/{restic => repository/config}/azure_test.go (99%) rename pkg/{restic => repository/config}/config.go (74%) rename pkg/{restic => repository/config}/config_test.go (99%) rename pkg/{restic => repository/config}/gcp.go (59%) rename pkg/{restic => repository/config}/gcp_test.go (95%) create mode 100644 pkg/repository/provider/provider.go create mode 100644 pkg/repository/provider/unified_repo.go create mode 100644 pkg/repository/provider/unified_repo_test.go create mode 100644 pkg/repository/udmrepo/repo-option-consts.go create mode 100644 pkg/repository/udmrepo/repo.go delete mode 100644 pkg/restic/aws.go create mode 100644 pkg/util/ownership/backup_owner.go diff --git a/changelogs/unreleased/5142-lyndon b/changelogs/unreleased/5142-lyndon new file mode 100644 index 0000000000..10286cf0bc --- /dev/null +++ b/changelogs/unreleased/5142-lyndon @@ -0,0 +1,4 @@ +Kopia Integration: Add the Unified Repository Interface definition. +Kopia Integration: Add the changes for Unified Repository storage config. 
+ +Related Issues; #5076, #5080 \ No newline at end of file diff --git a/pkg/controller/restic_repository_controller.go b/pkg/controller/restic_repository_controller.go index 36f0e76a86..3f27de36fc 100644 --- a/pkg/controller/restic_repository_controller.go +++ b/pkg/controller/restic_repository_controller.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -127,7 +128,7 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1 return r.patchResticRepository(ctx, req, repoNotReady(err.Error())) } - repoIdentifier, err := restic.GetRepoIdentifier(loc, req.Spec.VolumeNamespace) + repoIdentifier, err := repoconfig.GetRepoIdentifier(loc, req.Spec.VolumeNamespace) if err != nil { return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) { rr.Status.Message = err.Error() diff --git a/pkg/repository/config/aws.go b/pkg/repository/config/aws.go new file mode 100644 index 0000000000..0ff4ca218a --- /dev/null +++ b/pkg/repository/config/aws.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "os" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/pkg/errors" +) + +const ( + // AWS specific environment variable + awsProfileEnvVar = "AWS_PROFILE" + awsProfileKey = "profile" + awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" +) + +// GetS3ResticEnvVars gets the environment variables that restic +// relies on (AWS_PROFILE) based on info in the provided object +// storage location config map. 
+func GetS3ResticEnvVars(config map[string]string) (map[string]string, error) { + result := make(map[string]string) + + if credentialsFile, ok := config[CredentialsFileKey]; ok { + result[awsCredentialsFileEnvVar] = credentialsFile + } + + if profile, ok := config[awsProfileKey]; ok { + result[awsProfileEnvVar] = profile + } + + return result, nil +} + +// GetS3Credentials gets the S3 credential values according to the information +// of the provided config or the system's environment variables +func GetS3Credentials(config map[string]string) (credentials.Value, error) { + credentialsFile := config[CredentialsFileKey] + if credentialsFile == "" { + credentialsFile = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + } + + if credentialsFile == "" { + return credentials.Value{}, errors.New("missing credential file") + } + + creds := credentials.NewSharedCredentials(credentialsFile, "") + credValue, err := creds.Get() + if err != nil { + return credValue, err + } + + return credValue, nil +} + +// GetAWSBucketRegion returns the AWS region that a bucket is in, or an error +// if the region cannot be determined. +func GetAWSBucketRegion(bucket string) (string, error) { + var region string + + sess, err := session.NewSession() + if err != nil { + return "", errors.WithStack(err) + } + + for _, partition := range endpoints.DefaultPartitions() { + for regionHint := range partition.Regions() { + region, _ = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint) + + // we only need to try a single region hint per partition, so break after the first + break + } + + if region != "" { + return region, nil + } + } + + return "", errors.New("unable to determine bucket's region") +} diff --git a/pkg/restic/aws_test.go b/pkg/repository/config/aws_test.go similarity index 96% rename from pkg/restic/aws_test.go rename to pkg/repository/config/aws_test.go index 51f3ceb993..bdd3e4fa23 100644 --- a/pkg/restic/aws_test.go +++ b/pkg/repository/config/aws_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "testing" @@ -55,7 +55,7 @@ func TestGetS3ResticEnvVars(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := getS3ResticEnvVars(tc.config) + actual, err := GetS3ResticEnvVars(tc.config) require.NoError(t, err) diff --git a/pkg/restic/azure.go b/pkg/repository/config/azure.go similarity index 88% rename from pkg/restic/azure.go rename to pkg/repository/config/azure.go index 20324b8e36..8c5871c527 100644 --- a/pkg/restic/azure.go +++ b/pkg/repository/config/azure.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "context" @@ -37,6 +37,7 @@ const ( storageAccountConfigKey = "storageAccount" storageAccountKeyEnvVarConfigKey = "storageAccountKeyEnvVar" subscriptionIDConfigKey = "subscriptionId" + storageDomainConfigKey = "storageDomain" ) // getSubscriptionID gets the subscription ID from the 'config' map if it contains @@ -131,10 +132,10 @@ func mapLookup(data map[string]string) func(string) string { } } -// getAzureResticEnvVars gets the environment variables that restic +// GetAzureResticEnvVars gets the environment variables that restic // relies on (AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY) based // on info in the provided object storage location config map. 
-func getAzureResticEnvVars(config map[string]string) (map[string]string, error) { +func GetAzureResticEnvVars(config map[string]string) (map[string]string, error) { storageAccountKey, _, err := getStorageAccountKey(config) if err != nil { return nil, err } @@ -158,7 +159,7 @@ func credentialsFileFromEnv() string { // selectCredentialsFile selects the Azure credentials file to use, retrieving it // from the given config or falling back to retrieving it from the environment. func selectCredentialsFile(config map[string]string) string { - if credentialsFile, ok := config[credentialsFileKey]; ok { + if credentialsFile, ok := config[CredentialsFileKey]; ok { return credentialsFile } @@ -208,3 +209,22 @@ func getRequiredValues(getValue func(string) string, keys ...string) (map[string return results, nil } + +// GetAzureStorageDomain gets the Azure storage domain required by an Azure blob connection; +// if the provided config doesn't have the value, get it from the system's environment variables +func GetAzureStorageDomain(config map[string]string) string { + if domain, exist := config[storageDomainConfigKey]; exist { + return domain + } else { + return os.Getenv(cloudNameEnvVar) + } +} + +func GetAzureCredentials(config map[string]string) (string, string, error) { + storageAccountKey, _, err := getStorageAccountKey(config) + if err != nil { + return "", "", err + } + + return config[storageAccountConfigKey], storageAccountKey, nil +} diff --git a/pkg/restic/azure_test.go b/pkg/repository/config/azure_test.go similarity index 99% rename from pkg/restic/azure_test.go rename to pkg/repository/config/azure_test.go index acb2f25065..d20ac2e28b 100644 --- a/pkg/restic/azure_test.go +++ b/pkg/repository/config/azure_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "os" diff --git a/pkg/restic/config.go b/pkg/repository/config/config.go similarity index 74% rename from pkg/restic/config.go rename to pkg/repository/config/config.go index 1600f39fa8..24dc8d6b3d 100644 --- a/pkg/restic/config.go +++ b/pkg/repository/config/config.go @@ -14,17 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( - "context" "fmt" "path" "strings" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/pkg/errors" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -37,11 +33,18 @@ const ( AWSBackend BackendType = "velero.io/aws" AzureBackend BackendType = "velero.io/azure" GCPBackend BackendType = "velero.io/gcp" + FSBackend BackendType = "velero.io/fs" +) + +const ( + // CredentialsFileKey is the key within a BSL config that is checked to see if + // the BSL is using its own credentials, rather than those in the environment + CredentialsFileKey = "credentialsFile" ) // this func is assigned to a package-level variable so it can be // replaced when unit-testing -var getAWSBucketRegion = getBucketRegion +var getAWSBucketRegion = GetAWSBucketRegion // getRepoPrefix returns the prefix of the value of the --repo flag for // restic commands, i.e. everything except the "/".
@@ -55,7 +58,7 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) (string, error) prefix = layout.GetResticDir() } - backendType := getBackendType(location.Spec.Provider) + backendType := GetBackendType(location.Spec.Provider) if repoPrefix := location.Spec.Config["resticRepoPrefix"]; repoPrefix != "" { return repoPrefix, nil @@ -89,7 +92,7 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) (string, error) return "", errors.New("restic repository prefix (resticRepoPrefix) not specified in backup storage location's config") } -func getBackendType(provider string) BackendType { +func GetBackendType(provider string) BackendType { if !strings.Contains(provider, "/") { provider = "velero.io/" + provider } @@ -97,6 +100,10 @@ func getBackendType(provider string) BackendType { return BackendType(provider) } +func IsBackendTypeValid(backendType BackendType) bool { + return (backendType == AWSBackend || backendType == AzureBackend || backendType == GCPBackend || backendType == FSBackend) +} + // GetRepoIdentifier returns the string to be used as the value of the --repo flag in // restic commands for the given repository. func GetRepoIdentifier(location *velerov1api.BackupStorageLocation, name string) (string, error) { @@ -107,29 +114,3 @@ func GetRepoIdentifier(location *velerov1api.BackupStorageLocation, name string) return fmt.Sprintf("%s/%s", strings.TrimSuffix(prefix, "/"), name), nil } - -// getBucketRegion returns the AWS region that a bucket is in, or an error -// if the region cannot be determined. -func getBucketRegion(bucket string) (string, error) { - var region string - - sess, err := session.NewSession() - if err != nil { - return "", errors.WithStack(err) - } - - for _, partition := range endpoints.DefaultPartitions() { - for regionHint := range partition.Regions() { - region, _ = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint) - - // we only need to try a single region hint per partition, so break after the first - break - } - - if region != "" { - return region, nil - } - } - - return "", errors.New("unable to determine bucket's region") -} diff --git a/pkg/restic/config_test.go b/pkg/repository/config/config_test.go similarity index 99% rename from pkg/restic/config_test.go rename to pkg/repository/config/config_test.go index 8418d68085..2fa26a1936 100644 --- a/pkg/restic/config_test.go +++ b/pkg/repository/config/config_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "testing" diff --git a/pkg/restic/gcp.go b/pkg/repository/config/gcp.go similarity index 59% rename from pkg/restic/gcp.go rename to pkg/repository/config/gcp.go index 96d1edfe60..ed9e3ec6a8 100644 --- a/pkg/restic/gcp.go +++ b/pkg/repository/config/gcp.go @@ -14,21 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config + +import "os" const ( // GCP specific environment variable gcpCredentialsFileEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" ) -// getGCPResticEnvVars gets the environment variables that restic relies +// GetGCPResticEnvVars gets the environment variables that restic relies // on based on info in the provided object storage location config map. 
-func getGCPResticEnvVars(config map[string]string) (map[string]string, error) { +func GetGCPResticEnvVars(config map[string]string) (map[string]string, error) { result := make(map[string]string) - if credentialsFile, ok := config[credentialsFileKey]; ok { + if credentialsFile, ok := config[CredentialsFileKey]; ok { result[gcpCredentialsFileEnvVar] = credentialsFile } return result, nil } + +// GetGCPCredentials gets the credential file required by a GCP bucket connection; +// if the provided config doesn't have the value, get it from the system's environment variables +func GetGCPCredentials(config map[string]string) string { + if credentialsFile, ok := config[CredentialsFileKey]; ok { + return credentialsFile + } else { + return os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + } +} diff --git a/pkg/restic/gcp_test.go b/pkg/repository/config/gcp_test.go similarity index 95% rename from pkg/restic/gcp_test.go rename to pkg/repository/config/gcp_test.go index 37f2bf2c70..cd4411e3bc 100644 --- a/pkg/restic/gcp_test.go +++ b/pkg/repository/config/gcp_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "testing" @@ -46,7 +46,7 @@ func TestGetGCPResticEnvVars(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := getGCPResticEnvVars(tc.config) + actual, err := GetGCPResticEnvVars(tc.config) require.NoError(t, err) diff --git a/pkg/repository/provider/provider.go b/pkg/repository/provider/provider.go new file mode 100644 index 0000000000..36d69a594e --- /dev/null +++ b/pkg/repository/provider/provider.go @@ -0,0 +1,56 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package provider + +import ( + "context" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +// RepoParam includes the parameters to manipulate a backup repository +// SubDir is used to generate the path in the backup storage +type RepoParam struct { + SubDir string + BackupLocation *velerov1api.BackupStorageLocation +} + +type Provider interface { + //InitRepo initializes a repository in a new storage place + InitRepo(ctx context.Context, param RepoParam) error + + //ConnectToRepo establishes the connection to a + //storage place where a repository has already been initialized + ConnectToRepo(ctx context.Context, param RepoParam) error + + //PrepareRepo is a combination of InitRepo and ConnectToRepo, + //it may do initializing + connecting, connecting only if the repository + //is already initialized, or do nothing if the repository is already connected + PrepareRepo(ctx context.Context, param RepoParam) error + + //PruneRepo does a full prune/maintenance of the repository + PruneRepo(ctx context.Context, param RepoParam) error + + //PruneRepoQuick does a quick prune/maintenance of the repository if available + PruneRepoQuick(ctx context.Context, param RepoParam) error + + //EnsureUnlockRepo ensures that any stale file locks in the storage are removed + EnsureUnlockRepo(ctx context.Context, param RepoParam) error + + //Forget deletes a snapshot from the repository + Forget(ctx context.Context, snapshotID string, param RepoParam) error +} diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go new file mode 100644 index 0000000000..49cef09ce4 --- /dev/null +++ b/pkg/repository/provider/unified_repo.go @@ -0,0 +1,292 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package provider + +import ( + "context" + "fmt" + "path" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/vmware-tanzu/velero/internal/credentials" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + "github.com/vmware-tanzu/velero/pkg/util/ownership" +) + +type unifiedRepoProvider struct { + credentialsFileStore credentials.FileStore + workPath string + repoService udmrepo.BackupRepoService + log logrus.FieldLogger +} + +// this func is assigned to a package-level variable so it can be +// replaced when unit-testing +var getAzureCredentials = repoconfig.GetAzureCredentials +var getS3Credentials = repoconfig.GetS3Credentials +var getGCPCredentials = repoconfig.GetGCPCredentials +var getS3BucketRegion = repoconfig.GetAWSBucketRegion +var getAzureStorageDomain = repoconfig.GetAzureStorageDomain + +// NewUnifiedRepoProvider creates the service provider for Unified Repo +// workPath is the path for Unified Repo to store some local information +// workPath could be empty, if so, the default path will be used +func NewUnifiedRepoProvider( + credentialFileStore credentials.FileStore, + workPath string, + log logrus.FieldLogger, +) (Provider, error) { + repo := unifiedRepoProvider{ + credentialsFileStore: credentialFileStore, + workPath: workPath, + log: log, + } + + repo.repoService = createRepoService(log) + + log.Debug("Finished create unified repo service") + + return &repo, nil +} + +func (urp *unifiedRepoProvider) InitRepo(ctx context.Context, param RepoParam) error { + log := urp.log.WithFields(logrus.Fields{ + "BSL name": param.BackupLocation.Name, + "BSL UID": param.BackupLocation.UID, + }) + + log.Debug("Start to init repo") + + repoOption, err := urp.getRepoOption(param) + if err != nil { + return errors.Wrap(err, "error to get repo options") + } + + err = urp.repoService.Init(ctx, repoOption, true) + if err != nil { + return errors.Wrap(err, "error to init backup repo") + } + + log.Debug("Init repo complete") + + return nil +} + +func (urp *unifiedRepoProvider) ConnectToRepo(ctx context.Context, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) PruneRepo(ctx context.Context, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) PruneRepoQuick(ctx context.Context, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) EnsureUnlockRepo(ctx context.Context, param RepoParam) error { + return nil +} + +func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) getRepoPassword(param RepoParam) (string, error) { + ///TODO: get repo password + + return "", nil +} + +func (urp *unifiedRepoProvider) getRepoOption(param RepoParam) (udmrepo.RepoOptions, error) { + repoOption := udmrepo.RepoOptions{ + StorageType: getStorageType(param.BackupLocation), + ConfigFilePath: getRepoConfigFile(urp.workPath, string(param.BackupLocation.UID)), + Ownership: udmrepo.OwnershipOptions{ + Username: ownership.GetRepositoryOwner().Username, + DomainName: ownership.GetRepositoryOwner().DomainName, + }, + StorageOptions: make(map[string]string), + GeneralOptions: make(map[string]string), + } + + repoPassword, err 
:= urp.getRepoPassword(param) + if err != nil { + return repoOption, errors.Wrap(err, "error to get repo password") + } + + repoOption.RepoPassword = repoPassword + + storeVar, err := getStorageVariables(param.BackupLocation, param.SubDir) + if err != nil { + return repoOption, errors.Wrap(err, "error to get storage variables") + } + + for k, v := range storeVar { + repoOption.StorageOptions[k] = v + } + + storeCred, err := getStorageCredentials(param.BackupLocation, urp.credentialsFileStore) + if err != nil { + return repoOption, errors.Wrap(err, "error to get repo credential env") + } + + for k, v := range storeCred { + repoOption.StorageOptions[k] = v + } + + return repoOption, nil +} + +func getStorageType(backupLocation *velerov1api.BackupStorageLocation) string { + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + + switch backendType { + case repoconfig.AWSBackend: + return udmrepo.StorageTypeS3 + case repoconfig.AzureBackend: + return udmrepo.StorageTypeAzure + case repoconfig.GCPBackend: + return udmrepo.StorageTypeGcs + case repoconfig.FSBackend: + return udmrepo.StorageTypeFs + default: + return "" + } +} + +func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, credentialsFileStore credentials.FileStore) (map[string]string, error) { + result := make(map[string]string) + var err error + + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + if !repoconfig.IsBackendTypeValid(backendType) { + return map[string]string{}, errors.New("invalid storage provider") + } + + config := backupLocation.Spec.Config + if config == nil { + config = map[string]string{} + } + + if backupLocation.Spec.Credential != nil { + config[repoconfig.CredentialsFileKey], err = credentialsFileStore.Path(backupLocation.Spec.Credential) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get credential file in bsl") + } + } + + switch backendType { + case repoconfig.AWSBackend: + credValue, err := getS3Credentials(config) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get s3 credentials") + } + result[udmrepo.StoreOptionS3KeyId] = credValue.AccessKeyID + result[udmrepo.StoreOptionS3Provider] = credValue.ProviderName + result[udmrepo.StoreOptionS3SecretKey] = credValue.SecretAccessKey + result[udmrepo.StoreOptionS3Token] = credValue.SessionToken + + case repoconfig.AzureBackend: + storageAccount, accountKey, err := getAzureCredentials(config) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get azure credentials") + } + result[udmrepo.StoreOptionAzureStorageAccount] = storageAccount + result[udmrepo.StoreOptionAzureKey] = accountKey + + case repoconfig.GCPBackend: + result[udmrepo.StoreOptionCredentialFile] = getGCPCredentials(config) + } + + return result, nil +} + +func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoName string) (map[string]string, error) { + result := make(map[string]string) + + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + if !repoconfig.IsBackendTypeValid(backendType) { + return map[string]string{}, errors.New("invalid storage provider") + } + + config := backupLocation.Spec.Config + if config == nil { + config = map[string]string{} + } + + bucket := strings.Trim(config["bucket"], "/") + prefix := strings.Trim(config["prefix"], "/") + if backupLocation.Spec.ObjectStorage != nil { + bucket = strings.Trim(backupLocation.Spec.ObjectStorage.Bucket, "/") + prefix = 
strings.Trim(backupLocation.Spec.ObjectStorage.Prefix, "/") + } + + prefix = path.Join(prefix, udmrepo.StoreOptionPrefixName, repoName) + "/" + + region := config["region"] + + if backendType == repoconfig.AWSBackend { + s3Url := config["s3Url"] + + var err error + if s3Url == "" { + region, err = getS3BucketRegion(bucket) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get s3 bucket region") + } + + s3Url = fmt.Sprintf("s3-%s.amazonaws.com", region) + } + + result[udmrepo.StoreOptionS3Endpoint] = strings.Trim(s3Url, "/") + result[udmrepo.StoreOptionS3DisableTlsVerify] = config["insecureSkipTLSVerify"] + } else if backendType == repoconfig.AzureBackend { + result[udmrepo.StoreOptionAzureDomain] = getAzureStorageDomain(config) + } + + result[udmrepo.StoreOptionOssBucket] = bucket + result[udmrepo.StoreOptionPrefix] = prefix + result[udmrepo.StoreOptionOssRegion] = strings.Trim(region, "/") + result[udmrepo.StoreOptionFsPath] = config["fspath"] + + return result, nil +} + +func getRepoConfigFile(workPath string, repoID string) string { + ///TODO: call udmrepo to get config file + return "" +} + +func createRepoService(log logrus.FieldLogger) udmrepo.BackupRepoService { + ///TODO: call udmrepo create repo service + return nil +} diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go new file mode 100644 index 0000000000..ee78c7b5d2 --- /dev/null +++ b/pkg/repository/provider/unified_repo_test.go @@ -0,0 +1,414 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "errors" + "testing" + + awscredentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1api "k8s.io/api/core/v1" + + filecredentials "github.com/vmware-tanzu/velero/internal/credentials" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + velerotest "github.com/vmware-tanzu/velero/pkg/test" +) + +func TestGetStorageCredentials(t *testing.T) { + testCases := []struct { + name string + backupLocation velerov1api.BackupStorageLocation + credFileStore filecredentials.FileStore + getAzureCredentials func(map[string]string) (string, string, error) + getS3Credentials func(map[string]string) (awscredentials.Value, error) + getGCPCredentials func(map[string]string) string + expected map[string]string + expectedErr string + }{ + { + name: "invalid provider", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "invalid-provider", + }, + }, + expected: map[string]string{}, + expectedErr: "invalid storage provider", + }, + { + name: "credential section exists in BSL, file store fail", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "aws", + Credential: &corev1api.SecretKeySelector{}, + }, + }, + credFileStore: velerotest.NewFakeCredentialsFileStore("", errors.New("fake error")), + expected: map[string]string{}, + expectedErr: "error get credential file in bsl: fake error", + }, + { + name: "aws, Credential section not exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + }, + }, + getS3Credentials: func(config map[string]string) (awscredentials.Value, error) { + return awscredentials.Value{ + AccessKeyID: "from: " + config["credentialsFile"], + }, nil + }, + + expected: map[string]string{ + "accessKeyID": "from: credentials-from-config-map", + "providerName": "", + "secretAccessKey": "", + "sessionToken": "", + }, + }, + { + name: "aws, Credential section exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + Credential: &corev1api.SecretKeySelector{}, + }, + }, + credFileStore: velerotest.NewFakeCredentialsFileStore("credentials-from-credential-key", nil), + getS3Credentials: func(config map[string]string) (awscredentials.Value, error) { + return awscredentials.Value{ + AccessKeyID: "from: " + config["credentialsFile"], + }, nil + }, + + expected: map[string]string{ + "accessKeyID": "from: credentials-from-credential-key", + "providerName": "", + "secretAccessKey": "", + "sessionToken": "", + }, + }, + { + name: "aws, get credentials fail", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + }, + }, + credFileStore: velerotest.NewFakeCredentialsFileStore("", nil), + getS3Credentials: func(config map[string]string) (awscredentials.Value, error) { + return awscredentials.Value{}, errors.New("fake error") + }, + expected: map[string]string{}, + expectedErr: "error get s3 credentials: fake error", + }, + { + name: "azure, Credential 
section exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + Credential: &corev1api.SecretKeySelector{}, + }, + }, + credFileStore: velerotest.NewFakeCredentialsFileStore("credentials-from-credential-key", nil), + getAzureCredentials: func(config map[string]string) (string, string, error) { + return "storage account from: " + config["credentialsFile"], "", nil + }, + + expected: map[string]string{ + "storageAccount": "storage account from: credentials-from-credential-key", + "storageKey": "", + }, + }, + { + name: "azure, get azure credentials fails", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + }, + }, + getAzureCredentials: func(config map[string]string) (string, string, error) { + return "", "", errors.New("fake error") + }, + + expected: map[string]string{}, + expectedErr: "error get azure credentials: fake error", + }, + { + name: "gcp, Credential section not exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/gcp", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + }, + }, + getGCPCredentials: func(config map[string]string) string { + return "credentials-from-config-map" + }, + + expected: map[string]string{ + "credFile": "credentials-from-config-map", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + getAzureCredentials = tc.getAzureCredentials + getS3Credentials = tc.getS3Credentials + getGCPCredentials = tc.getGCPCredentials + + actual, err := getStorageCredentials(&tc.backupLocation, tc.credFileStore) + + require.Equal(t, tc.expected, actual) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} + +func TestGetStorageVariables(t *testing.T) { + testCases := []struct { + name string + backupLocation velerov1api.BackupStorageLocation + repoName string + getS3BucketRegion func(string) (string, error) + getAzureStorageDomain func(map[string]string) string + expected map[string]string + expectedErr string + }{ + { + name: "invalid provider", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "invalid-provider", + }, + }, + expected: map[string]string{}, + expectedErr: "invalid storage provider", + }, + { + name: "aws, ObjectStorage section not exists in BSL, s3Url exist", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix", + "region": "fake-region/", + "s3Url": "fake-url", + "insecureSkipTLSVerify": "true", + }, + }, + }, + expected: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix/unified-repo/", + "region": "fake-region", + "fspath": "", + "endpoint": "fake-url", + "skipTLSVerify": "true", + }, + }, + { + name: "aws, ObjectStorage section not exists in BSL, s3Url not exist", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "bucket": "fake-bucket", + 
"prefix": "fake-prefix", + "insecureSkipTLSVerify": "false", + }, + }, + }, + getS3BucketRegion: func(bucket string) (string, error) { + return "region from bucket: " + bucket, nil + }, + expected: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix/unified-repo/", + "region": "region from bucket: fake-bucket", + "fspath": "", + "endpoint": "s3-region from bucket: fake-bucket.amazonaws.com", + "skipTLSVerify": "false", + }, + }, + { + name: "aws, ObjectStorage section not exists in BSL, s3Url not exist, get region fail", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{}, + }, + }, + getS3BucketRegion: func(bucket string) (string, error) { + return "", errors.New("fake error") + }, + expected: map[string]string{}, + expectedErr: "error get s3 bucket region: fake error", + }, + { + name: "aws, ObjectStorage section exists in BSL, s3Url exist", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "bucket": "fake-bucket-config", + "prefix": "fake-prefix-config", + "region": "fake-region", + "s3Url": "fake-url", + "insecureSkipTLSVerify": "false", + }, + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "fake-bucket-object-store", + Prefix: "fake-prefix-object-store", + }, + }, + }, + }, + getS3BucketRegion: func(bucket string) (string, error) { + return "region from bucket: " + bucket, nil + }, + expected: map[string]string{ + "bucket": "fake-bucket-object-store", + "prefix": "fake-prefix-object-store/unified-repo/", + "region": "fake-region", + "fspath": "", + "endpoint": "fake-url", + "skipTLSVerify": "false", + }, + }, + { + name: "azure, ObjectStorage section exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "bucket": "fake-bucket-config", + "prefix": "fake-prefix-config", + "region": "fake-region", + "fspath": "", + "storageDomain": "fake-domain", + }, + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "fake-bucket-object-store", + Prefix: "fake-prefix-object-store", + }, + }, + }, + }, + getAzureStorageDomain: func(config map[string]string) string { + return config["storageDomain"] + }, + expected: map[string]string{ + "bucket": "fake-bucket-object-store", + "prefix": "fake-prefix-object-store/unified-repo/", + "region": "fake-region", + "fspath": "", + "storageDomain": "fake-domain", + }, + }, + { + name: "azure, ObjectStorage section not exists in BSL, repo name exists", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix", + "region": "fake-region", + "fspath": "", + "storageDomain": "fake-domain", + }, + }, + }, + repoName: "//fake-name//", + getAzureStorageDomain: func(config map[string]string) string { + return config["storageDomain"] + }, + expected: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix/unified-repo/fake-name/", + "region": "fake-region", + "fspath": "", + "storageDomain": "fake-domain", + }, + }, + { + name: "fs", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: 
"velero.io/fs", + Config: map[string]string{ + "fspath": "fake-path", + "prefix": "fake-prefix", + }, + }, + }, + expected: map[string]string{ + "fspath": "fake-path", + "bucket": "", + "prefix": "fake-prefix/unified-repo/", + "region": "", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + getS3BucketRegion = tc.getS3BucketRegion + getAzureStorageDomain = tc.getAzureStorageDomain + + actual, err := getStorageVariables(&tc.backupLocation, tc.repoName) + + require.Equal(t, tc.expected, actual) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} diff --git a/pkg/repository/udmrepo/repo-option-consts.go b/pkg/repository/udmrepo/repo-option-consts.go new file mode 100644 index 0000000000..7cf55d017c --- /dev/null +++ b/pkg/repository/udmrepo/repo-option-consts.go @@ -0,0 +1,58 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package udmrepo + +const ( + StorageTypeS3 = "s3" + StorageTypeAzure = "azure" + StorageTypeFs = "filesystem" + StorageTypeGcs = "gcs" + + GenOptionMaintainMode = "mode" + GenOptionMaintainFull = "full" + GenOptionMaintainQuick = "quick" + + StoreOptionS3KeyId = "accessKeyID" + StoreOptionS3Provider = "providerName" + StoreOptionS3SecretKey = "secretAccessKey" + StoreOptionS3Token = "sessionToken" + StoreOptionS3Endpoint = "endpoint" + StoreOptionS3DisableTls = "doNotUseTLS" + StoreOptionS3DisableTlsVerify = "skipTLSVerify" + + StoreOptionAzureKey = "storageKey" + StoreOptionAzureDomain = "storageDomain" + StoreOptionAzureStorageAccount = "storageAccount" + StoreOptionAzureToken = "sasToken" + + StoreOptionFsPath = "fspath" + + StoreOptionGcsReadonly = "readonly" + + StoreOptionOssBucket = "bucket" + StoreOptionOssRegion = "region" + + StoreOptionCredentialFile = "credFile" + StoreOptionPrefix = "prefix" + StoreOptionPrefixName = "unified-repo" + + ThrottleOptionReadOps = "readOPS" + ThrottleOptionWriteOps = "writeOPS" + ThrottleOptionListOps = "listOPS" + ThrottleOptionUploadBytes = "uploadBytes" + ThrottleOptionDownloadBytes = "downloadBytes" +) diff --git a/pkg/repository/udmrepo/repo.go b/pkg/repository/udmrepo/repo.go new file mode 100644 index 0000000000..be18a6d176 --- /dev/null +++ b/pkg/repository/udmrepo/repo.go @@ -0,0 +1,171 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package udmrepo + +import ( + "context" + "io" + "time" +) + +type ID string + +// ManifestEntryMetadata is the metadata describing one manifest data +type ManifestEntryMetadata struct { + ID ID // The ID of the manifest data + Length int32 // The data size of the manifest data + Labels map[string]string // Labels saved together with the manifest data + ModTime time.Time // Modified time of the manifest data +} + +type RepoManifest struct { + Payload interface{} // The user data of manifest + Metadata *ManifestEntryMetadata // The metadata data of manifest +} + +type ManifestFilter struct { + Labels map[string]string +} + +const ( + // Below consts describe the data type of one object. + // Metadata: This type describes how the data is organized. + // For a file system backup, the Metadata describes a Dir or File. + // For a block backup, the Metadata describes a Disk and its incremental link. + ObjectDataTypeUnknown int = 0 + ObjectDataTypeMetadata int = 1 + ObjectDataTypeData int = 2 + + // Below consts define the access mode when creating an object for write + ObjectDataAccessModeUnknown int = 0 + ObjectDataAccessModeFile int = 1 + ObjectDataAccessModeBlock int = 2 + + ObjectDataBackupModeUnknown int = 0 + ObjectDataBackupModeFull int = 1 + ObjectDataBackupModeInc int = 2 +) + +// ObjectWriteOptions defines the options when creating an object for write +type ObjectWriteOptions struct { + FullPath string // Full logical path of the object + DataType int // OBJECT_DATA_TYPE_* + Description string // A description of the object, could be empty + Prefix ID // A prefix of the name used to save the object + AccessMode int // OBJECT_DATA_ACCESS_* + BackupMode int // OBJECT_DATA_BACKUP_* +} + +// OwnershipOptions is used to add some access control to the unified repository. +// For example, some privileged operations of the unified repository can be done by the +// repository owner only; the data of a backup may be manipulated by the backup owner +// who created it only. It is optional for a backup repository to support this ownership control. +type OwnershipOptions struct { + Username string + DomainName string + FullQualified string +} + +type RepoOptions struct { + // A repository specific string to identify a backup storage, i.e., "s3", "filesystem" + StorageType string + // Backup repository password, if any + RepoPassword string + // A custom path to save the repository's configuration, if any + ConfigFilePath string + // The ownership for the current repository operation + Ownership OwnershipOptions + // Other repository specific options + GeneralOptions map[string]string + // Storage specific options + StorageOptions map[string]string + + // Description of the backup repository + Description string +} + +// BackupRepoService is used to initialize, open or maintain a backup repository +type BackupRepoService interface { + // Create a backup repository or connect to an existing backup repository. + // repoOption: option to the backup repository and the underlying backup storage. + // createNew: indicates whether to create a new or connect to an existing backup repository. + Init(ctx context.Context, repoOption RepoOptions, createNew bool) error + + // Open a backup repository that has been created/connected. + // repoOption: options to open the backup repository and the underlying storage. + Open(ctx context.Context, repoOption RepoOptions) (BackupRepo, error) + + // Periodically called to maintain the backup repository to eliminate redundant data and improve performance.
+ // repoOption: options to maintain the backup repository. + Maintain(ctx context.Context, repoOption RepoOptions) error +} + +// BackupRepo provides the access to the backup repository +type BackupRepo interface { + // Open an existing object for read. + // id: the object's unified identifier. + OpenObject(ctx context.Context, id ID) (ObjectReader, error) + + // Get a manifest data. + GetManifest(ctx context.Context, id ID, mani *RepoManifest) error + + // Get one or more manifest data that match the given labels + FindManifests(ctx context.Context, filter ManifestFilter) ([]*ManifestEntryMetadata, error) + + // Create a new object and return the object's writer interface. + // return: A unified identifier of the object on success. + NewObjectWriter(ctx context.Context, opt ObjectWriteOptions) ObjectWriter + + // Save a manifest object + PutManifest(ctx context.Context, mani RepoManifest) (ID, error) + + // Delete a manifest object + DeleteManifest(ctx context.Context, id ID) error + + // Flush all the backup repository data + Flush(ctx context.Context) error + + // Get the local time of the backup repository. It may be different from the time of the caller + Time() time.Time + + // Close the backup repository + Close(ctx context.Context) error +} + +type ObjectReader interface { + io.ReadCloser + io.Seeker + + // Length returns the logical size of the object + Length() int64 +} + +type ObjectWriter interface { + io.WriteCloser + + // For some cases, i.e. block incremental, the object is not written sequentially + io.Seeker + + // Periodically called to preserve the state of data written to the repo so far. + // Return a unified identifier that represent the current state. + // An empty ID could be returned on success if the backup repository doesn't support this. + Checkpoint() (ID, error) + + // Wait for the completion of the object write. + // Result returns the object's unified identifier after the write completes. + Result() (ID, error) +} diff --git a/pkg/restic/aws.go b/pkg/restic/aws.go deleted file mode 100644 index d97c5f0b77..0000000000 --- a/pkg/restic/aws.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restic - -const ( - // AWS specific environment variable - awsProfileEnvVar = "AWS_PROFILE" - awsProfileKey = "profile" - awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" -) - -// getS3ResticEnvVars gets the environment variables that restic -// relies on (AWS_PROFILE) based on info in the provided object -// storage location config map. 
-func getS3ResticEnvVars(config map[string]string) (map[string]string, error) { - result := make(map[string]string) - - if credentialsFile, ok := config[credentialsFileKey]; ok { - result[awsCredentialsFileEnvVar] = credentialsFile - } - - if profile, ok := config[awsProfileKey]; ok { - result[awsProfileEnvVar] = profile - } - - return result, nil -} diff --git a/pkg/restic/common.go b/pkg/restic/common.go index 23c09e558e..580bd27080 100644 --- a/pkg/restic/common.go +++ b/pkg/restic/common.go @@ -32,6 +32,7 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/label" + repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) @@ -63,10 +64,6 @@ const ( // should be excluded from restic backup. VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes" - // credentialsFileKey is the key within a BSL config that is checked to see if - // the BSL is using its own credentials, rather than those in the environment - credentialsFileKey = "credentialsFile" - // Deprecated. // // TODO(2.0): remove @@ -322,24 +319,24 @@ func CmdEnv(backupLocation *velerov1api.BackupStorageLocation, credentialFileSto if err != nil { return []string{}, errors.WithStack(err) } - config[credentialsFileKey] = credsFile + config[repoconfig.CredentialsFileKey] = credsFile } - backendType := getBackendType(backupLocation.Spec.Provider) + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) switch backendType { - case AWSBackend: - customEnv, err = getS3ResticEnvVars(config) + case repoconfig.AWSBackend: + customEnv, err = repoconfig.GetS3ResticEnvVars(config) if err != nil { return []string{}, err } - case AzureBackend: - customEnv, err = getAzureResticEnvVars(config) + case repoconfig.AzureBackend: + customEnv, err = repoconfig.GetAzureResticEnvVars(config) if err != nil { return []string{}, err } - case GCPBackend: - customEnv, err = getGCPResticEnvVars(config) + case repoconfig.GCPBackend: + customEnv, err = repoconfig.GetGCPResticEnvVars(config) if err != nil { return []string{}, err } diff --git a/pkg/util/ownership/backup_owner.go b/pkg/util/ownership/backup_owner.go new file mode 100644 index 0000000000..078c799dd9 --- /dev/null +++ b/pkg/util/ownership/backup_owner.go @@ -0,0 +1,42 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ownership + +import "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + +const ( + defaultOwnerUsername = "default" + defaultOwnerDomain = "default" +) + +// GetBackupOwner returns the owner used by uploaders when saving a snapshot or +// opening the unified repository. 
At present, use the default owner only +func GetBackupOwner() udmrepo.OwnershipOptions { + return udmrepo.OwnershipOptions{ + Username: defaultOwnerUsername, + DomainName: defaultOwnerDomain, + } +} + +// GetRepositoryOwner returns the owner used to create/connect the unified repository. +// At present, use the default owner only +func GetRepositoryOwner() udmrepo.OwnershipOptions { + return udmrepo.OwnershipOptions{ + Username: defaultOwnerUsername, + DomainName: defaultOwnerDomain, + } +}
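
For reviewers who want to see the relocated config helpers in action, here is a minimal, self-contained sketch of how GetS3ResticEnvVars maps a BackupStorageLocation config to the environment variables restic consumes; the credentials path and profile name are made-up values for illustration only.

package main

import (
	"fmt"

	repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config"
)

func main() {
	// Keys as they would appear in spec.config of a BackupStorageLocation.
	bslConfig := map[string]string{
		"credentialsFile": "/credentials/cloud", // hypothetical path
		"profile":         "velero",             // hypothetical AWS profile
	}

	// GetS3ResticEnvVars copies the values into the env var names restic expects:
	// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE.
	envVars, err := repoconfig.GetS3ResticEnvVars(bslConfig)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(envVars["AWS_SHARED_CREDENTIALS_FILE"]) // /credentials/cloud
	fmt.Println(envVars["AWS_PROFILE"])                 // velero
}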
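
The mapping performed by getStorageVariables is easiest to see as one more test in unified_repo_test.go; the sketch below assumes it lives in the same provider package (the function is unexported) and uses made-up bucket, prefix, region, and s3Url values.

func TestGetStorageVariablesExample(t *testing.T) {
	bsl := velerov1api.BackupStorageLocation{
		Spec: velerov1api.BackupStorageLocationSpec{
			Provider: "velero.io/aws",
			Config: map[string]string{
				"bucket": "my-bucket",
				"prefix": "backups",
				"region": "us-east-1",
				"s3Url":  "s3-us-east-1.amazonaws.com",
			},
		},
	}

	vars, err := getStorageVariables(&bsl, "my-repo")
	require.NoError(t, err)

	// The repo name is appended below the fixed "unified-repo" prefix,
	// and the s3Url is passed through as the endpoint because it is non-empty.
	assert.Equal(t, "my-bucket", vars["bucket"])
	assert.Equal(t, "backups/unified-repo/my-repo/", vars["prefix"])
	assert.Equal(t, "us-east-1", vars["region"])
	assert.Equal(t, "s3-us-east-1.amazonaws.com", vars["endpoint"])
}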
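
How a caller is expected to wire the new provider can be sketched as below; this assumes the fake credentials file store from this patch's test helpers stands in for the real kube-backed store, and InitRepo cannot actually complete yet because createRepoService is still a TODO in this change.

package example // hypothetical consumer, for illustration only

import (
	"context"

	"github.com/sirupsen/logrus"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/repository/provider"
	velerotest "github.com/vmware-tanzu/velero/pkg/test"
)

func initRepoExample(ctx context.Context, bsl *velerov1api.BackupStorageLocation) error {
	// Fake file store from this patch's tests; the server would use its kube-backed store.
	credFileStore := velerotest.NewFakeCredentialsFileStore("/credentials/cloud", nil)

	// An empty work path makes the provider fall back to its default local path.
	repoProvider, err := provider.NewUnifiedRepoProvider(credFileStore, "", logrus.New())
	if err != nil {
		return err
	}

	// SubDir is typically the volume namespace of the BackupRepository being set up.
	return repoProvider.InitRepo(ctx, provider.RepoParam{
		SubDir:         "nginx-example",
		BackupLocation: bsl,
	})
}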
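
Finally, the intended call flow across the udmrepo interfaces can be sketched as follows; svc is assumed to be some future BackupRepoService implementation (for example a Kopia-backed one), which this PR does not yet provide, and the object path and payload are placeholders.

package example // hypothetical consumer, for illustration only

import (
	"context"

	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
)

func writeOneObject(ctx context.Context, svc udmrepo.BackupRepoService, opt udmrepo.RepoOptions) (udmrepo.ID, error) {
	// Connect to an already-initialized repository (createNew=false), then open it.
	if err := svc.Init(ctx, opt, false); err != nil {
		return "", err
	}
	repo, err := svc.Open(ctx, opt)
	if err != nil {
		return "", err
	}
	defer repo.Close(ctx)

	// Create a writer for a metadata object and persist a small payload.
	writer := repo.NewObjectWriter(ctx, udmrepo.ObjectWriteOptions{
		FullPath:   "/snapshots/pvc-1", // placeholder logical path
		DataType:   udmrepo.ObjectDataTypeMetadata,
		AccessMode: udmrepo.ObjectDataAccessModeFile,
		BackupMode: udmrepo.ObjectDataBackupModeFull,
	})
	if _, err := writer.Write([]byte("example payload")); err != nil {
		writer.Close()
		return "", err
	}
	id, err := writer.Result()
	writer.Close()
	if err != nil {
		return "", err
	}

	// Flush repository data so the newly written object is persisted.
	return id, repo.Flush(ctx)
}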