diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 78b8ec117e..8b0489fa97 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1608,6 +1608,15 @@ func (cluster *Cluster) IsPodMonitorEnabled() bool { return false } +// IsBarmanBackupConfigured returns true if one of the possible backup destinations +// is configured, false otherwise +func (backupConfiguration *BackupConfiguration) IsBarmanBackupConfigured() bool { + return backupConfiguration != nil && backupConfiguration.BarmanObjectStore != nil && + (backupConfiguration.BarmanObjectStore.AzureCredentials != nil || + backupConfiguration.BarmanObjectStore.S3Credentials != nil || + backupConfiguration.BarmanObjectStore.GoogleCredentials != nil) +} + // IsBarmanEndpointCASet returns true if we have a CA bundle for the endpoint // false otherwise func (backupConfiguration *BackupConfiguration) IsBarmanEndpointCASet() bool { @@ -1621,8 +1630,13 @@ func (backupConfiguration *BackupConfiguration) IsBarmanEndpointCASet() bool { // BuildPostgresOptions create the list of options that // should be added to the PostgreSQL configuration to // recover given a certain target -func (target RecoveryTarget) BuildPostgresOptions() string { +func (target *RecoveryTarget) BuildPostgresOptions() string { result := "" + + if target == nil { + return result + } + if target.TargetTLI != "" { result += fmt.Sprintf( "recovery_target_timeline = '%v'\n", diff --git a/controllers/cluster_create.go b/controllers/cluster_create.go index f18f924d29..3c69bc8bed 100644 --- a/controllers/cluster_create.go +++ b/controllers/cluster_create.go @@ -877,11 +877,11 @@ func (r *ClusterReconciler) createPrimaryInstance( if cluster.Status.LatestGeneratedNode != 0 { // We are we creating a new blank primary when we had previously generated - // other nodes and we don't have any PVC to reuse? + // other nodes, and we don't have any PVC to reuse? 
// This can happen when: // // 1 - the user deletes all the PVCs and all the Pods in a cluster - // (and why would an user do that?) + // (and why would a user do that?) // 2 - the cache isn't ready for Pods and ready for the Cluster, // so we actually haven't the first pod in our managed list // but it's still in the API Server @@ -906,7 +906,7 @@ func (r *ClusterReconciler) createPrimaryInstance( if err == specs.ErrorInvalidSize { // This error should have been caught by the validating // webhook, but since we are here the user must have disabled server-side - // validation and we must react. + // validation, and we must react. contextLogger.Info("The size specified for the cluster is not valid", "size", cluster.Spec.StorageConfiguration.Size) diff --git a/go.sum b/go.sum index e7fbfc11e8..b040880d51 100644 --- a/go.sum +++ b/go.sum @@ -102,7 +102,6 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -425,17 +424,14 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo v1.16.6-0.20211022041942-6e68f79684d4 h1:+itWF48KGausVSUDqCT3aHonVOrIPEt+rM06rChfwSo= github.com/onsi/ginkgo v1.16.6-0.20211022041942-6e68f79684d4/go.mod h1:FGGTNz05swxobKgpWKhnxbEiUUxN+CeHRdF9ViWWPDw= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -556,7 +552,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd/api/v3 v3.5.0/go.mod 
h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= @@ -592,17 +587,14 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -616,8 +608,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -653,7 +643,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -703,7 +692,6 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f 
h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -806,11 +794,8 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1086,32 +1071,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= -k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= k8s.io/api v0.23.6 
h1:yOK34wbYECH4RsJbQ9sfkFK3O7f/DUHRlzFehkqZyVw= k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g= -k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= -k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ= k8s.io/apiextensions-apiserver v0.23.6 h1:v58cQ6Z0/GK1IXYr+oW0fnYl52o9LTY0WgoWvI8uv5Q= k8s.io/apiextensions-apiserver v0.23.6/go.mod h1:YVh17Mphv183THQJA5spNFp9XfoidFyL3WoDgZxQIZU= -k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= -k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.6 h1:RH1UweWJkWNTlFx0D8uxOpaU1tjIOvVVWV/bu5b3/NQ= k8s.io/apimachinery v0.23.6/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= -k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw= k8s.io/apiserver v0.23.6/go.mod h1:5PU32F82tfErXPmf7FXhd/UcuLfh97tGepjKUgJ2atg= k8s.io/cli-runtime v0.23.6 h1:zvsGa4An+udUnznKSfD1Q17sETWHNOaMqYKHwHCvg+4= k8s.io/cli-runtime v0.23.6/go.mod h1:0Z3VR/HRIFKiLzKIAkm1mPtcH98GT/fXu2IU0E4vFmw= -k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= -k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4= k8s.io/client-go v0.23.6 h1:7h4SctDVQAQbkHQnR4Kzi7EyUyvla5G1pFWf4+Od7hQ= k8s.io/client-go v0.23.6/go.mod h1:Umt5icFOMLV/+qbtZ3PR0D+JA6lvvb3syzodv4irpK4= -k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= -k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.23.6/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= -k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0= k8s.io/component-base v0.23.6 
h1:8dhVZ4VrRcNdV2EGjl8tj8YOHwX6ysgCGMJ2Oyy0NW8= k8s.io/component-base v0.23.6/go.mod h1:FGMPeMrjYu0UZBSAFcfloVDplj9IvU+uRMTOdE23Fj0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -1125,14 +1096,12 @@ k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2R k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA= sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4= @@ -1143,7 +1112,6 @@ sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3O sigs.k8s.io/kustomize/kyaml v0.13.0 h1:9c+ETyNfSrVhxvphs+K2dzT3dh5oVPPEqPOE/cUpScY= sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= 
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go index 63f367dd48..f0c405ea25 100644 --- a/internal/cmd/manager/instance/restore/cmd.go +++ b/internal/cmd/manager/instance/restore/cmd.go @@ -32,7 +32,6 @@ func NewCmd() *cobra.Command { var clusterName string var namespace string var pgData string - var recoveryTarget string cmd := &cobra.Command{ Use: "restore [flags]", @@ -41,10 +40,9 @@ func NewCmd() *cobra.Command { ctx := context.Background() info := postgres.InitInfo{ - ClusterName: clusterName, - Namespace: namespace, - PgData: pgData, - RecoveryTarget: recoveryTarget, + ClusterName: clusterName, + Namespace: namespace, + PgData: pgData, } return restoreSubCommand(ctx, info) @@ -56,8 +54,6 @@ func NewCmd() *cobra.Command { cmd.Flags().StringVar(&namespace, "namespace", os.Getenv("NAMESPACE"), "The namespace of "+ "the cluster and the Pod in k8s") cmd.Flags().StringVar(&pgData, "pg-data", os.Getenv("PGDATA"), "The PGDATA to be created") - cmd.Flags().StringVar(&recoveryTarget, "target", "", "The recovery target in the form of "+ - "PostgreSQL options") return cmd } diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index 60581ab394..692f9b939c 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -161,25 +161,8 @@ func run(ctx context.Context, podName string, args []string, client client.WithW // Step 3: gather the WAL files names to archive walFilesList := 
gatherWALFilesToArchive(ctx, walName, maxParallel) - checkWalOptions, err := barmanCloudCheckWalArchiveOptions(cluster, cluster.Name) - if err != nil { - log.Error(err, "while getting barman-cloud-wal-archive options") - if errCond := manager.UpdateCondition(ctx, client, - cluster, buildArchiveCondition(err)); errCond != nil { - log.Error(errCond, "Error status.UpdateCondition()") - } - return err - } - // Step 4: Check if the archive location is safe to perform archiving - // This will output no error if we're not in the timeline 1 and archiving the wal file 1 - if err := walArchiver.CheckWalArchive(ctx, walFilesList, checkWalOptions); err != nil { - log.Error(err, "while barman-cloud-check-wal-archive") - // Update the condition if needed. - if errCond := manager.UpdateCondition(ctx, client, - cluster, buildArchiveCondition(err)); errCond != nil { - log.Error(errCond, "Error status.UpdateCondition()") - } + if err := checkWalArchive(ctx, cluster, walArchiver, client, walFilesList); err != nil { return err } @@ -344,36 +327,6 @@ func barmanCloudWalArchiveOptions( return options, nil } -func barmanCloudCheckWalArchiveOptions( - cluster *apiv1.Cluster, - clusterName string, -) ([]string, error) { - configuration := cluster.Spec.Backup.BarmanObjectStore - - var options []string - if len(configuration.EndpointURL) > 0 { - options = append( - options, - "--endpoint-url", - configuration.EndpointURL) - } - - options, err := barman.AppendCloudProviderOptionsFromConfiguration(options, configuration) - if err != nil { - return nil, err - } - - serverName := clusterName - if len(configuration.ServerName) != 0 { - serverName = configuration.ServerName - } - options = append( - options, - configuration.DestinationPath, - serverName) - return options, nil -} - func buildArchiveCondition(err error) *apiv1.ClusterCondition { if err != nil { return &apiv1.ClusterCondition{ @@ -390,3 +343,37 @@ func buildArchiveCondition(err error) *apiv1.ClusterCondition { Message: "", } } + 
+func checkWalArchive(ctx context.Context, + cluster *apiv1.Cluster, + walArchiver *archiver.WALArchiver, + client client.WithWatch, + walFilesList []string, +) error { + checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions(cluster, cluster.Name) + if err != nil { + log.Error(err, "while getting barman-cloud-wal-archive options") + if errCond := manager.UpdateCondition(ctx, client, + cluster, buildArchiveCondition(err)); errCond != nil { + log.Error(errCond, "Error status.UpdateCondition()") + } + return err + } + + firstWalFile := walArchiver.CheckWalFiles(ctx, walFilesList) + if !firstWalFile { + return nil + } + + if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil { + log.Error(err, "while barman-cloud-check-wal-archive") + // Update the condition if needed. + if errCond := manager.UpdateCondition(ctx, client, + cluster, buildArchiveCondition(err)); errCond != nil { + log.Error(errCond, "Error status.UpdateCondition()") + } + return err + } + + return nil +} diff --git a/pkg/management/barman/archiver/archiver.go b/pkg/management/barman/archiver/archiver.go index cc88576947..c4e03fa7c2 100644 --- a/pkg/management/barman/archiver/archiver.go +++ b/pkg/management/barman/archiver/archiver.go @@ -25,6 +25,8 @@ import ( "sync" "time" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/barman" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" barmanCapabilities "github.com/cloudnative-pg/cloudnative-pg/pkg/management/barman/capabilities" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/barman/spool" @@ -177,28 +179,31 @@ func (archiver *WALArchiver) Archive(walName string, baseOptions []string) error return nil } -// CheckWalArchive checks if the destinationObjectStore is ready perform archiving. 
-// Based on this ticket in Barman https://github.com/EnterpriseDB/barman/issues/432 -// and its implementation https://github.com/EnterpriseDB/barman/pull/443 -// The idea here is to check ONLY if we're archiving the wal files for the first time in the bucket -// since in this case the command barman-cloud-check-wal-archive will fail if the bucket exist and -// contain wal files inside -func (archiver *WALArchiver) CheckWalArchive(ctx context.Context, walFilesList, options []string) error { +// CheckWalFiles checks a list of WAL files looking for the first WAL file of the first Timeline +// return true if the first file in the list is the first WAL file +func (archiver *WALArchiver) CheckWalFiles(ctx context.Context, walFilesList []string) bool { contextLogger := log.FromContext(ctx) - // If walFileList is empty then, this is a no-op just like the method ArchiveList if len(walFilesList) == 0 { - return nil + contextLogger.Debug("WAL file list is empty, skipping check") + return false } // Get the first wal file from the list walName := path.Base(walFilesList[0]) // We check that we have the first wal file of the first timeline, otherwise, there's nothing to do here - if walName != "000000010000000000000001" { - return nil - } + return walName == "000000010000000000000001" +} - contextLogger.Info("barman-cloud-check-wal-archive checking the first wal", "walName", walName) +// CheckWalArchiveDestination checks if the destinationObjectStore is ready to perform archiving. 
+// Based on this ticket in Barman https://github.com/EnterpriseDB/barman/issues/432 +// and its implementation https://github.com/EnterpriseDB/barman/pull/443 +// The idea here is to check ONLY if we're archiving the wal files for the first time in the bucket +// since in this case the command barman-cloud-check-wal-archive will fail if the bucket exists and +// contains wal files inside +func (archiver *WALArchiver) CheckWalArchiveDestination(ctx context.Context, options []string) error { + contextLogger := log.FromContext(ctx) + contextLogger.Info("barman-cloud-check-wal-archive checking the first wal") // Check barman compatibility capabilities, err := barmanCapabilities.CurrentCapabilities() @@ -213,7 +218,6 @@ func (archiver *WALArchiver) CheckWalArchive(ctx context.Context, walFilesList, } contextLogger.Trace("Executing "+barmanCapabilities.BarmanCloudCheckWalArchive, - "walName", walName, "currentPrimary", archiver.cluster.Status.CurrentPrimary, "targetPrimary", archiver.cluster.Status.TargetPrimary, "options", options, @@ -225,7 +229,6 @@ func (archiver *WALArchiver) CheckWalArchive(ctx context.Context, walFilesList, err = execlog.RunStreaming(barmanCloudWalArchiveCmd, barmanCapabilities.BarmanCloudCheckWalArchive) if err != nil { contextLogger.Error(err, "Error invoking "+barmanCapabilities.BarmanCloudCheckWalArchive, - "walName", walName, "currentPrimary", archiver.cluster.Status.CurrentPrimary, "targetPrimary", archiver.cluster.Status.TargetPrimary, "options", options, @@ -238,3 +241,35 @@ func (archiver *WALArchiver) CheckWalArchive(ctx context.Context, walFilesList, return nil } + +// BarmanCloudCheckWalArchiveOptions creates the options needed for the `barman-cloud-check-wal-archive` +// command. 
+func (archiver *WALArchiver) BarmanCloudCheckWalArchiveOptions( + cluster *apiv1.Cluster, + clusterName string, +) ([]string, error) { + configuration := cluster.Spec.Backup.BarmanObjectStore + + var options []string + if len(configuration.EndpointURL) > 0 { + options = append( + options, + "--endpoint-url", + configuration.EndpointURL) + } + + options, err := barman.AppendCloudProviderOptionsFromConfiguration(options, configuration) + if err != nil { + return nil, err + } + + serverName := clusterName + if len(configuration.ServerName) != 0 { + serverName = configuration.ServerName + } + options = append( + options, + configuration.DestinationPath, + serverName) + return options, nil +} diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index 7079fe2fde..3deb1d7cd8 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -73,10 +73,6 @@ type InitInfo struct { // database just after having configured a new instance PostInitTemplateSQL []string - // The recovery target options, only applicable for the - // recovery bootstrap type - RecoveryTarget string - // Whether it is a temporary instance that will never contain real data. 
Temporary bool } diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 9c8d86807d..858c55544c 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -30,6 +30,9 @@ import ( "strings" "time" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/walarchive" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/barman/archiver" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" @@ -85,6 +88,13 @@ func (info InitInfo) Restore(ctx context.Context) error { return err } + // Before starting the restore we check if the archive destination is safe to use + // otherwise, we stop creating the cluster + err = info.checkBackupDestination(ctx, typedClient, cluster) + if err != nil { + return err + } + backup, env, err := info.loadBackup(ctx, typedClient, cluster) if err != nil { return err } @@ -118,7 +128,7 @@ func (info InitInfo) Restore(ctx context.Context) error { return err } - if err := info.writeRestoreWalConfig(backup); err != nil { + if err := info.writeRestoreWalConfig(backup, cluster); err != nil { return err } @@ -301,7 +311,7 @@ func (info InitInfo) loadBackupFromReference( // writeRestoreWalConfig writes a `custom.conf` allowing PostgreSQL // to complete the WAL recovery from the object storage and then start // as a new primary -func (info InitInfo) writeRestoreWalConfig(backup *apiv1.Backup) error { +func (info InitInfo) writeRestoreWalConfig(backup *apiv1.Backup, cluster *apiv1.Cluster) error { // Ensure restore_command is used to correctly recover WALs // from the object storage major, err := postgresSpec.GetMajorVersion(info.PgData) @@ -333,7 +343,7 @@ func (info InitInfo) writeRestoreWalConfig(backup *apiv1.Backup) error { "restore_command = '%s'\n"+ "%s", strings.Join(cmd, " "), - info.RecoveryTarget) + cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions()) log.Info("Generated recovery 
configuration", "configuration", recoveryFileContents) // Disable archiving @@ -560,6 +570,49 @@ func (info InitInfo) ConfigureInstanceAfterRestore(env []string) error { return nil } +func (info *InitInfo) checkBackupDestination( + ctx context.Context, + client client.Client, + cluster *apiv1.Cluster, +) error { + if !cluster.Spec.Backup.IsBarmanBackupConfigured() { + return nil + } + // Get environment from cache + env, err := barmanCredentials.EnvSetRestoreCloudCredentials(ctx, + client, + cluster.Namespace, + cluster.Spec.Backup.BarmanObjectStore, + os.Environ()) + if err != nil { + return fmt.Errorf("can't get credentials for cluster %v: %w", cluster.Name, err) + } + if len(env) == 0 { + return nil + } + + // Instance the WALArchiver to get the proper configuration + var walArchiver *archiver.WALArchiver + walArchiver, err = archiver.New(ctx, cluster, env, walarchive.SpoolDirectory) + if err != nil { + return fmt.Errorf("while creating the archiver: %w", err) + } + + // Get WAL archive options + checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions(cluster, cluster.Name) + if err != nil { + log.Error(err, "while getting barman-cloud-wal-archive options") + return err + } + + // Check if we're ok to archive in the desired destination + if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil { + return err + } + + return nil +} + // waitUntilRecoveryFinishes periodically checks the underlying // PostgreSQL connection and returns only when the recovery // mode is finished diff --git a/pkg/specs/jobs.go b/pkg/specs/jobs.go index ced2bfc2a0..abc1e1013f 100644 --- a/pkg/specs/jobs.go +++ b/pkg/specs/jobs.go @@ -123,12 +123,6 @@ func CreatePrimaryJobViaRecovery(cluster apiv1.Cluster, nodeSerial int32, backup "restore", } - if cluster.Spec.Bootstrap.Recovery.RecoveryTarget != nil { - initCommand = append(initCommand, - "--target", - cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions()) - } - job := 
createPrimaryJob(cluster, nodeSerial, "full-recovery", initCommand) addBarmanEndpointCAToJob(cluster, backup, job)