diff --git a/cmd/metrics_exporter/app/metrics_exporter.go b/cmd/metrics_exporter/app/metrics_exporter.go index a24267e39..af64a69aa 100644 --- a/cmd/metrics_exporter/app/metrics_exporter.go +++ b/cmd/metrics_exporter/app/metrics_exporter.go @@ -95,7 +95,7 @@ func Run() { // Create operator instance chop := chop.GetCHOp(chopClient, chopConfigFile) chop.SetupLog() - chop.Config().WriteToLog() + log.Info(chop.Config().String(true)) exporter := metrics.StartMetricsREST( metrics.NewCHAccessInfo( diff --git a/cmd/operator/app/clickhouse_operator.go b/cmd/operator/app/clickhouse_operator.go index 26b6a0962..0a083382b 100644 --- a/cmd/operator/app/clickhouse_operator.go +++ b/cmd/operator/app/clickhouse_operator.go @@ -24,16 +24,13 @@ import ( "syscall" "time" + kubeinformers "k8s.io/client-go/informers" + + log "github.com/altinity/clickhouse-operator/pkg/announcer" "github.com/altinity/clickhouse-operator/pkg/chop" + chopinformers "github.com/altinity/clickhouse-operator/pkg/client/informers/externalversions" "github.com/altinity/clickhouse-operator/pkg/controller/chi" "github.com/altinity/clickhouse-operator/pkg/version" - - chopinformers "github.com/altinity/clickhouse-operator/pkg/client/informers/externalversions" - - log "github.com/golang/glog" - // log "k8s.io/klog" - - kubeinformers "k8s.io/client-go/informers" ) // Prometheus exporter defaults @@ -87,12 +84,15 @@ func Run() { os.Exit(0) } + log.S().P() + defer log.E().P() + if debugRequest { kubeInformerFactoryResyncPeriod = defaultInformerFactoryResyncDebugPeriod chopInformerFactoryResyncPeriod = defaultInformerFactoryResyncDebugPeriod } - log.Infof("Starting clickhouse-operator. Version:%s GitSHA:%s BuiltAt:%s\n", version.Version, version.GitSHA, version.BuiltAt) + log.A().Info("Starting clickhouse-operator. 
Version:%s GitSHA:%s BuiltAt:%s", version.Version, version.GitSHA, version.BuiltAt) // Initialize k8s API clients kubeClient, chopClient := chop.GetClientset(kubeConfigFile, masterURL) @@ -100,9 +100,8 @@ func Run() { // Create operator instance chop := chop.GetCHOp(chopClient, chopConfigFile) chop.SetupLog() - chop.Config().WriteToLog() - - log.V(1).Infof("Log options parsed\n") + log.V(1).A().Info("Log options parsed") + log.Info(chop.Config().String(true)) // Create Informers kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions( @@ -145,7 +144,7 @@ func Run() { // // Start Controller // - log.V(1).Info("Starting CHI controller\n") + log.V(1).A().Info("Starting CHI controller") wg := &sync.WaitGroup{} wg.Add(1) go func() { diff --git a/config/users.d/03-database-ordinary.xml b/config/users.d/03-database-ordinary.xml new file mode 100644 index 000000000..2a76b2212 --- /dev/null +++ b/config/users.d/03-database-ordinary.xml @@ -0,0 +1,8 @@ + <!-- Remove it for ClickHouse versions before 20.4 --> + <yandex> + <profiles> + <default> + <default_database_engine>Ordinary</default_database_engine> + </default> + </profiles> + </yandex> \ No newline at end of file diff --git a/deploy/dev/clickhouse-operator-install-dev.yaml b/deploy/dev/clickhouse-operator-install-dev.yaml index c4113af18..a5e49b26a 100644 --- a/deploy/dev/clickhouse-operator-install-dev.yaml +++ b/deploy/dev/clickhouse-operator-install-dev.yaml @@ -1473,7 +1473,7 @@ subjects: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-files # apiVersion: v1 @@ -1611,7 +1611,7 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-confd-files # apiVersion: v1 @@ -1626,7 +1626,7 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-configd-files # apiVersion: v1 @@ -1682,7 +1682,7 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-templatesd-files # apiVersion: v1 @@ -1781,7 +1781,7 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-usersd-files # apiVersion: v1 @@ -1826,12 +1826,21 @@ data: + 03-database-ordinary.xml: | + <!-- Remove it for ClickHouse versions before 20.4 --> + <yandex> + <profiles> + <default> + <default_database_engine>Ordinary</default_database_engine> + </default> + </profiles> + </yandex> --- # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 -# altinity/metrics-exporter:0.13.0 +# altinity/clickhouse-operator:0.13.5 +# altinity/metrics-exporter:0.13.5 # # Setup Deployment for clickhouse-operator # Deployment would be created in kubectl-specified namespace @@ -1874,7 +1883,7 @@ spec: name: etc-clickhouse-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.13.0 + image: altinity/clickhouse-operator:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -1939,7 +1948,7 @@ spec: resource: limits.memory - name: metrics-exporter - image: altinity/metrics-exporter:0.13.0 + image: altinity/metrics-exporter:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder diff --git a/deploy/operator/clickhouse-operator-install-deployment.yaml b/deploy/operator/clickhouse-operator-install-deployment.yaml index 7f9e9a444..95e93620e 100644 --- a/deploy/operator/clickhouse-operator-install-deployment.yaml +++ b/deploy/operator/clickhouse-operator-install-deployment.yaml @@ -1,7 +1,7 @@ # Possible Template Parameters: # # - -# 
altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-files # apiVersion: v1 @@ -138,7 +138,7 @@ data: # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-confd-files # apiVersion: v1 @@ -152,7 +152,7 @@ data: # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-configd-files # apiVersion: v1 @@ -207,7 +207,7 @@ data: # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-templatesd-files # apiVersion: v1 @@ -305,7 +305,7 @@ data: # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-usersd-files # apiVersion: v1 @@ -349,12 +349,21 @@ data: + 03-database-ordinary.xml: | + <!-- Remove it for ClickHouse versions before 20.4 --> + <yandex> + <profiles> + <default> + <default_database_engine>Ordinary</default_database_engine> + </default> + </profiles> + </yandex> --- # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 -# altinity/metrics-exporter:0.13.0 +# altinity/clickhouse-operator:0.13.5 +# altinity/metrics-exporter:0.13.5 # # Setup Deployment for clickhouse-operator # Deployment would be created in kubectl-specified namespace @@ -396,7 +405,7 @@ spec: name: etc-clickhouse-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.13.0 + image: altinity/clickhouse-operator:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -461,7 +470,7 @@ spec: resource: limits.memory - name: metrics-exporter - image: altinity/metrics-exporter:0.13.0 + image: altinity/metrics-exporter:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder diff --git a/deploy/operator/clickhouse-operator-install-template-deployment.yaml b/deploy/operator/clickhouse-operator-install-template-deployment.yaml index e4dbb897a..44ec9067b 100644 --- a/deploy/operator/clickhouse-operator-install-template-deployment.yaml +++ b/deploy/operator/clickhouse-operator-install-template-deployment.yaml @@ -354,6 +354,15 @@ data: + 03-database-ordinary.xml: | + <!-- Remove it for ClickHouse versions before 20.4 --> + <yandex> + <profiles> + <default> + <default_database_engine>Ordinary</default_database_engine> + </default> + </profiles> + </yandex> --- # Possible Template Parameters: # # diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index 1c666d872..4db52974f 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -1826,6 +1826,15 @@ data: + 03-database-ordinary.xml: | + <!-- Remove it for ClickHouse versions before 20.4 --> + <yandex> + <profiles> + <default> + <default_database_engine>Ordinary</default_database_engine> + </default> + </profiles> + </yandex> --- # Possible Template Parameters: # # diff --git a/deploy/operator/clickhouse-operator-install.yaml b/deploy/operator/clickhouse-operator-install.yaml index a34a5b4c8..5b8ad9e10 100644 --- a/deploy/operator/clickhouse-operator-install.yaml +++ b/deploy/operator/clickhouse-operator-install.yaml @@ -1473,7 +1473,7 @@ subjects: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-files # apiVersion: v1 @@ -1611,7 +1611,7 @@ data: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-confd-files # apiVersion: v1 @@ -1626,7 +1626,7 @@ data: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-configd-files # 
apiVersion: v1 @@ -1682,7 +1682,7 @@ data: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-templatesd-files # apiVersion: v1 @@ -1781,7 +1781,7 @@ data: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-usersd-files # apiVersion: v1 @@ -1826,12 +1826,21 @@ data: + 03-database-ordinary.xml: | + <!-- Remove it for ClickHouse versions before 20.4 --> + <yandex> + <profiles> + <default> + <default_database_engine>Ordinary</default_database_engine> + </default> + </profiles> + </yandex> --- # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 -# altinity/metrics-exporter:0.13.0 +# altinity/clickhouse-operator:0.13.5 +# altinity/metrics-exporter:0.13.5 # # Setup Deployment for clickhouse-operator # Deployment would be created in kubectl-specified namespace @@ -1874,7 +1883,7 @@ spec: name: etc-clickhouse-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.13.0 + image: altinity/clickhouse-operator:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -1939,7 +1948,7 @@ spec: resource: limits.memory - name: metrics-exporter - image: altinity/metrics-exporter:0.13.0 + image: altinity/metrics-exporter:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder diff --git a/docs/replication_setup.md b/docs/replication_setup.md index 1f2c64c09..be7e84019 100644 --- a/docs/replication_setup.md +++ b/docs/replication_setup.md @@ -73,7 +73,9 @@ CREATE TABLE events_local on cluster '{cluster}' ( event_type Int32, article_id Int32, title String -) engine=ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}', event_date, (event_type, article_id), 8192); +) engine=ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}') +PARTITION BY toYYYYMM(event_date) +ORDER BY (event_type, article_id); ``` ```sql diff --git a/go.mod b/go.mod index 8654fb84d..52bcc1e75 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/mailru/go-clickhouse v1.3.0 github.com/prometheus/client_golang v1.6.0 github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a + github.com/sanity-io/litter v1.3.0 github.com/spf13/pflag v1.0.5 // indirect gopkg.in/d4l3k/messagediff.v1 v1.2.1 gopkg.in/yaml.v2 v2.2.8 diff --git a/go.sum b/go.sum index 514b85aa9..9a3aaf285 100644 --- a/go.sum +++ b/go.sum @@ -19,6 +19,7 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -112,6 +113,7 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -132,6 +134,8 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4 github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a h1:2v4Ipjxa3sh+xn6GvtgrMub2ci4ZLQMvTaYIba2lfdc= github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a/go.mod h1:ozniNEFS3j1qCwHKdvraMn1WJOsUxHd7lYfukEIS4cs= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/sanity-io/litter v1.3.0 h1:5ZO+weUsqdSWMUng5JnpkW/Oz8iTXiIdeumhQr1sSjs= +github.com/sanity-io/litter v1.3.0/go.mod h1:5Z71SvaYy5kcGtyglXOC9rrUi3c1E8CamFWjQsazTh0= github.com/satori/go.uuid v1.1.0 h1:B9KXyj+GzIpJbV7gmr873NsY6zpbxNy24CBtGrk7jHo= github.com/satori/go.uuid v1.1.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -142,6 +146,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go new file mode 100644 index 000000000..813334e23 --- /dev/null +++ b/pkg/announcer/announcer.go @@ -0,0 +1,400 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package announcer + +import ( + "reflect" + "strconv" + + "github.com/altinity/clickhouse-operator/pkg/util" + log "github.com/golang/glog" +) + +// Announcer handles all log/event/status messages going outside of controller/worker +type Announcer struct { + v log.Level + + // writeLog specifies whether to write log file + writeLog bool + + // file specifies file where logger is called from + file string + // line specifies line where logger is called from + line int + // function specifies function where logger is called from + function string + + // prefix specifies prefix used by logger + prefix string + // meta specifies meta-information of the object, if required + meta string +} + +// announcer is used in top-level functions and can be considered the 'default announcer' +var announcer Announcer + +// init creates default announcer +func init() { + announcer = New() +} + +// skip specifies the file name to be skipped from address +const skip = "announcer.go" + +// New creates new announcer +func New() Announcer { + return Announcer{ + writeLog: true, + } +} + +// V is inspired by log.V() +func (a Announcer) V(level log.Level) Announcer { + b := a + b.v = level + b.writeLog = true + return b +} + +// V is inspired by log.V() +func V(level log.Level) Announcer { + return announcer.V(level) +} + +// F adds function name +func (a Announcer) F() Announcer { + b := a + b.writeLog = true + _, _, b.function = util.Caller(skip) + return b +} + +// F adds function name +func F() Announcer { + return announcer.F() +} + +// L adds line number +func (a Announcer) L() Announcer { + b := a + b.writeLog = true + _, b.line, _ = util.Caller(skip) + return b +} + +// L adds line number +func L() Announcer { + return announcer.L() +} + +// FL adds filename +func (a Announcer) FL() Announcer { + b := a + b.writeLog = true + b.file, _, _ = util.Caller(skip) + return b +} + +// FL adds filename +func FL() Announcer { + return announcer.FL() +} + +// A adds full code address as 'file:line:function' +func (a Announcer) A() Announcer { + b := a + b.writeLog = true + b.file, b.line, b.function = util.Caller(skip) + return b +} + +// A adds full code address as 'file:line:function' +func A() Announcer { + return announcer.A() +} + +// S adds 'start of the function' tag, which includes: +// file, line, function and start prefix +func (a Announcer) S() Announcer { + b := a + b.writeLog = true + b.prefix = "start" + b.file, b.line, b.function = util.Caller(skip) + return b +} + +// S adds 'start of the function' tag, which includes: +// file, line, function and start prefix +func S() Announcer { + return announcer.S() +} + +// E adds 'end of the function' tag, which includes: +// file, line, function and end prefix +func (a Announcer) E() Announcer { + b := a + b.writeLog = true + b.prefix = "end" + b.file, b.line, b.function = util.Caller(skip) + return b +} + +// E adds 'end of the function' tag, which includes: +// file, line, function and end prefix +func E() Announcer { + return announcer.E() +} + +// M adds object meta as 'namespace/name' +func (a Announcer) M(m ...interface{}) Announcer { + if len(m) == 0 { + return a + } + + b := a + b.writeLog = true + switch len(m) { + case 1: + switch typed := m[0].(type) { + case string: + b.meta = typed + default: + if meta, ok := a.findMeta(m[0]); ok { + b.meta = meta + } else { + return a + } + } + case 2: + namespace, _ := m[0].(string) + name, _ := m[1].(string) + b.meta = namespace + "/" + name + } + return b +} + +// M adds object meta as 
'namespace/name' +func M(m ...interface{}) Announcer { + return announcer.M(m...) +} + +// P triggers log to print line +func (a Announcer) P() { + a.Info("") +} + +// P triggers log to print line +func P() { + announcer.P() +} + +// Info is inspired by log.Infof() +func (a Announcer) Info(format string, args ...interface{}) { + // Produce classic log line + if !a.writeLog { + return + } + + format = a.prependFormat(format) + if a.v > 0 { + if len(args) > 0 { + log.V(a.v).Infof(format, args...) + } else { + log.V(a.v).Info(format) + } + } else { + if len(args) > 0 { + log.Infof(format, args...) + } else { + log.Info(format) + } + } +} + +// Info is inspired by log.Infof() +func Info(format string, args ...interface{}) { + announcer.Info(format, args...) +} + +// Warning is inspired by log.Warningf() +func (a Announcer) Warning(format string, args ...interface{}) { + // Produce classic log line + if !a.writeLog { + return + } + + format = a.prependFormat(format) + if len(args) > 0 { + log.Warningf(format, args...) + } else { + log.Warning(format) + } +} + +// Warning is inspired by log.Warningf() +func Warning(format string, args ...interface{}) { + announcer.Warning(format, args...) +} + +// Error is inspired by log.Errorf() +func (a Announcer) Error(format string, args ...interface{}) { + // Produce classic log line + if !a.writeLog { + return + } + + format = a.prependFormat(format) + if len(args) > 0 { + log.Errorf(format, args...) + } else { + log.Error(format) + } +} + +// Error is inspired by log.Errorf() +func Error(format string, args ...interface{}) { + announcer.Error(format, args...) +} + +// Fatal is inspired by log.Fatalf() +func (a Announcer) Fatal(format string, args ...interface{}) { + format = a.prependFormat(format) + // Write and exit + if len(args) > 0 { + log.Fatalf(format, args...) + } else { + log.Fatal(format) + } +} + +// Fatal is inspired by log.Fatalf() +func Fatal(format string, args ...interface{}) { + announcer.Fatal(format, args...) 
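The package-level wrappers above delegate to a single default announcer, so call sites chain modifiers before the final Info/Warning/Error/Fatal call. A minimal consumer sketch of that chained API, matching the call sites this diff introduces in cmd/operator and pkg/chop (the reconcile function and the namespace/name literals are illustrative, not from this diff):

```go
package main

import (
	log "github.com/altinity/clickhouse-operator/pkg/announcer"
)

func reconcile() {
	// S()/E() stamp file:line:function() plus a start/end prefix;
	// P() prints the stamp as a line of its own
	log.S().P()
	defer log.E().P()

	// A() adds the code address, M() the object meta, so the line comes out as
	// <file>:<line>:<function>():<namespace>/<name>:reconciling 2 hosts
	log.V(1).A().M("default", "demo-chi").Info("reconciling %d hosts", 2)
}

func main() {
	reconcile()
}
```

Each modifier copies the value receiver (`b := a`), so a configured announcer can be captured and reused without mutating the package-level default.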
+} + +func (a Announcer) prependFormat(format string) string { + // Result format is expected to be 'file:line:function:prefix:meta:_start_format_' + // Prepend each component in reverse order + if a.meta != "" { + if format == "" { + format = a.meta + } else { + format = a.meta + ":" + format + } + } + if a.prefix != "" { + if format == "" { + format = a.prefix + } else { + format = a.prefix + ":" + format + } + } + if a.function != "" { + if format == "" { + format = a.function + "()" + } else { + format = a.function + "()" + ":" + format + } + } + if a.line != 0 { + if format == "" { + format = strconv.Itoa(a.line) + } else { + format = strconv.Itoa(a.line) + ":" + format + } + } + if a.file != "" { + if format == "" { + format = a.file + } else { + format = a.file + ":" + format + } + } + return format +} + +func (a Announcer) findMeta(m interface{}) (string, bool) { + if meta, ok := a.findInObjectMeta(m); ok { + return meta, ok + } + if meta, ok := a.findInCHI(m); ok { + return meta, ok + } + if meta, ok := a.findInAddress(m); ok { + return meta, ok + } + return "", false +} + +func (a Announcer) findInObjectMeta(m interface{}) (string, bool) { + if m == nil { + return "", false + } + meta := reflect.ValueOf(m) + if !meta.IsValid() || meta.IsNil() || meta.IsZero() { + return "", false + } + namespace := meta.Elem().FieldByName("Namespace") + if !namespace.IsValid() { + return "", false + } + name := meta.Elem().FieldByName("Name") + if !name.IsValid() { + return "", false + } + return namespace.String() + "/" + name.String(), true +} + +func (a Announcer) findInCHI(m interface{}) (string, bool) { + if m == nil { + return "", false + } + object := reflect.ValueOf(m) + if !object.IsValid() || object.IsNil() || object.IsZero() { + return "", false + } + chi := object.Elem().FieldByName("CHI") + if !chi.IsValid() || chi.IsNil() || chi.IsZero() { + return "", false + } + namespace := chi.Elem().FieldByName("Namespace") + if !namespace.IsValid() { + return "", false + } + name := chi.Elem().FieldByName("Name") + if !name.IsValid() { + return "", false + } + return namespace.String() + "/" + name.String(), true +} + +func (a Announcer) findInAddress(m interface{}) (string, bool) { + if m == nil { + return "", false + } + address := reflect.ValueOf(m) + if !address.IsValid() || address.IsNil() || address.IsZero() { + return "", false + } + namespace := address.Elem().FieldByName("Namespace") + if !namespace.IsValid() { + return "", false + } + name := address.Elem().FieldByName("CHIName") + if !name.IsValid() { + return "", false + } + return namespace.String() + "/" + name.String(), true +} diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index f4c6fa76e..70d3b233d 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -15,10 +15,11 @@ package v1 import ( - "github.com/altinity/clickhouse-operator/pkg/util" - "github.com/altinity/clickhouse-operator/pkg/version" "math" "strings" + + "github.com/altinity/clickhouse-operator/pkg/util" + "github.com/altinity/clickhouse-operator/pkg/version" ) // fillStatus fills .Status diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go index 051ca4383..907152501 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go @@ -40,6 +40,18 @@ type ChiHostAddress struct { 
ClusterScopeCycleOffset int `json:"clusterScopeCycleOffset"` } -func (a ChiHostAddress) ShortString() string { +func (a ChiHostAddress) CompactString() string { return fmt.Sprintf("ns:%s|chi:%s|clu:%s|sha:%s|rep:%s|host:%s", a.Namespace, a.CHIName, a.ClusterName, a.ShardName, a.ReplicaName, a.HostName) } + +func (a ChiHostAddress) ClusterNameString() string { + return fmt.Sprintf("%s/%s", a.ClusterName, a.HostName) +} + +func (a ChiHostAddress) NamespaceNameString() string { + return fmt.Sprintf("%s/%s", a.Namespace, a.HostName) +} + +func (a ChiHostAddress) NamespaceCHINameString() string { + return fmt.Sprintf("%s/%s", a.Namespace, a.CHIName) +} diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go index c8b23f472..4e8f0fad3 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go @@ -16,11 +16,11 @@ package v1 import ( "bytes" + "fmt" "os" "strings" "time" - log "github.com/golang/glog" // log "k8s.io/klog" "github.com/imdario/mergo" @@ -151,21 +151,23 @@ type OperatorConfig struct { } // MergeFrom merges -func (config *OperatorConfig) MergeFrom(from *OperatorConfig, _type MergeType) { +func (config *OperatorConfig) MergeFrom(from *OperatorConfig, _type MergeType) error { switch _type { case MergeTypeFillEmptyValues: if err := mergo.Merge(config, *from); err != nil { - log.V(1).Infof("FAIL merge config Error: %q", err) + return fmt.Errorf("FAIL merge config Error: %q", err) } case MergeTypeOverrideByNonEmptyValues: if err := mergo.Merge(config, *from, mergo.WithOverride); err != nil { - log.V(1).Infof("FAIL merge config Error: %q", err) + return fmt.Errorf("FAIL merge config Error: %q", err) } } + + return nil } // readCHITemplates build OperatorConfig.CHITemplate from template files content -func (config *OperatorConfig) readCHITemplates() { +func (config *OperatorConfig) readCHITemplates() (errs []error) { // Read CHI template files config.CHITemplateFiles = util.ReadFilesIntoMap(config.CHITemplatesPath, config.isCHITemplateExt) @@ -174,11 +176,13 @@ func (config *OperatorConfig) readCHITemplates() { template := new(ClickHouseInstallation) if err := yaml.Unmarshal([]byte(config.CHITemplateFiles[filename]), template); err != nil { // Unable to unmarshal - skip incorrect template - log.V(1).Infof("FAIL readCHITemplates() unable to unmarshal file %s Error: %q", filename, err) + errs = append(errs, fmt.Errorf("FAIL readCHITemplates() unable to unmarshal file %s Error: %q", filename, err)) continue } config.enlistCHITemplate(template) } + + return } // enlistCHITemplate inserts template into templates catalog @@ -187,7 +191,6 @@ func (config *OperatorConfig) enlistCHITemplate(template *ClickHouseInstallation config.CHITemplates = make([]*ClickHouseInstallation, 0) } config.CHITemplates = append(config.CHITemplates, template) - log.V(1).Infof("enlistCHITemplate(%s/%s)", template.Namespace, template.Name) } // unlistCHITemplate removes template from templates catalog @@ -196,11 +199,9 @@ func (config *OperatorConfig) unlistCHITemplate(template *ClickHouseInstallation return } - log.V(1).Infof("unlistCHITemplate(%s/%s)", template.Namespace, template.Name) // Nullify found template entry for _, _template := range config.CHITemplates { if (_template.Name == template.Name) && (_template.Namespace == template.Namespace) { - log.V(1).Infof("unlistCHITemplate(%s/%s) - found, unlisting", template.Namespace, template.Name) // TODO normalize 
//config.CHITemplates[i] = nil _template.Name = "" @@ -226,7 +227,6 @@ func (config *OperatorConfig) FindTemplate(use *ChiUseTemplate, namespace string if use.Namespace != "" { // With fully-specified use template direct (full name) only match is applicable, and it is not possible // This is strange situation, however - log.V(1).Infof("STRANGE FindTemplate(%s/%s) - unexpected position", use.Namespace, use.Name) return nil } @@ -249,7 +249,6 @@ func (config *OperatorConfig) FindAutoTemplates() []*ClickHouseInstallation { res = append(res, _template) } } - log.V(3).Infof("Found %d auto templates", len(res)) return res } @@ -452,10 +451,6 @@ func (config *OperatorConfig) applyDefaultWatchNamespace() { // readClickHouseCustomConfigFiles reads all extra user-specified ClickHouse config files func (config *OperatorConfig) readClickHouseCustomConfigFiles() { - log.V(0).Infof("Read Common Config files from folder: %s", config.CHCommonConfigsPath) - log.V(0).Infof("Read Host Config files from folder: %s", config.CHHostConfigsPath) - log.V(0).Infof("Read Users Config files from folder: %s", config.CHUsersConfigsPath) - config.CHCommonConfigs = util.ReadFilesIntoMap(config.CHCommonConfigsPath, config.isCHConfigExt) config.CHHostConfigs = util.ReadFilesIntoMap(config.CHHostConfigsPath, config.isCHConfigExt) config.CHUsersConfigs = util.ReadFilesIntoMap(config.CHUsersConfigsPath, config.isCHConfigExt) @@ -542,11 +537,6 @@ func (config *OperatorConfig) String(hideCredentials bool) string { return b.String() } -// WriteToLog writes OperatorConfig into log -func (config *OperatorConfig) WriteToLog() { - log.V(1).Infof("OperatorConfig:\n%s", config.String(true)) -} - // TODO unify with GetInformerNamespace // IsWatchedNamespace returns whether specified namespace is in a list of watched func (config *OperatorConfig) IsWatchedNamespace(namespace string) bool { diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go index 5950a2653..571b6c632 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go @@ -23,9 +23,6 @@ import ( "sort" "strconv" "strings" - - log "github.com/golang/glog" - // log "k8s.io/klog" ) const ( @@ -177,7 +174,6 @@ func unmarshalScalar(untyped interface{}) (string, bool) { typeOf := reflect.TypeOf(untyped) if typeOf == nil { // Unable to determine type of the value - log.V(3).Infof("unmarshalScalar() typeOf==nil") return "", false } @@ -220,12 +216,9 @@ func unmarshalScalar(untyped interface{}) (string, bool) { knownType = true } - str := typeOf.String() if knownType { - log.V(3).Infof("unmarshalScalar() type=%v value=%s", str, res) return res, true } else { - log.V(3).Infof("unmarshalScalar() type=%v - UNABLE to unmarshal", str) return "", false } } @@ -238,7 +231,6 @@ func unmarshalVector(untyped interface{}) ([]string, bool) { typeOf := reflect.TypeOf(untyped) if typeOf == nil { // Unable to determine type of the value - log.V(3).Infof("unmarshalVector() typeOf==nil") return nil, false } @@ -253,12 +245,9 @@ func unmarshalVector(untyped interface{}) ([]string, bool) { knownType = true } - str := typeOf.String() if knownType { - log.V(3).Infof("unmarshalVector() type=%v value=%s", str, res) return res, true } else { - log.V(3).Infof("unmarshalVector type=%v - UNABLE to unmarshal", str) return nil, false } } @@ -535,8 +524,6 @@ func string2Section(section string) (SettingsSection, error) { return SectionHost, nil } - log.V(1).Infof("unknown section 
specified %v", section) - return SectionEmpty, fmt.Errorf("unknown section specified %v", section) } diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go index f20d51be2..daa877018 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go @@ -15,13 +15,11 @@ package v1 import ( - log "github.com/golang/glog" - // log "k8s.io/klog" - + "fmt" "github.com/imdario/mergo" ) -func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { +func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) (errs []error) { if from == nil { return } @@ -46,7 +44,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { // Override `to` template with `from` template //templates.PodTemplates[toIndex] = *fromTemplate.DeepCopy() if err := mergo.Merge(toTemplate, *fromTemplate, mergo.WithOverride); err != nil { - log.V(1).Infof("ERROR merge template(%s): %v", toTemplate.Name, err) + errs = append(errs, fmt.Errorf("ERROR merge template(%s): %v", toTemplate.Name, err)) } break } @@ -80,7 +78,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { // Override `to` template with `from` template //templates.PodTemplates[toIndex] = *fromTemplate.DeepCopy() if err := mergo.Merge(toTemplate, *fromTemplate, mergo.WithOverride); err != nil { - log.V(1).Infof("ERROR merge template(%s): %v", toTemplate.Name, err) + errs = append(errs, fmt.Errorf("ERROR merge template(%s): %v", toTemplate.Name, err)) } break } @@ -114,7 +112,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { // Override `to` template with `from` template //templates.VolumeClaimTemplates[toIndex] = *fromTemplate.DeepCopy() if err := mergo.Merge(toTemplate, *fromTemplate, mergo.WithOverride); err != nil { - log.V(1).Infof("ERROR merge template(%s): %v", toTemplate.Name, err) + errs = append(errs, fmt.Errorf("ERROR merge template(%s): %v", toTemplate.Name, err)) } break } @@ -148,7 +146,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { // Override `to` template with `from` template //templates.ServiceTemplates[toIndex] = *fromTemplate.DeepCopy() if err := mergo.Merge(toTemplate, *fromTemplate, mergo.WithOverride); err != nil { - log.V(1).Infof("ERROR merge template(%s): %v", toTemplate.Name, err) + errs = append(errs, fmt.Errorf("ERROR merge template(%s): %v", toTemplate.Name, err)) } break } @@ -161,4 +159,6 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { } } } + + return } diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index bdb76de1a..46f7b3a90 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -178,15 +178,22 @@ type ChiHostConfig struct { FilesFingerprint string `json:"filesfingerprint"` } +type StatefulSetStatus string + +const ( + StatefulSetStatusModified StatefulSetStatus = "modified" + StatefulSetStatusNew StatefulSetStatus = "new" + StatefulSetStatusSame StatefulSetStatus = "same" + StatefulSetStatusUnknown StatefulSetStatus = "unknown" +) + // ChiHostReconcileAttributes defines host reconcile status type ChiHostReconcileAttributes struct { + status StatefulSetStatus add bool remove bool modify bool unclear bool - - migrate bool - reconciled bool } func NewChiHostReconcileAttributes() 
*ChiHostReconcileAttributes { @@ -207,6 +214,15 @@ func (s *ChiHostReconcileAttributes) Any(to ChiHostReconcileAttributes) bool { return (s.add && to.add) || (s.remove && to.remove) || (s.modify && to.modify) || (s.unclear && to.unclear) } +func (s *ChiHostReconcileAttributes) SetStatus(status StatefulSetStatus) *ChiHostReconcileAttributes { + s.status = status + return s +} + +func (s *ChiHostReconcileAttributes) GetStatus() StatefulSetStatus { + return s.status +} + func (s *ChiHostReconcileAttributes) SetAdd() *ChiHostReconcileAttributes { s.add = true return s @@ -232,16 +248,6 @@ func (s *ChiHostReconcileAttributes) SetUnclear() *ChiHostReconcileAttributes { return s } -func (s *ChiHostReconcileAttributes) SetMigrate() *ChiHostReconcileAttributes { - s.migrate = true - return s -} - -func (s *ChiHostReconcileAttributes) SetReconciled() *ChiHostReconcileAttributes { - s.reconciled = true - return s -} - func (s *ChiHostReconcileAttributes) IsAdd() bool { return s.add } @@ -258,14 +264,6 @@ func (s *ChiHostReconcileAttributes) IsUnclear() bool { return s.unclear } -func (s *ChiHostReconcileAttributes) IsMigrate() bool { - return s.migrate -} - -func (s *ChiHostReconcileAttributes) IsReconciled() bool { - return s.reconciled -} - // CHITemplates defines templates section of .spec type ChiTemplates struct { // Templates diff --git a/pkg/apis/metrics/clickhouse_fetcher.go b/pkg/apis/metrics/clickhouse_fetcher.go index 9114ee9b0..bc3ffa3bf 100644 --- a/pkg/apis/metrics/clickhouse_fetcher.go +++ b/pkg/apis/metrics/clickhouse_fetcher.go @@ -16,10 +16,11 @@ package metrics import ( sqlmodule "database/sql" + "time" "github.com/MakeNowJust/heredoc" + "github.com/altinity/clickhouse-operator/pkg/model/clickhouse" - "time" ) const ( diff --git a/pkg/apis/metrics/exporter.go b/pkg/apis/metrics/exporter.go index 38053935f..4a644406f 100644 --- a/pkg/apis/metrics/exporter.go +++ b/pkg/apis/metrics/exporter.go @@ -17,8 +17,6 @@ package metrics import ( "encoding/json" "fmt" - "github.com/altinity/clickhouse-operator/pkg/chop" - "k8s.io/apimachinery/pkg/apis/meta/v1" "net/http" "sync" @@ -26,7 +24,9 @@ import ( // log "k8s.io/klog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/altinity/clickhouse-operator/pkg/chop" chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" ) diff --git a/pkg/chop/chop.go b/pkg/chop/chop.go index 0c6e73594..ccf8197d7 100644 --- a/pkg/chop/chop.go +++ b/pkg/chop/chop.go @@ -16,11 +16,10 @@ package chop import ( "flag" + + log "github.com/altinity/clickhouse-operator/pkg/announcer" "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" - - log "github.com/golang/glog" - // log "k8s.io/klog" ) type CHOp struct { @@ -50,31 +49,31 @@ func (c *CHOp) Config() *v1.OperatorConfig { func (c *CHOp) SetupLog() { updated := false if c.Config().Logtostderr != "" { - log.V(1).Infof("Log option cur value %s=%s\n", "logtostderr", flag.Lookup("logtostderr").Value) - log.V(1).Infof("Log option new value %s=%s\n", "logtostderr", c.Config().Logtostderr) + c.logUpdate("logtostderr", c.Config().Logtostderr) updated = true _ = flag.Set("logtostderr", c.Config().Logtostderr) } if c.Config().Alsologtostderr != "" { - log.V(1).Infof("Log option cur value %s=%s\n", "alsologtostderr", flag.Lookup("alsologtostderr").Value) - log.V(1).Infof("Log option new value %s=%s\n", "alsologtostderr", 
c.Config().Alsologtostderr) + c.logUpdate("alsologtostderr", c.Config().Alsologtostderr) updated = true _ = flag.Set("alsologtostderr", c.Config().Alsologtostderr) } if c.Config().Stderrthreshold != "" { - log.V(1).Infof("Log option cur value %s=%s\n", "stderrthreshold", flag.Lookup("stderrthreshold").Value) - log.V(1).Infof("Log option new value %s=%s\n", "stderrthreshold", c.Config().Stderrthreshold) + c.logUpdate("stderrthreshold", c.Config().Stderrthreshold) updated = true _ = flag.Set("stderrthreshold", c.Config().Stderrthreshold) } if c.Config().V != "" { - log.V(1).Infof("Log option cur value %s=%s\n", "v", flag.Lookup("v").Value) - log.V(1).Infof("Log option new value %s=%s\n", "v", c.Config().V) + c.logUpdate("v", c.Config().V) updated = true _ = flag.Set("v", c.Config().V) } if updated { - log.V(1).Infof("Additional log options applied\n") + log.V(1).Info("Additional log options applied") } } + +func (c *CHOp) logUpdate(name, value string) { + log.V(1).Info("Log option '%s' change value from '%s' to '%s'", name, flag.Lookup(name).Value, value) +} diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go index ec79accee..1f6e7ebbf 100644 --- a/pkg/chop/config_manager.go +++ b/pkg/chop/config_manager.go @@ -15,19 +15,18 @@ package chop import ( - log "github.com/golang/glog" - // log "k8s.io/klog" - - "github.com/kubernetes-sigs/yaml" "io/ioutil" "os" "os/user" "path/filepath" "sort" + "github.com/kubernetes-sigs/yaml" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + log "github.com/altinity/clickhouse-operator/pkg/announcer" chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type ConfigManager struct { @@ -77,7 +76,7 @@ func (cm *ConfigManager) Init() error { return err } log.V(1).Info("File-based ClickHouseOperatorConfigurations") - cm.fileConfig.WriteToLog() + log.V(1).Info(cm.fileConfig.String(true)) // Get configs from all config Custom Resources watchedNamespace := cm.fileConfig.GetInformerNamespace() @@ -89,14 +88,14 @@ func (cm *ConfigManager) Init() error { // From now on we have one unified CHOP config log.V(1).Info("Unified (but not post-processed yet) CHOP config") - cm.config.WriteToLog() + log.V(1).Info(cm.config.String(true)) // Finalize config by post-processing cm.config.Postprocess() // OperatorConfig is ready log.V(1).Info("Final CHOP config") - cm.config.WriteToLog() + log.V(1).Info(cm.config.String(true)) return nil } @@ -116,7 +115,7 @@ func (cm *ConfigManager) getCRBasedConfigs(namespace string) { // Get list of ClickHouseOperatorConfiguration objects var err error if cm.chopConfigList, err = cm.chopClient.ClickhouseV1().ClickHouseOperatorConfigurations(namespace).List(metav1.ListOptions{}); err != nil { - log.V(1).Infof("Error read ClickHouseOperatorConfigurations %v", err) + log.V(1).A().Error("Error read ClickHouseOperatorConfigurations %v", err) return } @@ -152,8 +151,8 @@ func (cm *ConfigManager) getCRBasedConfigs(namespace string) { // logCRBasedConfigs writes all ClickHouseOperatorConfiguration objects into log func (cm *ConfigManager) logCRBasedConfigs() { for _, chOperatorConfiguration := range cm.crConfigs { - log.V(1).Infof("chop config %s/%s :", chOperatorConfiguration.ConfigFolderPath, chOperatorConfiguration.ConfigFilePath) - chOperatorConfiguration.WriteToLog() + log.V(1).Info("chop config %s/%s :", chOperatorConfiguration.ConfigFolderPath, 
chOperatorConfiguration.ConfigFilePath) + log.V(1).Info(chOperatorConfiguration.String(true)) } } @@ -305,9 +304,9 @@ func (cm *ConfigManager) logEnvVarParams() { sort.Strings(keys) // Walk over sorted names aka keys - log.V(1).Infof("Parameters num: %d\n", len(cm.runtimeParams)) + log.V(1).Info("Parameters num: %d", len(cm.runtimeParams)) for _, k := range keys { - log.V(1).Infof("%s=%s\n", k, cm.runtimeParams[k]) + log.V(1).Info("%s=%s", k, cm.runtimeParams[k]) } } diff --git a/pkg/chop/kube_machinery.go b/pkg/chop/kube_machinery.go index b6b4106bd..3ad3f0fca 100644 --- a/pkg/chop/kube_machinery.go +++ b/pkg/chop/kube_machinery.go @@ -20,9 +20,7 @@ import ( "os/user" "path/filepath" - log "github.com/golang/glog" - // log "k8s.io/klog" - + log "github.com/altinity/clickhouse-operator/pkg/announcer" chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" "github.com/altinity/clickhouse-operator/pkg/version" @@ -67,18 +65,18 @@ func getKubeConfig(kubeConfigFile, masterURL string) (*kuberest.Config, error) { func GetClientset(kubeConfigFile, masterURL string) (*kube.Clientset, *chopclientset.Clientset) { kubeConfig, err := getKubeConfig(kubeConfigFile, masterURL) if err != nil { - log.Fatalf("Unable to build kubeconf: %s", err.Error()) + log.A().Fatal("Unable to build kubeconf: %s", err.Error()) os.Exit(1) } kubeClientset, err := kube.NewForConfig(kubeConfig) if err != nil { - log.Fatalf("Unable to initialize kubernetes API clientset: %s", err.Error()) + log.A().Fatal("Unable to initialize kubernetes API clientset: %s", err.Error()) } chopClientset, err := chopclientset.NewForConfig(kubeConfig) if err != nil { - log.Fatalf("Unable to initialize clickhouse-operator API clientset: %s", err.Error()) + log.A().Fatal("Unable to initialize clickhouse-operator API clientset: %s", err.Error()) } return kubeClientset, chopClientset @@ -89,7 +87,7 @@ func GetCHOp(chopClient *chopclientset.Clientset, initCHOpConfigFilePath string) // Create operator instance chop := NewCHOp(version.Version, chopClient, initCHOpConfigFilePath) if err := chop.Init(); err != nil { - log.Fatalf("Unable to init CHOP instance %v\n", err) + log.A().Fatal("Unable to init CHOP instance %v", err) os.Exit(1) } diff --git a/pkg/controller/chi/announcer.go b/pkg/controller/chi/announcer.go index 1c66750f4..10f964447 100644 --- a/pkg/controller/chi/announcer.go +++ b/pkg/controller/chi/announcer.go @@ -17,132 +17,282 @@ package chi import ( "fmt" - log "github.com/golang/glog" - // log "k8s.io/klog" - + a "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + log "github.com/golang/glog" ) // Announcer handler all log/event/status messages going outside of controller/worker type Announcer struct { - c *Controller - chi *chop.ClickHouseInstallation - v log.Level - writeLog bool - writeEvent bool - eventAction string - eventReason string - writeStatusAction bool + a.Announcer + + ctrl *Controller + chi *chop.ClickHouseInstallation + + // writeEvent specifies whether to produce k8s event into chi, therefore requires chi to be specified + // See k8s event for details. 
+ // https://kubernetes.io/docs/reference/kubernetes-api/cluster-resources/event-v1/ + writeEvent bool + // eventAction specifies k8s event action + eventAction string + // eventReason specifies k8s event reason + eventReason string + + // writeStatusAction specifies whether to produce action into `ClickHouseInstallation.Status.Action` of chi, + // therefore requires chi to be specified + writeStatusAction bool + // writeStatusActions specifies whether to produce action into `ClickHouseInstallation.Status.Actions` of chi, + // therefore requires chi to be specified writeStatusActions bool - writeStatusError bool + // writeStatusError specifies whether to produce error into `ClickHouseInstallation.Status.Error` of chi, + // therefore requires chi to be specified + writeStatusError bool } -// NewAnnouncer creates new announcer -func NewAnnouncer(c *Controller) Announcer { +// NewAnnouncer creates new announcer +func NewAnnouncer() Announcer { return Announcer{ - c: c, - writeLog: true, + Announcer: a.New(), } } // V is inspired by log.V() func (a Announcer) V(level log.Level) Announcer { b := a - b.v = level - b.writeLog = true + b.Announcer = b.Announcer.V(level) return b } -// WithEvent is used in chained calls in order to produce event -func (a Announcer) WithEvent( - chi *chop.ClickHouseInstallation, - action string, - reason string, -) Announcer { +// F adds function name +func (a Announcer) F() Announcer { b := a - b.writeEvent = true - b.chi = chi - b.eventAction = action - b.eventReason = reason + b.Announcer = b.Announcer.F() return b } -// WithStatusAction is used in chained calls in order to produce action in ClickHouseInstallation.Status.Action -func (a Announcer) WithStatusAction(chi *chop.ClickHouseInstallation) Announcer { +// L adds line number +func (a Announcer) L() Announcer { b := a - b.writeStatusAction = true - b.writeStatusActions = true - b.chi = chi + b.Announcer = b.Announcer.L() return b } -// WithStatusActions is used in chained calls in order to produce action in ClickHouseInstallation.Status.Actions -func (a Announcer) WithStatusActions(chi *chop.ClickHouseInstallation) Announcer { +// FL adds filename +func (a Announcer) FL() Announcer { b := a - b.writeStatusActions = true - b.chi = chi + b.Announcer = b.Announcer.FL() return b } -// WithStatusAction is used in chained calls in order to produce error in ClickHouseInstallation.Status.Error -func (a Announcer) WithStatusError(chi *chop.ClickHouseInstallation) Announcer { +// A adds full code address as 'file:line:function' +func (a Announcer) A() Announcer { + b := a + b.Announcer = b.Announcer.A() + return b +} + +// S adds 'start of the function' tag +func (a Announcer) S() Announcer { + b := a + b.Announcer = b.Announcer.S() + return b +} + +// E adds 'end of the function' tag +func (a Announcer) E() Announcer { b := a - b.writeStatusError = true - b.chi = chi + b.Announcer = b.Announcer.E() return b } +// M adds object meta as 'namespace/name' +func (a Announcer) M(m ...interface{}) Announcer { + b := a + b.Announcer = b.Announcer.M(m...) + return b +} + +// P triggers log to print line +func (a Announcer) P() { + a.Info("") +} + // Info is inspired by log.Infof() func (a Announcer) Info(format string, args ...interface{}) { - if a.writeLog { - if a.v > 0 { - log.V(a.v).Infof(format, args...) + // Produce classic log line + a.Announcer.Info(format, args...) 
+ + // Produce k8s event + if a.writeEvent && a.chiCapable() { + if len(args) > 0 { + a.ctrl.EventInfo(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) } else { - log.Infof(format, args...) + a.ctrl.EventInfo(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format)) } } - if a.writeEvent { - a.c.eventInfo(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) - } + + // Produce chi status record a.writeCHIStatus(format, args...) } // Warning is inspired by log.Warningf() func (a Announcer) Warning(format string, args ...interface{}) { - if a.writeLog { - log.Warningf(format, args...) - } - if a.writeEvent { - a.c.eventWarning(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + // Produce classic log line + a.Announcer.Warning(format, args...) + + // Produce k8s event + if a.writeEvent && a.chiCapable() { + if len(args) > 0 { + a.ctrl.EventWarning(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + } else { + a.ctrl.EventWarning(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format)) + } } + + // Produce chi status record a.writeCHIStatus(format, args...) } // Error is inspired by log.Errorf() func (a Announcer) Error(format string, args ...interface{}) { - if a.writeLog { - log.Errorf(format, args...) + // Produce classic log line + a.Announcer.Error(format, args...) + + // Produce k8s event + if a.writeEvent && a.chiCapable() { + if len(args) > 0 { + a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + } else { + a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format)) + } } - if a.writeEvent { - a.c.eventError(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + + // Produce chi status record + a.writeCHIStatus(format, args...) +} + +// Fatal is inspired by log.Fatalf() +func (a Announcer) Fatal(format string, args ...interface{}) { + // Produce k8s event + if a.writeEvent && a.chiCapable() { + if len(args) > 0 { + a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + } else { + a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format)) + } } + + // Produce chi status record a.writeCHIStatus(format, args...) + + // Write and exit + a.Announcer.Fatal(format, args...) 
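For the chi-aware announcer, the WithController/WithEvent/WithStatus* builders defined just below wire a controller and a ClickHouseInstallation into the chain, so a single call fans out to the log, a k8s event, and the CHI status. A hedged sketch of such a call site, assuming it lives in this `chi` package (the action/reason strings are placeholders, not constants from this diff):

```go
// announceReconcileStart is a hypothetical helper; ctrl and chi come from
// the surrounding controller/worker code.
func announceReconcileStart(ctrl *Controller, chi *chop.ClickHouseInstallation) {
	NewAnnouncer().
		WithController(ctrl).                         // enables event/status writes
		WithEvent(chi, "Update", "ReconcileStarted"). // placeholder action/reason
		WithStatusAction(chi).                        // also records into Status.Action(s)
		V(1).
		Info("reconcile started for %s/%s", chi.Namespace, chi.Name)
}
```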
+} + +// WithController specifies controller to be used in case `chi`-related announcements need to be made +func (a Announcer) WithController(ctrl *Controller) Announcer { + b := a + b.ctrl = ctrl + return b +} + +// WithEvent is used in chained calls in order to produce event into `chi` +func (a Announcer) WithEvent( + chi *chop.ClickHouseInstallation, + action string, + reason string, +) Announcer { + b := a + if chi == nil { + b.writeEvent = false + b.chi = nil + b.eventAction = "" + b.eventReason = "" + } else { + b.writeEvent = true + b.chi = chi + b.eventAction = action + b.eventReason = reason + } + return b +} + +// WithStatusAction is used in chained calls in order to produce action into `ClickHouseInstallation.Status.Action` +func (a Announcer) WithStatusAction(chi *chop.ClickHouseInstallation) Announcer { + b := a + if chi == nil { + b.chi = nil + b.writeStatusAction = false + b.writeStatusActions = false + } else { + b.chi = chi + b.writeStatusAction = true + b.writeStatusActions = true + } + return b +} + +// WithStatusActions is used in chained calls in order to produce action into `ClickHouseInstallation.Status.Actions` +func (a Announcer) WithStatusActions(chi *chop.ClickHouseInstallation) Announcer { + b := a + if chi == nil { + b.chi = nil + b.writeStatusActions = false + } else { + b.chi = chi + b.writeStatusActions = true + } + return b +} + +// WithStatusError is used in chained calls in order to produce error into `ClickHouseInstallation.Status.Error` +func (a Announcer) WithStatusError(chi *chop.ClickHouseInstallation) Announcer { + b := a + if chi == nil { + b.chi = nil + b.writeStatusError = false + } else { + b.chi = chi + b.writeStatusError = true + } + return b +} + +// chiCapable checks whether announcer is capable of producing chi-based announcements +func (a Announcer) chiCapable() bool { + return (a.ctrl != nil) && (a.chi != nil) +} // writeCHIStatus is internal function which writes ClickHouseInstallation.Status func (a Announcer) writeCHIStatus(format string, args ...interface{}) { + if !a.chiCapable() { + return + } + if a.writeStatusAction { - a.chi.Status.Action = fmt.Sprintf(format, args...) + if len(args) > 0 { + a.chi.Status.Action = fmt.Sprintf(format, args...) 
+ } else { + a.chi.Status.Action = fmt.Sprint(format) + } } if a.writeStatusActions { - (&a.chi.Status).PushAction(fmt.Sprintf(format, args...)) + if len(args) > 0 { + (&a.chi.Status).PushAction(fmt.Sprintf(format, args...)) + } else { + (&a.chi.Status).PushAction(fmt.Sprint(format)) + } } if a.writeStatusError { - (&a.chi.Status).SetAndPushError(fmt.Sprintf(format, args...)) + if len(args) > 0 { + (&a.chi.Status).SetAndPushError(fmt.Sprintf(format, args...)) + } else { + (&a.chi.Status).SetAndPushError(fmt.Sprint(format)) + } } // Propagate status updates into object if a.writeStatusAction || a.writeStatusActions || a.writeStatusError { - _ = a.c.updateCHIObjectStatus(a.chi, true) + _ = a.ctrl.updateCHIObjectStatus(a.chi, true) } } diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 6deb32c78..ab486b777 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -27,7 +27,7 @@ import ( chopmodels "github.com/altinity/clickhouse-operator/pkg/model" "github.com/altinity/clickhouse-operator/pkg/util" - log "github.com/golang/glog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" "gopkg.in/d4l3k/messagediff.v1" apps "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" @@ -58,7 +58,7 @@ func NewController( // Setup events eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(log.Infof) + eventBroadcaster.StartLogging(log.Info) eventBroadcaster.StartRecordingToSink( &typedcore.EventSinkImpl{ Interface: kubeClient.CoreV1().Events(""), @@ -122,7 +122,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chi.Namespace) { return } - log.V(2).Infof("chiInformer.AddFunc - %s/%s added", chi.Namespace, chi.Name) + log.V(2).M(chi).Info("chiInformer.AddFunc") c.enqueueObject(chi.Namespace, chi.Name, NewReconcileChi(reconcileAdd, nil, chi)) }, UpdateFunc: func(old, new interface{}) { @@ -131,7 +131,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(newChi.Namespace) { return } - log.V(2).Info("chiInformer.UpdateFunc") + log.V(2).M(newChi).Info("chiInformer.UpdateFunc") c.enqueueObject(newChi.Namespace, newChi.Name, NewReconcileChi(reconcileUpdate, oldChi, newChi)) }, DeleteFunc: func(obj interface{}) { @@ -139,7 +139,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chi.Namespace) { return } - log.V(2).Infof("chiInformer.DeleteFunc - CHI %s/%s deleted", chi.Namespace, chi.Name) + log.V(2).M(chi).Info("chiInformer.DeleteFunc") c.enqueueObject(chi.Namespace, chi.Name, NewReconcileChi(reconcileDelete, chi, nil)) }, }) @@ -150,7 +150,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chit.Namespace) { return } - log.V(2).Infof("chitInformer.AddFunc - %s/%s added", chit.Namespace, chit.Name) + log.V(2).M(chit).Info("chitInformer.AddFunc") c.enqueueObject(chit.Namespace, chit.Name, NewReconcileChit(reconcileAdd, nil, chit)) }, UpdateFunc: func(old, new interface{}) { @@ -159,7 +159,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(newChit.Namespace) { return } - log.V(2).Infof("chitInformer.UpdateFunc - %s/%s", newChit.Namespace, newChit.Name) + log.V(2).M(newChit).Info("chitInformer.UpdateFunc") c.enqueueObject(newChit.Namespace, newChit.Name, NewReconcileChit(reconcileUpdate, oldChit, newChit)) }, DeleteFunc: func(obj interface{}) { @@ -167,7 +167,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chit.Namespace) { return } - 
log.V(2).Infof("chitInformer.DeleteFunc - %s/%s deleted", chit.Namespace, chit.Name) + log.V(2).M(chit).Info("chitInformer.DeleteFunc") c.enqueueObject(chit.Namespace, chit.Name, NewReconcileChit(reconcileDelete, chit, nil)) }, }) @@ -178,7 +178,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chopConfig.Namespace) { return } - log.V(2).Infof("chopInformer.AddFunc - %s/%s added", chopConfig.Namespace, chopConfig.Name) + log.V(2).M(chopConfig).Info("chopInformer.AddFunc") c.enqueueObject(chopConfig.Namespace, chopConfig.Name, NewReconcileChopConfig(reconcileAdd, nil, chopConfig)) }, UpdateFunc: func(old, new interface{}) { @@ -187,7 +187,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(newChopConfig.Namespace) { return } - log.V(2).Infof("chopInformer.UpdateFunc - %s/%s", newChopConfig.Namespace, newChopConfig.Name) + log.V(2).M(newChopConfig).Info("chopInformer.UpdateFunc") c.enqueueObject(newChopConfig.Namespace, newChopConfig.Name, NewReconcileChopConfig(reconcileUpdate, oldChopConfig, newChopConfig)) }, DeleteFunc: func(obj interface{}) { @@ -195,7 +195,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chopConfig.Namespace) { return } - log.V(2).Infof("chopInformer.DeleteFunc - %s/%s deleted", chopConfig.Namespace, chopConfig.Name) + log.V(2).M(chopConfig).Info("chopInformer.DeleteFunc") c.enqueueObject(chopConfig.Namespace, chopConfig.Name, NewReconcileChopConfig(reconcileDelete, chopConfig, nil)) }, }) @@ -206,20 +206,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&service.ObjectMeta) { return } - log.V(2).Infof("serviceInformer AddFunc %s/%s", service.Namespace, service.Name) + log.V(3).M(service).Info("serviceInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { oldService := old.(*core.Service) if !c.isTrackedObject(&oldService.ObjectMeta) { return } + log.V(3).M(oldService).Info("serviceInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { service := obj.(*core.Service) if !c.isTrackedObject(&service.ObjectMeta) { return } - log.V(2).Infof("serviceInformer DeleteFunc %s/%s", service.Namespace, service.Name) + log.V(3).M(service).Info("serviceInformer.DeleteFunc") }, }) @@ -229,7 +230,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&endpoints.ObjectMeta) { return } - log.V(2).Infof("endpointsInformer AddFunc %s/%s", endpoints.Namespace, endpoints.Name) + log.V(3).M(endpoints).Info("endpointsInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { oldEndpoints := old.(*core.Endpoints) @@ -240,14 +241,14 @@ func (c *Controller) addEventHandlers( diff, equal := messagediff.DeepDiff(oldEndpoints, newEndpoints) if equal { - log.V(2).Infof("onUpdateEndpoints(%s/%s): no changes found", oldEndpoints.Namespace, oldEndpoints.Name) + log.V(2).M(oldEndpoints).Info("endpointsInformer.UpdateFunc: no changes found") // No need to react return } added := false for path := range diff.Added { - log.V(2).Infof("onUpdateEndpoints(%s/%s): added %v", oldEndpoints.Namespace, oldEndpoints.Name, path) + log.V(2).M(oldEndpoints).Info("endpointsInformer.UpdateFunc: added %v", path) for _, pathnode := range *path { s := pathnode.String() if s == ".Addresses" { @@ -256,14 +257,14 @@ func (c *Controller) addEventHandlers( } } for path := range diff.Removed { - log.V(2).Infof("onUpdateEndpoints(%s/%s): removed %v", oldEndpoints.Namespace, oldEndpoints.Name, path) + log.V(2).M(oldEndpoints).Info("endpointsInformer.UpdateFunc: removed %v", path) } for path 
@@ -282,21 +283,21 @@ func (c *Controller) addEventHandlers(
 			if !c.isTrackedObject(&configMap.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("configMapInformer AddFunc %s/%s", configMap.Namespace, configMap.Name)
+			log.V(3).M(configMap).Info("configMapInformer.AddFunc")
 		},
 		UpdateFunc: func(old, new interface{}) {
 			configMap := old.(*core.ConfigMap)
 			if !c.isTrackedObject(&configMap.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("configMapInformer UpdateFunc %s/%s", configMap.Namespace, configMap.Name)
+			log.V(3).M(configMap).Info("configMapInformer.UpdateFunc")
 		},
 		DeleteFunc: func(obj interface{}) {
 			configMap := obj.(*core.ConfigMap)
 			if !c.isTrackedObject(&configMap.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("configMapInformer DeleteFunc %s/%s", configMap.Namespace, configMap.Name)
+			log.V(3).M(configMap).Info("configMapInformer.DeleteFunc")
 		},
 	})
@@ -306,7 +307,7 @@ func (c *Controller) addEventHandlers(
 			if !c.isTrackedObject(&statefulSet.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("statefulSetInformer AddFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
+			log.V(3).M(statefulSet).Info("statefulSetInformer.AddFunc")
 			//controller.handleObject(obj)
 		},
 		UpdateFunc: func(old, new interface{}) {
@@ -314,14 +315,14 @@ func (c *Controller) addEventHandlers(
 			statefulSet := old.(*apps.StatefulSet)
 			if !c.isTrackedObject(&statefulSet.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("statefulSetInformer UpdateFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
+			log.V(3).M(statefulSet).Info("statefulSetInformer.UpdateFunc")
 		},
 		DeleteFunc: func(obj interface{}) {
 			statefulSet := obj.(*apps.StatefulSet)
 			if !c.isTrackedObject(&statefulSet.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("statefulSetInformer DeleteFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
+			log.V(3).M(statefulSet).Info("statefulSetInformer.DeleteFunc")
 			//controller.handleObject(obj)
 		},
 	})
@@ -332,21 +333,21 @@ func (c *Controller) addEventHandlers(
 			if !c.isTrackedObject(&pod.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("podInformer AddFunc %s/%s", pod.Namespace, pod.Name)
+			log.V(3).M(pod).Info("podInformer.AddFunc")
 		},
 		UpdateFunc: func(old, new interface{}) {
 			pod := old.(*core.Pod)
 			if !c.isTrackedObject(&pod.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("podInformer UpdateFunc %s/%s", pod.Namespace, pod.Name)
+			log.V(3).M(pod).Info("podInformer.UpdateFunc")
 		},
 		DeleteFunc: func(obj interface{}) {
 			pod := obj.(*core.Pod)
 			if !c.isTrackedObject(&pod.ObjectMeta) {
 				return
 			}
-			log.V(2).Infof("podInformer DeleteFunc %s/%s", pod.Namespace, pod.Name)
+			log.V(3).M(pod).Info("podInformer.DeleteFunc")
 		},
 	})
 }
@@ -385,15 +386,15 @@ func (c *Controller) Run(ctx context.Context) {
 	// Start threads
 	//
 	workersNum := len(c.queues)
-	log.V(1).Infof("ClickHouseInstallation controller: starting workers number: %d", workersNum)
+	log.V(1).A().Info("ClickHouseInstallation controller: starting workers number: %d", workersNum)
 	for i := 0; i < workersNum; i++ {
-		log.V(1).Infof("ClickHouseInstallation controller: starting worker %d out of %d", i+1, workersNum)
+		log.V(1).A().Info("ClickHouseInstallation controller: starting worker %d out of %d", i+1, workersNum)
 		worker := c.newWorker(c.queues[i])
 		go wait.Until(worker.run, runWorkerPeriod, ctx.Done())
 	}
-	defer log.V(1).Info("ClickHouseInstallation controller: shutting down workers")
+	defer log.V(1).A().Info("ClickHouseInstallation controller: shutting down workers")

-	log.V(1).Info("ClickHouseInstallation controller: workers started")
+	log.V(1).A().Info("ClickHouseInstallation controller: workers started")
 	<-ctx.Done()
 }
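// Aside: the log call chains introduced throughout this diff come from pkg/announcer. A rough
// sketch of how such a chained logger composes its prefix; the method semantics are inferred
// from usage in this diff (V = verbosity gate, M = subject namespace/name, F = caller function,
// S/E = start/end markers, P = print with no extra message, A = louder "announce"), and are
// not taken verbatim from the package.
package main

import "log"

type announcer struct{ prefix string }

func (a announcer) V(level int) announcer { return a } // verbosity gating elided in this sketch
func (a announcer) M(m ...interface{}) announcer {
	return announcer{prefix: a.prefix + "subject "} // would render namespace/name of m
}
func (a announcer) F() announcer { return announcer{prefix: a.prefix + "caller() "} }
func (a announcer) S() announcer { return announcer{prefix: a.prefix + "START "} }
func (a announcer) E() announcer { return announcer{prefix: a.prefix + "END "} }
func (a announcer) A() announcer { return a } // escalate: also surface as an announcement
func (a announcer) P()           { a.Info("") }
func (a announcer) Info(format string, args ...interface{}) {
	log.Printf(a.prefix+format, args...)
}

func main() {
	var a announcer
	a.V(1).M("ns", "name").F().Info("OK update watch")
}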
number: %d", workersNum) + log.V(1).A().Info("ClickHouseInstallation controller: starting workers number: %d", workersNum) for i := 0; i < workersNum; i++ { - log.V(1).Infof("ClickHouseInstallation controller: starting worker %d out of %d", i+1, workersNum) + log.V(1).A().Info("ClickHouseInstallation controller: starting worker %d out of %d", i+1, workersNum) worker := c.newWorker(c.queues[i]) go wait.Until(worker.run, runWorkerPeriod, ctx.Done()) } - defer log.V(1).Info("ClickHouseInstallation controller: shutting down workers") + defer log.V(1).A().Info("ClickHouseInstallation controller: shutting down workers") - log.V(1).Info("ClickHouseInstallation controller: workers started") + log.V(1).A().Info("ClickHouseInstallation controller: workers started") <-ctx.Done() } @@ -425,9 +426,9 @@ func (c *Controller) updateWatch(namespace, name string, hostnames []string) { // updateWatchAsync func (c *Controller) updateWatchAsync(namespace, name string, hostnames []string) { if err := metrics.InformMetricsExporterAboutWatchedCHI(namespace, name, hostnames); err != nil { - log.V(1).Infof("FAIL update watch (%s/%s): %q", namespace, name, err) + log.V(1).A().Info("FAIL update watch (%s/%s): %q", namespace, name, err) } else { - log.V(2).Infof("OK update watch (%s/%s)", namespace, name) + log.V(2).Info("OK update watch (%s/%s)", namespace, name) } } @@ -439,15 +440,15 @@ func (c *Controller) deleteWatch(namespace, name string) { // deleteWatchAsync func (c *Controller) deleteWatchAsync(namespace, name string) { if err := metrics.InformMetricsExporterToDeleteWatchedCHI(namespace, name); err != nil { - log.V(1).Infof("FAIL delete watch (%s/%s): %q", namespace, name, err) + log.V(1).A().Info("FAIL delete watch (%s/%s): %q", namespace, name, err) } else { - log.V(2).Infof("OK delete watch (%s/%s)", namespace, name) + log.V(2).Info("OK delete watch (%s/%s)", namespace, name) } } // addChit sync new CHIT - creates all its resources func (c *Controller) addChit(chit *chi.ClickHouseInstallationTemplate) error { - log.V(1).Infof("addChit(%s/%s)", chit.Namespace, chit.Name) + log.V(1).M(chit).F().P() c.chop.Config().AddCHITemplate((*chi.ClickHouseInstallation)(chit)) return nil } @@ -455,19 +456,19 @@ func (c *Controller) addChit(chit *chi.ClickHouseInstallationTemplate) error { // updateChit sync CHIT which was already created earlier func (c *Controller) updateChit(old, new *chi.ClickHouseInstallationTemplate) error { if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion { - log.V(2).Infof("updateChit(%s/%s): ResourceVersion did not change: %s", old.Namespace, old.Name, old.ObjectMeta.ResourceVersion) + log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.ObjectMeta.ResourceVersion) // No need to react return nil } - log.V(2).Infof("updateChit(%s/%s):", new.Namespace, new.Name) + log.V(2).M(new).F().Info("ResourceVersion change: %s to %s", old.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion) c.chop.Config().UpdateCHITemplate((*chi.ClickHouseInstallation)(new)) return nil } // deleteChit deletes CHIT func (c *Controller) deleteChit(chit *chi.ClickHouseInstallationTemplate) error { - log.V(2).Infof("deleteChit(%s/%s):", chit.Namespace, chit.Name) + log.V(2).M(chit).F().P() c.chop.Config().DeleteCHITemplate((*chi.ClickHouseInstallation)(chit)) return nil } @@ -475,9 +476,9 @@ func (c *Controller) deleteChit(chit *chi.ClickHouseInstallationTemplate) error // addChopConfig func (c *Controller) addChopConfig(chopConfig *chi.ClickHouseOperatorConfiguration) error { if 
@@ -475,9 +476,9 @@
 // addChopConfig
 func (c *Controller) addChopConfig(chopConfig *chi.ClickHouseOperatorConfiguration) error {
 	if c.chop.ConfigManager.IsConfigListed(chopConfig) {
-		log.V(1).Infof("addChopConfig(%s/%s) already known config - do nothing", chopConfig.Namespace, chopConfig.Name)
+		log.V(1).M(chopConfig).F().Info("already known config - do nothing")
 	} else {
-		log.V(1).Infof("addChopConfig(%s/%s) new, previously unknown config, need to apply", chopConfig.Namespace, chopConfig.Name)
+		log.V(1).M(chopConfig).F().Info("new, previously unknown config, need to apply")
 		// TODO
 		// NEED REFACTORING
 		// os.Exit(0)
@@ -489,12 +490,12 @@
 // updateChopConfig
 func (c *Controller) updateChopConfig(old, new *chi.ClickHouseOperatorConfiguration) error {
 	if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion {
-		log.V(2).Infof("updateChopConfig(%s/%s): ResourceVersion did not change: %s", old.Namespace, old.Name, old.ObjectMeta.ResourceVersion)
+		log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.ObjectMeta.ResourceVersion)
 		// No need to react
 		return nil
 	}

-	log.V(2).Infof("updateChopConfig(%s/%s):", new.Namespace, new.Name)
+	log.V(2).M(new).F().Info("ResourceVersion change: %s to %s", old.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion)
 	// TODO
 	// NEED REFACTORING
 	//os.Exit(0)
@@ -504,7 +505,7 @@
 // deleteChit deletes CHIT
 func (c *Controller) deleteChopConfig(chopConfig *chi.ClickHouseOperatorConfiguration) error {
-	log.V(2).Infof("deleteChopConfig(%s/%s):", chopConfig.Namespace, chopConfig.Name)
+	log.V(2).M(chopConfig).F().P()
 	// TODO
 	// NEED REFACTORING
 	//os.Exit(0)
@@ -514,20 +515,16 @@
 // updateCHIObject updates ClickHouseInstallation object
 func (c *Controller) updateCHIObject(chi *chi.ClickHouseInstallation) error {
-	namespace, name := util.NamespaceName(chi.ObjectMeta)
-	new, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Update(chi)
-
+	new, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.ObjectMeta.Namespace).Update(chi)
 	if err != nil {
 		// Error update
-		log.V(1).Infof("ERROR update CHI (%s/%s): %q", namespace, name, err)
+		log.V(1).M(chi).A().Error("%q", err)
 		return err
 	}

 	if chi.ObjectMeta.ResourceVersion != new.ObjectMeta.ResourceVersion {
 		// Updated
-		log.V(2).Infof("updateCHIObject(%s/%s): ResourceVersion bump %s=>%s",
-			namespace, name, chi.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion,
-		)
+		log.V(2).M(chi).F().Info("ResourceVersion change: %s to %s", chi.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion)
 		chi.ObjectMeta.ResourceVersion = new.ObjectMeta.ResourceVersion
 		return nil
 	}
@@ -540,21 +537,21 @@
 // updateCHIObjectStatus updates ClickHouseInstallation object's Status
 func (c *Controller) updateCHIObjectStatus(chi *chi.ClickHouseInstallation, tolerateAbsence bool) error {
 	namespace, name := util.NamespaceName(chi.ObjectMeta)
-	log.V(2).Infof("Update CHI status (%s/%s)", namespace, name)
+	log.V(2).M(chi).F().Info("Update CHI status")

 	cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions())
 	if err != nil {
 		if tolerateAbsence {
 			return nil
 		}
-		log.V(1).Infof("ERROR GetCHI (%s/%s): %q", namespace, name, err)
+		log.V(1).M(chi).A().Error("%q", err)
 		return err
 	}
 	if cur == nil {
 		if tolerateAbsence {
 			return nil
 		}
-		log.V(1).Infof("ERROR GetCHI (%s/%s): NULL returned", namespace, name)
+		log.V(1).M(chi).A().Error("NULL returned")
 		return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", namespace, name)
 	}
@@ -565,15 +562,15 @@
 // installFinalizer
 func (c *Controller) installFinalizer(chi *chi.ClickHouseInstallation) error {
-	namespace, name := util.NamespaceName(chi.ObjectMeta)
-	log.V(2).Infof("Update CHI status (%s/%s)", namespace, name)
+	log.V(2).M(chi).S().P()
+	defer log.V(2).M(chi).E().P()

-	cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions())
+	cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Get(chi.Name, newGetOptions())
 	if err != nil {
 		return err
 	}
 	if cur == nil {
-		return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", namespace, name)
+		return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", chi.Namespace, chi.Name)
 	}

 	if util.InArray(FinalizerName, cur.ObjectMeta.Finalizers) {
@@ -587,15 +584,15 @@
 // uninstallFinalizer
 func (c *Controller) uninstallFinalizer(chi *chi.ClickHouseInstallation) error {
-	namespace, name := util.NamespaceName(chi.ObjectMeta)
-	log.V(2).Infof("Update CHI status (%s/%s)", namespace, name)
+	log.V(2).M(chi).S().P()
+	defer log.V(2).M(chi).E().P()

-	cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions())
+	cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Get(chi.Name, newGetOptions())
 	if err != nil {
 		return err
 	}
 	if cur == nil {
-		return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", namespace, name)
+		return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", chi.Namespace, chi.Name)
 	}

 	cur.ObjectMeta.Finalizers = util.RemoveFromArray(FinalizerName, cur.ObjectMeta.Finalizers)
@@ -634,13 +631,13 @@ func (c *Controller) handleObject(obj interface{}) {
 		return
 	}

-	log.V(1).Infof("Processing object: %s", object.GetName())
+	log.V(1).Info("Processing object: %s", object.GetName())

 	// Get owner - it is expected to be CHI
 	chi, err := c.chiLister.ClickHouseInstallations(object.GetNamespace()).Get(ownerRef.Name)

 	if err != nil {
-		log.V(1).Infof("ignoring orphaned object '%s' of ClickHouseInstallation '%s'", object.GetSelfLink(), ownerRef.Name)
+		log.V(1).Info("ignoring orphaned object '%s' of ClickHouseInstallation '%s'", object.GetSelfLink(), ownerRef.Name)
 		return
 	}
@@ -650,11 +647,11 @@
 // waitForCacheSync is a logger-wrapper over cache.WaitForCacheSync() and it waits for caches to populate
 func waitForCacheSync(name string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool {
-	log.V(1).Infof("Syncing caches for %s controller", name)
+	log.V(1).F().Info("Syncing caches for %s controller", name)
 	if !cache.WaitForCacheSync(stopCh, cacheSyncs...) {
 		utilruntime.HandleError(fmt.Errorf(messageUnableToSync, name))
 		return false
 	}
-	log.V(1).Infof("Caches are synced for %s controller", name)
+	log.V(1).F().Info("Caches are synced for %s controller", name)
 	return true
 }
diff --git a/pkg/controller/chi/creator.go b/pkg/controller/chi/creator.go
index fd2feb1af..d91140832 100644
--- a/pkg/controller/chi/creator.go
+++ b/pkg/controller/chi/creator.go
@@ -18,44 +18,44 @@ package chi
 import (
 	"errors"
 	"fmt"
-	log "github.com/golang/glog"
-	"k8s.io/api/core/v1"
+	"github.com/altinity/clickhouse-operator/pkg/util"

-	// log "k8s.io/klog"
+	apps "k8s.io/api/apps/v1"
+	"k8s.io/api/core/v1"

+	log "github.com/altinity/clickhouse-operator/pkg/announcer"
 	chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
-	apps "k8s.io/api/apps/v1"
 )

 // createStatefulSet is an internal function, used in reconcileStatefulSet only
 func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.ChiHost) error {
-	log.V(1).Infof("Create StatefulSet %s/%s", statefulSet.Namespace, statefulSet.Name)
-	if statefulSet, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil {
-		// Error call Create()
+	log.V(1).M(host).F().P()
+
+	if _, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil {
+		// Unable to create StatefulSet at all
 		return err
-	} else if err := c.waitHostReady(host); err == nil {
+	}
+
+	// StatefulSet created, wait until it is ready
+
+	if err := c.waitHostReady(host); err == nil {
 		// Target generation reached, StatefulSet created successfully
 		return nil
-	} else {
-		// Unable to run StatefulSet, StatefulSet create failed, time to rollback?
-		return c.onStatefulSetCreateFailed(statefulSet, host)
 	}

-	return fmt.Errorf("unexpected flow")
+	// Unable to run StatefulSet, StatefulSet create failed, time to rollback?
+	return c.onStatefulSetCreateFailed(statefulSet, host)
 }
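// Aside: the createStatefulSet rewrite above flattens nested if/else into early returns,
// which is what lets it drop the unreachable `return fmt.Errorf("unexpected flow")` tail.
// The control flow it implements, reduced to its skeleton:
func createThenWait(create, wait, onCreateFailed func() error) error {
	if err := create(); err != nil {
		return err // could not create the StatefulSet at all
	}
	if err := wait(); err == nil {
		return nil // target generation reached, host is ready
	}
	return onCreateFailed() // failure policy decides: abort, delete or ignore
}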
 // updateStatefulSet is an internal function, used in reconcileStatefulSet only
 func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStatefulSet *apps.StatefulSet, host *chop.ChiHost) error {
-	// Convenience shortcuts
-	namespace := newStatefulSet.Namespace
-	name := newStatefulSet.Name
-	log.V(2).Infof("updateStatefulSet(%s/%s)", namespace, name)
+	log.V(2).M(host).F().P()

 	// Apply newStatefulSet and wait for Generation to change
-	updatedStatefulSet, err := c.kubeClient.AppsV1().StatefulSets(namespace).Update(newStatefulSet)
+	updatedStatefulSet, err := c.kubeClient.AppsV1().StatefulSets(newStatefulSet.Namespace).Update(newStatefulSet)
 	if err != nil {
 		// Update failed
-		log.V(1).Infof("updateStatefulSet(%s/%s) - got err: %v", namespace, name, err)
+		log.V(1).M(host).A().Error("%v", err)
 		return err
 	}
@@ -65,18 +65,18 @@
 	if updatedStatefulSet.Generation == oldStatefulSet.Generation {
 		// Generation is not updated - no changes in .spec section were made
-		log.V(2).Infof("updateStatefulSet(%s/%s) - no generation change", namespace, name)
+		log.V(2).M(host).F().Info("no generation change")
 		return nil
 	}

-	log.V(1).Infof("updateStatefulSet(%s/%s) - generation change %d=>%d", namespace, name, oldStatefulSet.Generation, updatedStatefulSet.Generation)
+	log.V(1).M(host).F().Info("generation change %d=>%d", oldStatefulSet.Generation, updatedStatefulSet.Generation)

 	if err := c.waitHostReady(host); err == nil {
 		// Target generation reached, StatefulSet updated successfully
 		return nil
 	} else {
 		// Unable to run StatefulSet, StatefulSet update failed, time to rollback?
-		return c.onStatefulSetUpdateFailed(oldStatefulSet)
+		return c.onStatefulSetUpdateFailed(oldStatefulSet, host)
 	}

 	return fmt.Errorf("unexpected flow")
@@ -84,16 +84,13 @@
 // updateStatefulSet is an internal function, used in reconcileStatefulSet only
 func (c *Controller) updatePersistentVolume(pv *v1.PersistentVolume) error {
-	// Convenience shortcuts
-	namespace := pv.Namespace
-	name := pv.Name
-	log.V(2).Infof("updatePersistentVolume(%s/%s)", namespace, name)
+	log.V(2).M(pv).F().P()

 	// Apply newStatefulSet and wait for Generation to change
 	_, err := c.kubeClient.CoreV1().PersistentVolumes().Update(pv)
 	if err != nil {
 		// Update failed
-		log.V(1).Infof("updatePersistentVolume(%s/%s) - got err: %v", namespace, name, err)
+		log.V(1).M(pv).A().Error("%v", err)
 		return err
 	}
@@ -103,30 +100,26 @@
 // onStatefulSetCreateFailed handles situation when StatefulSet create failed
 // It can just delete failed StatefulSet or do nothing
 func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulSet, host *chop.ChiHost) error {
-	// Convenience shortcuts
-	namespace := failedStatefulSet.Namespace
-	name := failedStatefulSet.Name
-
 	// What to do with StatefulSet - look into chop configuration settings
 	switch c.chop.Config().OnStatefulSetCreateFailureAction {
 	case chop.OnStatefulSetCreateFailureActionAbort:
 		// Report appropriate error, it will break reconcile loop
-		log.V(1).Infof("onStatefulSetCreateFailed(%s/%s) - abort", namespace, name)
-		return errors.New(fmt.Sprintf("Create failed on %s/%s", namespace, name))
+		log.V(1).M(host).F().Info("abort")
+		return errors.New(fmt.Sprintf("Create failed on %s", util.NamespaceNameString(failedStatefulSet.ObjectMeta)))

 	case chop.OnStatefulSetCreateFailureActionDelete:
 		// Delete gracefully failed StatefulSet
-		log.V(1).Infof("onStatefulSetCreateFailed(%s/%s) - going to DELETE FAILED StatefulSet", namespace, name)
+		log.V(1).M(host).F().Info("going to DELETE FAILED StatefulSet %s", util.NamespaceNameString(failedStatefulSet.ObjectMeta))
 		_ = c.deleteHost(host)
 		return c.shouldContinueOnCreateFailed()

 	case chop.OnStatefulSetCreateFailureActionIgnore:
 		// Ignore error, continue reconcile loop
-		log.V(1).Infof("onStatefulSetCreateFailed(%s/%s) - going to ignore error", namespace, name)
+		log.V(1).M(host).F().Info("going to ignore error %s", util.NamespaceNameString(failedStatefulSet.ObjectMeta))
 		return nil

 	default:
-		log.V(1).Infof("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", c.chop.Config().OnStatefulSetCreateFailureAction)
+		log.V(1).M(host).A().Error("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", c.chop.Config().OnStatefulSetCreateFailureAction)
 		return nil
 	}
@@ -135,7 +128,7 @@
 // onStatefulSetUpdateFailed handles situation when StatefulSet update failed
 // It can try to revert StatefulSet to its previous version, specified in rollbackStatefulSet
-func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.StatefulSet) error {
+func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.StatefulSet, host *chop.ChiHost) error {
 	// Convenience shortcuts
 	namespace := rollbackStatefulSet.Namespace
 	name := rollbackStatefulSet.Name
@@ -144,12 +137,12 @@
 	switch c.chop.Config().OnStatefulSetUpdateFailureAction {
 	case chop.OnStatefulSetUpdateFailureActionAbort:
 		// Report appropriate error, it will break reconcile loop
-		log.V(1).Infof("onStatefulSetUpdateFailed(%s/%s) - abort", namespace, name)
+		log.V(1).M(host).F().Info("abort StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
 		return errors.New(fmt.Sprintf("Update failed on %s/%s", namespace, name))

 	case chop.OnStatefulSetUpdateFailureActionRollback:
 		// Need to revert current StatefulSet to oldStatefulSet
-		log.V(1).Infof("onStatefulSetUpdateFailed(%s/%s) - going to ROLLBACK FAILED StatefulSet", namespace, name)
+		log.V(1).M(host).F().Info("going to ROLLBACK FAILED StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
 		if statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name); err != nil {
 			// Unable to get StatefulSet
 			return err
@@ -161,18 +154,18 @@
 			// This will rollback Pod to previous .spec
 			statefulSet.Spec = *rollbackStatefulSet.Spec.DeepCopy()
 			statefulSet, err = c.kubeClient.AppsV1().StatefulSets(namespace).Update(statefulSet)
-			_ = c.statefulSetDeletePod(statefulSet)
+			_ = c.statefulSetDeletePod(statefulSet, host)
 			return c.shouldContinueOnUpdateFailed()
 		}

 	case chop.OnStatefulSetUpdateFailureActionIgnore:
 		// Ignore error, continue reconcile loop
-		log.V(1).Infof("onStatefulSetUpdateFailed(%s/%s) - going to ignore error", namespace, name)
+		log.V(1).M(host).F().Info("going to ignore error %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
 		return nil

 	default:
-		log.V(1).Infof("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", c.chop.Config().OnStatefulSetUpdateFailureAction)
+		log.V(1).M(host).A().Error("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", c.chop.Config().OnStatefulSetUpdateFailureAction)
 		return nil
 	}
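// Aside: both failure handlers above now thread the host through to statefulSetDeletePod.
// The rollback path re-applies the previous .Spec and then deletes the Pod: the StatefulSet
// controller notices the missing Pod and relaunches it from the rolled-back template. A
// sketch of that lever with the era-appropriate, pre-context client-go API used throughout
// this diff (meta is k8s.io/apimachinery/pkg/apis/meta/v1):
func kickPod(kube kubernetes.Interface, namespace, podName string) error {
	// Deleting the Pod is enough - its owning StatefulSet recreates it.
	return kube.CoreV1().Pods(namespace).Delete(podName, &meta.DeleteOptions{})
}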
diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go
index 1f35f9c99..ef490f32c 100644
--- a/pkg/controller/chi/deleter.go
+++ b/pkg/controller/chi/deleter.go
@@ -15,14 +15,11 @@ package chi
 import (
-	"k8s.io/api/core/v1"
-	"time"
-
-	log "github.com/golang/glog"
-	// log "k8s.io/klog"
 	apps "k8s.io/api/apps/v1"
+	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

+	log "github.com/altinity/clickhouse-operator/pkg/announcer"
 	chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	chopmodel "github.com/altinity/clickhouse-operator/pkg/model"
 )
@@ -37,14 +34,14 @@ func (c *Controller) deleteHost(host *chop.ChiHost) error {
 	// 5. Service
 	// Need to delete all these item

-	log.V(1).Infof("Controller delete host started %s/%s", host.Address.ClusterName, host.Name)
+	log.V(1).M(host).S().Info(host.Address.ClusterNameString())

 	_ = c.deleteStatefulSet(host)
 	_ = c.deletePVC(host)
 	_ = c.deleteConfigMap(host)
 	_ = c.deleteServiceHost(host)

-	log.V(1).Infof("Controller delete host completed %s/%s", host.Address.ClusterName, host.Name)
+	log.V(1).M(host).E().Info(host.Address.ClusterNameString())

 	return nil
 }
@@ -65,39 +62,39 @@ func (c *Controller) deleteConfigMapsCHI(chi *chop.ClickHouseInstallation) error
 	// Delete ConfigMap
 	err = c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).Delete(configMapCommon, newDeleteOptions())
 	if err == nil {
-		log.V(1).Infof("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommon)
+		log.V(1).M(chi).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommon)
 	} else if apierrors.IsNotFound(err) {
-		log.V(1).Infof("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommon)
+		log.V(1).M(chi).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommon)
 		err = nil
 	} else {
-		log.V(1).Infof("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommon, err)
+		log.V(1).M(chi).A().Error("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommon, err)
 	}

 	err = c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).Delete(configMapCommonUsersName, newDeleteOptions())
 	if err == nil {
-		log.V(1).Infof("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName)
+		log.V(1).M(chi).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName)
 	} else if apierrors.IsNotFound(err) {
-		log.V(1).Infof("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName)
+		log.V(1).M(chi).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName)
 		err = nil
 	} else {
-		log.V(1).Infof("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommonUsersName, err)
+		log.V(1).M(chi).A().Error("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommonUsersName, err)
 	}

 	return err
 }

 // statefulSetDeletePod deletes a pod of a StatefulSet. This requests StatefulSet to relaunch deleted pod
-func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet) error {
+func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet, host *chop.ChiHost) error {
 	name := chopmodel.CreatePodName(statefulSet)
-	log.V(1).Infof("Delete Pod %s/%s", statefulSet.Namespace, name)
+	log.V(1).M(host).Info("Delete Pod %s/%s", statefulSet.Namespace, name)
 	err := c.kubeClient.CoreV1().Pods(statefulSet.Namespace).Delete(name, newDeleteOptions())
 	if err == nil {
-		log.V(1).Infof("OK delete Pod %s/%s", statefulSet.Namespace, name)
+		log.V(1).M(host).Info("OK delete Pod %s/%s", statefulSet.Namespace, name)
 	} else if apierrors.IsNotFound(err) {
-		log.V(1).Infof("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name)
+		log.V(1).M(host).Info("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name)
 		err = nil
 	} else {
-		log.V(1).Infof("FAIL delete Pod %s/%s err:%v", statefulSet.Namespace, name, err)
+		log.V(1).M(host).A().Error("FAIL delete Pod %s/%s err:%v", statefulSet.Namespace, name, err)
 	}

 	return err
@@ -114,78 +111,68 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error {
 	name := chopmodel.CreateStatefulSetName(host)
 	namespace := host.Address.Namespace
-	log.V(1).Infof("deleteStatefulSet(%s/%s)", namespace, name)
+	log.V(1).M(host).F().Info("%s/%s", namespace, name)

-	statefulSet, err := c.getStatefulSetByHost(host)
-	if err != nil {
+	if sts, err := c.getStatefulSet(host); err == nil {
+		host.StatefulSet = sts
+	} else {
 		if apierrors.IsNotFound(err) {
-			log.V(1).Infof("NEUTRAL not found StatefulSet %s/%s", namespace, name)
+			log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name)
 		} else {
-			log.V(1).Infof("error get StatefulSet %s/%s err:%v", namespace, name, err)
+			log.V(1).M(host).A().Error("FAIL get StatefulSet %s/%s err:%v", namespace, name, err)
 		}
-		return nil
+		return err
 	}

 	// Scale StatefulSet down to 0 pods count.
 	// This is the proper and graceful way to delete StatefulSet
 	var zero int32 = 0
-	statefulSet.Spec.Replicas = &zero
-	statefulSet, _ = c.kubeClient.AppsV1().StatefulSets(namespace).Update(statefulSet)
-	_ = c.waitStatefulSetReady(statefulSet)
-	host.StatefulSet = statefulSet
+	host.StatefulSet.Spec.Replicas = &zero
+	if _, err := c.kubeClient.AppsV1().StatefulSets(namespace).Update(host.StatefulSet); err != nil {
+		log.V(1).M(host).Error("UNABLE to update StatefulSet %s/%s", namespace, name)
+		return err
+	}
+
+	// Wait until StatefulSet scales down to 0 pods count.
+	_ = c.waitHostReady(host)

 	// And now delete empty StatefulSet
 	if err := c.kubeClient.AppsV1().StatefulSets(namespace).Delete(name, newDeleteOptions()); err == nil {
-		log.V(1).Infof("OK delete StatefulSet %s/%s", namespace, name)
-		c.syncStatefulSet(host)
+		log.V(1).M(host).Info("OK delete StatefulSet %s/%s", namespace, name)
+		c.waitHostDeleted(host)
 	} else if apierrors.IsNotFound(err) {
-		log.V(1).Infof("NEUTRAL not found StatefulSet %s/%s", namespace, name)
+		log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name)
 		err = nil
 	} else {
-		log.V(1).Infof("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err)
+		log.V(1).M(host).A().Error("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err)
 		return nil
 	}

 	return nil
 }

-// syncStatefulSet
-func (c *Controller) syncStatefulSet(host *chop.ChiHost) {
-	for {
-		// TODO
-		// There should be better way to sync cache
-		if _, err := c.getStatefulSetByHost(host); err == nil {
-			log.V(2).Infof("cache NOT yet synced")
-			time.Sleep(15 * time.Second)
-		} else {
-			log.V(1).Infof("cache synced")
-			return
-		}
-	}
-}
-
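// Aside: deleteStatefulSet above is the graceful teardown pattern: scale to zero, wait for
// the pods to terminate, then delete the now-empty StatefulSet. Compressed into one sketch
// (pre-context client-go signatures, as used in this diff; the waiting step is delegated):
func gracefulDeleteSTS(kube kubernetes.Interface, sts *apps.StatefulSet, waitScaledDown func() error) error {
	var zero int32 = 0
	sts.Spec.Replicas = &zero
	if _, err := kube.AppsV1().StatefulSets(sts.Namespace).Update(sts); err != nil {
		return err
	}
	_ = waitScaledDown() // zero ready replicas == fully scaled down
	return kube.AppsV1().StatefulSets(sts.Namespace).Delete(sts.Name, &meta.DeleteOptions{})
}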
 // deletePVC deletes PersistentVolumeClaim
 func (c *Controller) deletePVC(host *chop.ChiHost) error {
-	log.V(2).Info("deletePVC() - start")
-	defer log.V(2).Info("deletePVC() - end")
+	log.V(2).M(host).S().P()
+	defer log.V(2).M(host).E().P()

 	namespace := host.Address.Namespace
 	c.walkActualPVCs(host, func(pvc *v1.PersistentVolumeClaim) {
 		if !chopmodel.HostCanDeletePVC(host, pvc.Name) {
-			log.V(1).Infof("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name)
+			log.V(1).M(host).Info("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name)
 			// Move to the next PVC
 			return
 		}

 		// Actually delete PVC
 		if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(pvc.Name, newDeleteOptions()); err == nil {
-			log.V(1).Infof("OK delete PVC %s/%s", namespace, pvc.Name)
+			log.V(1).M(host).Info("OK delete PVC %s/%s", namespace, pvc.Name)
 		} else if apierrors.IsNotFound(err) {
-			log.V(1).Infof("NEUTRAL not found PVC %s/%s", namespace, pvc.Name)
+			log.V(1).M(host).Info("NEUTRAL not found PVC %s/%s", namespace, pvc.Name)
 			err = nil
 		} else {
-			log.Errorf("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err)
+			log.M(host).A().Error("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err)
 		}
 	})
@@ -197,15 +184,15 @@ func (c *Controller) deleteConfigMap(host *chop.ChiHost) error {
 	name := chopmodel.CreateConfigMapPodName(host)
 	namespace := host.Address.Namespace
-	log.V(1).Infof("deleteConfigMap(%s/%s)", namespace, name)
+	log.V(1).M(host).F().Info("%s/%s", namespace, name)

 	if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(name, newDeleteOptions()); err == nil {
-		log.V(1).Infof("OK delete ConfigMap %s/%s", namespace, name)
+		log.V(1).M(host).Info("OK delete ConfigMap %s/%s", namespace, name)
 	} else if apierrors.IsNotFound(err) {
-		log.V(1).Infof("NEUTRAL not found ConfigMap %s/%s", namespace, name)
+		log.V(1).M(host).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name)
 		err = nil
 	} else {
-		log.V(1).Infof("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err)
+		log.V(1).M(host).A().Error("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err)
 	}

 	return nil
@@ -215,7 +202,7 @@ func (c *Controller) deleteServiceHost(host *chop.ChiHost) error {
 	serviceName := chopmodel.CreateStatefulSetServiceName(host)
 	namespace := host.Address.Namespace
-	log.V(1).Infof("deleteServiceReplica(%s/%s)", namespace, serviceName)
+	log.V(1).M(host).F().Info("%s/%s", namespace, serviceName)
 	return c.deleteServiceIfExists(namespace, serviceName)
 }
@@ -223,7 +210,7 @@ func (c *Controller) deleteServiceShard(shard *chop.ChiShard) error {
 	serviceName := chopmodel.CreateShardServiceName(shard)
 	namespace := shard.Address.Namespace
-	log.V(1).Infof("deleteServiceShard(%s/%s)", namespace, serviceName)
+	log.V(1).M(shard).F().Info("%s/%s", namespace, serviceName)
 	return c.deleteServiceIfExists(namespace, serviceName)
 }
@@ -231,7 +218,7 @@ func (c *Controller) deleteServiceCluster(cluster *chop.ChiCluster) error {
 	serviceName := chopmodel.CreateClusterServiceName(cluster)
 	namespace := cluster.Address.Namespace
-	log.V(1).Infof("deleteServiceCluster(%s/%s)", namespace, serviceName)
+	log.V(1).M(cluster).F().Info("%s/%s", namespace, serviceName)
 	return c.deleteServiceIfExists(namespace, serviceName)
 }
@@ -239,7 +226,7 @@ func (c *Controller) deleteServiceCHI(chi *chop.ClickHouseInstallation) error {
 	serviceName := chopmodel.CreateCHIServiceName(chi)
 	namespace := chi.Namespace
-	log.V(1).Infof("deleteServiceCHI(%s/%s)", namespace, serviceName)
+	log.V(1).M(chi).F().Info("%s/%s", namespace, serviceName)
 	return c.deleteServiceIfExists(namespace, serviceName)
 }
@@ -258,9 +245,9 @@ func (c *Controller) deleteServiceIfExists(namespace, name string) error {
 	// Delete service
 	err = c.kubeClient.CoreV1().Services(namespace).Delete(name, newDeleteOptions())
 	if err == nil {
-		log.V(1).Infof("OK delete Service %s/%s", namespace, name)
+		log.V(1).M(namespace, name).Info("OK delete Service %s/%s", namespace, name)
 	} else {
-		log.V(1).Infof("FAIL delete Service %s/%s err:%v", namespace, name, err)
+		log.V(1).M(namespace, name).A().Error("FAIL delete Service %s/%s err:%v", namespace, name, err)
 	}

 	return err
diff --git a/pkg/controller/chi/event.go b/pkg/controller/chi/event.go
index f89a2fb68..92233a4a8 100644
--- a/pkg/controller/chi/event.go
+++ b/pkg/controller/chi/event.go
@@ -17,11 +17,10 @@ package chi
 import (
 	"time"

-	log "github.com/golang/glog"
-	// log "k8s.io/klog"
 	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

+	log "github.com/altinity/clickhouse-operator/pkg/announcer"
 	chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 )
@@ -60,7 +59,7 @@ const (
 	eventReasonDeleteFailed = "DeleteFailed"
 )

-func (c *Controller) eventInfo(
+func (c *Controller) EventInfo(
 	chi *chop.ClickHouseInstallation,
 	action string,
 	reason string,
@@ -69,7 +68,7 @@
 	c.emitEvent(chi, eventTypeInfo, action, reason, message)
 }

-func (c *Controller) eventWarning(
+func (c *Controller) EventWarning(
 	chi *chop.ClickHouseInstallation,
 	action string,
 	reason string,
@@ -78,7 +77,7 @@
 	c.emitEvent(chi, eventTypeWarning, action, reason, message)
 }

-func (c *Controller) eventError(
+func (c *Controller) EventError(
 	chi *chop.ClickHouseInstallation,
 	action string,
 	reason string,
@@ -139,6 +138,6 @@
 	_, err := c.kubeClient.CoreV1().Events(namespace).Create(event)
 	if err != nil {
-		log.V(1).Infof("Create Event failed: %v", err)
+		log.M(chi).A().Error("Create Event failed: %v", err)
 	}
 }
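// Aside: eventInfo/eventWarning/eventError become exported (EventInfo/EventWarning/EventError)
// above, so code outside this file - e.g. the reconcile worker's announcer - can emit
// Kubernetes Events against a CHI. A hypothetical call site, reusing the action/reason
// constants from this file:
//
//	ctrl.EventInfo(chi, eventActionReconcile, eventReasonReconcileStarted, "reconcile started")
//
// emitEvent then materializes the call as a core.Event in the CHI's namespace.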
diff --git a/pkg/controller/chi/getter.go b/pkg/controller/chi/getter.go
index 047a1414c..81c70b58d 100644
--- a/pkg/controller/chi/getter.go
+++ b/pkg/controller/chi/getter.go
@@ -124,9 +124,26 @@ func (c *Controller) getService(objMeta *meta.ObjectMeta, byNameOnly bool) (*cor
 	return nil, fmt.Errorf("too much objects found %d expecting 1", len(objects))
 }

+// getStatefulSet gets StatefulSet. Accepted types:
+//   1. *meta.ObjectMeta
+//   2. *chop.ChiHost
+func (c *Controller) getStatefulSet(obj interface{}, byName ...bool) (*apps.StatefulSet, error) {
+	switch typedObj := obj.(type) {
+	case *meta.ObjectMeta:
+		var b bool
+		if len(byName) > 0 {
+			b = byName[0]
+		}
+		return c.getStatefulSetByMeta(typedObj, b)
+	case *chop.ChiHost:
+		return c.getStatefulSetByHost(typedObj)
+	}
+	return nil, fmt.Errorf("unknown type")
+}
+
 // getStatefulSetByMeta gets StatefulSet either by namespaced name or by labels
 // TODO review byNameOnly params
-func (c *Controller) getStatefulSet(objMeta *meta.ObjectMeta, byNameOnly bool) (*apps.StatefulSet, error) {
+func (c *Controller) getStatefulSetByMeta(objMeta *meta.ObjectMeta, byNameOnly bool) (*apps.StatefulSet, error) {
 	get := c.statefulSetLister.StatefulSets(objMeta.Namespace).Get
 	list := c.statefulSetLister.StatefulSets(objMeta.Namespace).List
 	var objects []*apps.StatefulSet
@@ -181,6 +198,22 @@ func (c *Controller) getStatefulSetByHost(host *chop.ChiHost) (*apps.StatefulSet
 	return c.statefulSetLister.StatefulSets(namespace).Get(name)
 }

+// getPod gets pod for host or StatefulSet. Accepted types:
+//   1. *apps.StatefulSet
+//   2. *chop.ChiHost
+func (c *Controller) getPod(obj interface{}) (*core.Pod, error) {
+	var name, namespace string
+	switch typedObj := obj.(type) {
+	case *chop.ChiHost:
+		name = chopmodel.CreatePodName(obj)
+		namespace = typedObj.Address.Namespace
+	case *apps.StatefulSet:
+		name = chopmodel.CreatePodName(obj)
+		namespace = typedObj.Namespace
+	}
+	return c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions())
+}
+
 // GetCHIByObjectMeta gets CHI by namespaced name
 func (c *Controller) GetCHIByObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ClickHouseInstallation, error) {
 	chiName, err := chopmodel.GetCHINameFromObjectMeta(objectMeta)
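// Aside: getStatefulSet/getPod above use a small Go pattern worth naming: accept interface{}
// and type-switch to serve several caller types behind one name. The same shape in isolation,
// with plain k8s types instead of the operator's:
func nsName(obj interface{}) (namespace, name string, err error) {
	switch typed := obj.(type) {
	case *core.Pod:
		return typed.Namespace, typed.Name, nil
	case *apps.StatefulSet:
		return typed.Namespace, typed.Name, nil
	}
	return "", "", fmt.Errorf("unknown type")
}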
diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go
index 900924016..398537dda 100644
--- a/pkg/controller/chi/labeler.go
+++ b/pkg/controller/chi/labeler.go
@@ -15,12 +15,12 @@ package chi
 import (
-	log "github.com/golang/glog"
-	// log "k8s.io/klog"
 	"k8s.io/apimachinery/pkg/apis/meta/v1"

+	log "github.com/altinity/clickhouse-operator/pkg/announcer"
 	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/model"
+	"github.com/altinity/clickhouse-operator/pkg/util"
 )

 func (c *Controller) labelMyObjectsTree() {
@@ -55,21 +55,21 @@
 	namespace, ok2 := c.chop.ConfigManager.GetRuntimeParam(chiv1.OPERATOR_POD_NAMESPACE)

 	if !ok1 || !ok2 {
-		log.V(1).Infof("ERROR fetch Pod name out of %s/%s", namespace, podName)
+		log.V(1).M(namespace, podName).A().Error("ERROR fetch Pod name out of %s/%s", namespace, podName)
 		return
 	}

 	// Pod namespaced name found, fetch the Pod
 	pod, err := c.podLister.Pods(namespace).Get(podName)
 	if err != nil {
-		log.V(1).Infof("ERROR get Pod %s/%s", namespace, podName)
+		log.V(1).M(namespace, podName).A().Error("ERROR get Pod %s/%s", namespace, podName)
 		return
 	}

 	// Put label on the Pod
 	c.addLabels(&pod.ObjectMeta)
 	if _, err := c.kubeClient.CoreV1().Pods(namespace).Update(pod); err != nil {
-		log.V(1).Infof("ERROR put label on Pod %s/%s %v", namespace, podName, err)
+		log.V(1).M(namespace, podName).A().Error("ERROR put label on Pod %s/%s %v", namespace, podName, err)
 	}

 	// Find parent ReplicaSet
@@ -85,21 +85,21 @@
 	if replicaSetName == "" {
 		// ReplicaSet not found
-		log.V(1).Infof("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName)
+		log.V(1).M(namespace, podName).A().Error("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName)
 		return
 	}

 	// ReplicaSet namespaced name found, fetch the ReplicaSet
 	replicaSet, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Get(replicaSetName, v1.GetOptions{})
 	if err != nil {
-		log.V(1).Infof("ERROR get ReplicaSet %s/%s %v", namespace, replicaSetName, err)
+		log.V(1).M(namespace, replicaSetName).A().Error("ERROR get ReplicaSet %s/%s %v", namespace, replicaSetName, err)
 		return
 	}

 	// Put label on the ReplicaSet
 	c.addLabels(&replicaSet.ObjectMeta)
 	if _, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Update(replicaSet); err != nil {
-		log.V(1).Infof("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err)
+		log.V(1).M(namespace, replicaSetName).A().Error("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err)
 	}

 	// Find parent Deployment
@@ -115,25 +115,32 @@
 	if deploymentName == "" {
 		// Deployment not found
-		log.V(1).Infof("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName)
+		log.V(1).M(namespace, replicaSetName).A().Error("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName)
 		return
 	}

 	// Deployment namespaced name found, fetch the Deployment
 	deployment, err := c.kubeClient.AppsV1().Deployments(namespace).Get(deploymentName, v1.GetOptions{})
 	if err != nil {
-		log.V(1).Infof("ERROR get Deployment %s/%s", namespace, deploymentName)
+		log.V(1).M(namespace, deploymentName).A().Error("ERROR get Deployment %s/%s", namespace, deploymentName)
 		return
 	}

 	// Put label on the Deployment
 	c.addLabels(&deployment.ObjectMeta)
 	if _, err := c.kubeClient.AppsV1().Deployments(namespace).Update(deployment); err != nil {
-		log.V(1).Infof("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err)
+		log.V(1).M(namespace, deploymentName).A().Error("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err)
 	}
 }

+// addLabels adds app and version labels
 func (c *Controller) addLabels(meta *v1.ObjectMeta) {
-	meta.Labels[model.LabelAppName] = model.LabelAppValue
-	meta.Labels[model.LabelCHOP] = c.chop.Version
+	util.MergeStringMapsOverwrite(
+		meta.Labels,
+		// Add the following labels
+		map[string]string{
+			model.LabelAppName: model.LabelAppValue,
+			model.LabelCHOP:    c.chop.Version,
+		},
+	)
 }
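// Aside: addLabels above replaces two direct map writes with util.MergeStringMapsOverwrite.
// The helper's behavior is assumed from its name - copy src entries into dst, overwriting on
// key conflict - roughly:
func mergeStringMapsOverwrite(dst, src map[string]string) map[string]string {
	if dst == nil {
		dst = map[string]string{} // a defensive version would also guard a nil destination
	}
	for k, v := range src {
		dst[k] = v // overwrite wins
	}
	return dst
}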
diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go
index 21f17d7c0..72fbcc64b 100644
--- a/pkg/controller/chi/pods.go
+++ b/pkg/controller/chi/pods.go
@@ -17,19 +17,52 @@ package chi
 import (
 	"k8s.io/api/core/v1"

-	log "github.com/golang/glog"
-	// log "k8s.io/klog"
-
+	log "github.com/altinity/clickhouse-operator/pkg/announcer"
 	chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	chopmodel "github.com/altinity/clickhouse-operator/pkg/model"
 )

+func (c *Controller) appendLabelReady(host *chop.ChiHost) error {
+	pod, err := c.getPod(host)
+	if err != nil {
+		log.M(host).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err)
+		return err
+	}
+
+	chopmodel.AppendLabelReady(&pod.ObjectMeta)
+	_, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod)
+	if err != nil {
+		log.M(host).A().Error("FAIL setting 'ready' label for host %s err:%v", host.Address.NamespaceNameString(), err)
+		return err
+	}
+	return err
+}
+
+func (c *Controller) deleteLabelReady(host *chop.ChiHost) error {
+	if host == nil {
+		return nil
+	}
+	if host.StatefulSet.Spec.Replicas != nil {
+		if *host.StatefulSet.Spec.Replicas == 0 {
+			return nil
+		}
+	}
+
+	pod, err := c.getPod(host)
+	if err != nil {
+		log.V(1).M(host).F().Info("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err)
+		return err
+	}
+
+	chopmodel.DeleteLabelReady(&pod.ObjectMeta)
+	_, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod)
+	return err
+}
+
 func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Container)) {
-	namespace := host.Address.Namespace
-	name := chopmodel.CreatePodName(host)
-	pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions())
+	pod, err := c.getPod(host)
 	if err != nil {
-		log.Errorf("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err)
+		log.M(host).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err)
 		return
 	}
@@ -40,11 +73,9 @@
 func (c *Controller) walkContainerStatuses(host *chop.ChiHost, f func(status *v1.ContainerStatus)) {
-	namespace := host.Address.Namespace
-	name := chopmodel.CreatePodName(host)
-	pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions())
+	pod, err := c.getPod(host)
 	if err != nil {
-		log.Errorf("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err)
+		log.M(host).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err)
 		return
 	}
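// Aside: appendLabelReady/deleteLabelReady above toggle a readiness label on the host's Pod
// while its StatefulSet is in flux. A plausible reading (assumed, not stated in this diff) is
// that load-balancing Services select on that label, so a pod being restarted stops receiving
// traffic first. The toggle itself is plain label-map editing:
func setReadyLabel(labels map[string]string, ready bool) {
	const key = "clickhouse.altinity.com/ready" // illustrative key; the real one lives in chopmodel
	if ready {
		labels[key] = "yes"
	} else {
		delete(labels, key)
	}
}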
diff --git a/pkg/controller/chi/poller.go b/pkg/controller/chi/poller.go
index 0b751f045..228db5546 100644
--- a/pkg/controller/chi/poller.go
+++ b/pkg/controller/chi/poller.go
@@ -20,12 +20,10 @@ import (
 	"fmt"
 	"time"

-	log "github.com/golang/glog"
-	// log "k8s.io/klog"
-
 	apps "k8s.io/api/apps/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

+	log "github.com/altinity/clickhouse-operator/pkg/announcer"
 	chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/model"
 )
@@ -35,20 +33,9 @@ const (
 	waitStatefulSetGenerationTimeoutToCreateStatefulSet = 30
 )

-// waitStatefulSetReady polls StatefulSet for reaching target generation and Ready state
-func (c *Controller) waitStatefulSetReady(statefulSet *apps.StatefulSet) error {
-	if err := c.pollStatefulSet(statefulSet, nil, func(sts *apps.StatefulSet) bool {
-		return model.IsStatefulSetGeneration(sts, sts.Generation)
-	}); err == nil {
-		return c.pollStatefulSet(statefulSet, nil, model.IsStatefulSetReady)
-	} else {
-		return err
-	}
-}
-
-// waitHostNotReady polls StatefulSet for not exists or not ready
+// waitHostNotReady polls host's StatefulSet for not exists or not ready
 func (c *Controller) waitHostNotReady(host *chop.ChiHost) error {
-	err := c.pollStatefulSet(host, NewStatefulSetPollOptionsConfigNoCreate(c.chop.Config()), model.IsStatefulSetNotReady)
+	err := c.pollStatefulSet(host, NewStatefulSetPollOptionsConfigNoCreate(c.chop.Config()), model.IsStatefulSetNotReady, nil)
 	if apierrors.IsNotFound(err) {
 		err = nil
 	}
@@ -56,9 +43,54 @@
 	return err
 }

-// waitHostReady polls hosts's StatefulSet until it is ready
+// waitHostReady polls host's StatefulSet until it is ready
 func (c *Controller) waitHostReady(host *chop.ChiHost) error {
-	return c.waitStatefulSetReady(host.StatefulSet)
+	// Wait for StatefulSet to reach generation
+	err := c.pollStatefulSet(
+		host.StatefulSet,
+		nil,
+		func(sts *apps.StatefulSet) bool {
+			if sts == nil {
+				return false
+			}
+			_ = c.deleteLabelReady(host)
+			return model.IsStatefulSetGeneration(sts, sts.Generation)
+		},
+		func() {
+			_ = c.deleteLabelReady(host)
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Wait StatefulSet to reach ready status
+	return c.pollStatefulSet(
+		host.StatefulSet,
+		nil,
+		func(sts *apps.StatefulSet) bool {
+			_ = c.deleteLabelReady(host)
+			return model.IsStatefulSetReady(sts)
+		},
+		func() {
+			_ = c.deleteLabelReady(host)
+		},
+	)
+}
+
+// waitHostDeleted polls host's StatefulSet until it is not available
+func (c *Controller) waitHostDeleted(host *chop.ChiHost) {
+	for {
+		// TODO
+		// Probably there would be better way to wait until k8s reported StatefulSet deleted
+		if _, err := c.getStatefulSet(host); err == nil {
+			log.V(2).Info("cache NOT yet synced")
+			time.Sleep(15 * time.Second)
+		} else {
+			log.V(1).Info("cache synced")
+			return
+		}
+	}
 }

 // waitHostRunning polls host for `Running` state
@@ -71,24 +103,24 @@ func (c *Controller) waitHostRunning(host *chop.ChiHost) error {
 	for {
 		if c.isHostRunning(host) {
 			// All is good, job done, exit
-			log.V(1).Infof("waitHostRunning(%s/%s)-OK", namespace, name)
+			log.V(1).M(host).F().Info("%s/%s-OK", namespace, name)
 			return nil
 		}

 		// Object is found, function not positive
 		if time.Since(start) >= (time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second) {
 			// Start bothering with log messages after some time only
-			log.V(1).Infof("waitHostRunning(%s/%s)-WAIT", namespace, name)
+			log.V(1).M(host).F().Info("%s/%s-WAIT", namespace, name)
 		}

 		if time.Since(start) >= (time.Duration(c.chop.Config().StatefulSetUpdateTimeout) * time.Second) {
 			// Timeout reached, no good result available, time to quit
-			log.V(1).Infof("ERROR waitHostRunning(%s/%s) - TIMEOUT reached", namespace, name)
+			log.V(1).M(host).F().Error("%s/%s-TIMEOUT reached", namespace, name)
 			return errors.New(fmt.Sprintf("waitHostRunning(%s/%s) - wait timeout", namespace, name))
 		}

 		// Wait some more time
-		log.V(2).Infof("waitHostRunning(%s/%s)", namespace, name)
+		log.V(2).M(host).F().Info("%s/%s", namespace, name)
 		select {
 		case <-time.After(time.Duration(c.chop.Config().StatefulSetUpdatePollPeriod) * time.Second):
 		}
@@ -101,7 +133,8 @@ type StatefulSetPollOptions struct {
 	StartBotheringAfterTimeout time.Duration
 	CreateTimeout              time.Duration
 	Timeout                    time.Duration
-	Interval                   time.Duration
+	MainInterval               time.Duration
+	BackgroundInterval         time.Duration
 }

 func NewStatefulSetPollOptions() *StatefulSetPollOptions {
@@ -113,7 +146,8 @@ func NewStatefulSetPollOptionsConfig(config *chop.OperatorConfig) *StatefulSetPo
 		StartBotheringAfterTimeout: time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second,
 		CreateTimeout:              time.Duration(waitStatefulSetGenerationTimeoutToCreateStatefulSet) * time.Second,
 		Timeout:                    time.Duration(config.StatefulSetUpdateTimeout) * time.Second,
-		Interval:                   time.Duration(config.StatefulSetUpdatePollPeriod) * time.Second,
+		MainInterval:               time.Duration(config.StatefulSetUpdatePollPeriod) * time.Second,
+		BackgroundInterval:         1 * time.Second,
 	}
 }

@@ -121,13 +155,19 @@
 func NewStatefulSetPollOptionsConfigNoCreate(config *chop.OperatorConfig) *StatefulSetPollOptions {
 	return &StatefulSetPollOptions{
 		StartBotheringAfterTimeout: time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second,
 		//CreateTimeout: time.Duration(waitStatefulSetGenerationTimeoutToCreateStatefulSet) * time.Second,
-		Timeout:  time.Duration(config.StatefulSetUpdateTimeout) * time.Second,
-		Interval: time.Duration(config.StatefulSetUpdatePollPeriod) * time.Second,
+		Timeout:            time.Duration(config.StatefulSetUpdateTimeout) * time.Second,
+		MainInterval:       time.Duration(config.StatefulSetUpdatePollPeriod) * time.Second,
+		BackgroundInterval: 1 * time.Second,
 	}
 }

 // pollStatefulSet polls StatefulSet with poll callback function.
-func (c *Controller) pollStatefulSet(entity interface{}, opts *StatefulSetPollOptions, f func(set *apps.StatefulSet) bool) error {
+func (c *Controller) pollStatefulSet(
+	entity interface{},
+	opts *StatefulSetPollOptions,
+	mainFn func(set *apps.StatefulSet) bool,
+	backFn func(),
+) error {
 	if opts == nil {
 		opts = NewStatefulSetPollOptionsConfig(c.chop.Config())
 	}
@@ -151,33 +191,33 @@
 	for {
 		if statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name); err == nil {
 			// Object is found
-			if f(statefulSet) {
+			if mainFn(statefulSet) {
 				// All is good, job done, exit
-				log.V(1).Infof("pollStatefulSet(%s/%s)-OK :%s", namespace, name, model.StrStatefulSetStatus(&statefulSet.Status))
+				log.V(1).M(namespace, name).F().Info("OK :%s", model.StrStatefulSetStatus(&statefulSet.Status))
 				return nil
 			}

 			// Object is found, but function is not positive
 			if time.Since(start) >= opts.StartBotheringAfterTimeout {
 				// Start bothering with log messages after some time only
-				log.V(1).Infof("pollStatefulSet(%s/%s)-WAIT:%s", namespace, name, model.StrStatefulSetStatus(&statefulSet.Status))
+				log.V(1).M(namespace, name).F().Info("WAIT:%s", model.StrStatefulSetStatus(&statefulSet.Status))
 			}
 		} else if apierrors.IsNotFound(err) {
 			// Object is not found - it either failed to be created or just still not created
 			if time.Since(start) >= opts.CreateTimeout {
 				// No more wait for object to be created. Consider create as failed.
 				if opts.CreateTimeout > 0 {
-					log.V(1).Infof("ERROR pollStatefulSet(%s/%s) Get() FAILED - StatefulSet still not found, abort", namespace, name)
+					log.V(1).M(namespace, name).F().Error("Get() FAILED - StatefulSet still not found, abort")
 				} else {
-					log.V(1).Infof("pollStatefulSet(%s/%s) Get() NEUTRAL StatefulSet not found and no wait required", namespace, name)
+					log.V(1).M(namespace, name).F().Info("Get() NEUTRAL StatefulSet not found and no wait required")
 				}
 				return err
 			}
 			// Object with such name not found - may be is still being created - wait for it
-			log.V(1).Infof("pollStatefulSet(%s/%s)-WAIT: object not found. Not created yet?", namespace, name)
+			log.V(1).M(namespace, name).F().Info("WAIT: object not found. Not created yet?")
 		} else {
 			// Some kind of total error
-			log.Errorf("ERROR pollStatefulSet(%s/%s) Get() FAILED", namespace, name)
+			log.M(namespace, name).A().Error("%s/%s Get() FAILED", namespace, name)
 			return err
 		}
@@ -185,20 +225,34 @@
 		if time.Since(start) >= opts.Timeout {
 			// Timeout reached, no good result available, time to quit
-			log.V(1).Infof("ERROR pollStatefulSet(%s/%s) - TIMEOUT reached", namespace, name)
+			log.V(1).M(namespace, name).F().Info("%s/%s - TIMEOUT reached", namespace, name)
 			return errors.New(fmt.Sprintf("waitStatefulSet(%s/%s) - wait timeout", namespace, name))
 		}

 		// Wait some more time
-		log.V(2).Infof("pollStatefulSet(%s/%s)", namespace, name)
-		select {
-		case <-time.After(opts.Interval):
-		}
+		log.V(2).Info("pollStatefulSet(%s/%s)", namespace, name)
+		pollback(opts, backFn)
 	}

 	return fmt.Errorf("unexpected flow")
 }

+func pollback(opts *StatefulSetPollOptions, fn func()) {
+	main := time.After(opts.MainInterval)
+	run := true
+	for run {
+		back := time.After(opts.BackgroundInterval)
+		select {
+		case <-main:
+			run = false
+		case <-back:
+			if fn != nil {
+				fn()
+			}
+		}
+	}
+}
+
 // pollHost polls host with poll callback function.
 func (c *Controller) pollHost(host *chop.ChiHost, opts *StatefulSetPollOptions, f func(host *chop.ChiHost) bool) error {
 	if opts == nil {
@@ -212,26 +266,26 @@
 	for {
 		if f(host) {
 			// All is good, job done, exit
-			log.V(1).Infof("pollHost(%s/%s)-OK", namespace, name)
+			log.V(1).M(host).F().Info("%s/%s-OK", namespace, name)
 			return nil
 		}

 		// Object is found, but function is not positive
 		if time.Since(start) >= opts.StartBotheringAfterTimeout {
 			// Start bothering with log messages after some time only
-			log.V(1).Infof("pollHost(%s/%s)-WAIT", namespace, name)
+			log.V(1).M(host).F().Info("%s/%s-WAIT", namespace, name)
 		}

 		if time.Since(start) >= opts.Timeout {
 			// Timeout reached, no good result available, time to quit
-			log.V(1).Infof("ERROR pollHost(%s/%s) - TIMEOUT reached", namespace, name)
+			log.V(1).M(host).F().Error("%s/%s-TIMEOUT reached", namespace, name)
 			return errors.New(fmt.Sprintf("pollHost(%s/%s) - wait timeout", namespace, name))
 		}

 		// Wait some more time
-		log.V(2).Infof("pollHost(%s/%s)", namespace, name)
+		log.V(2).M(host).F().Info("%s/%s", namespace, name)
 		select {
-		case <-time.After(opts.Interval):
+		case <-time.After(opts.MainInterval):
 		}
 	}
diff --git a/pkg/controller/chi/volumes.go b/pkg/controller/chi/volumes.go
index dfe17444a..a74c05ece 100644
--- a/pkg/controller/chi/volumes.go
+++ b/pkg/controller/chi/volumes.go
@@ -17,9 +17,7 @@ package chi
 import (
 	"k8s.io/api/core/v1"

-	log "github.com/golang/glog"
-	// log "k8s.io/klog"
-
+	log "github.com/altinity/clickhouse-operator/pkg/announcer"
 	chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	chopmodel "github.com/altinity/clickhouse-operator/pkg/model"
 )
@@ -29,7 +27,7 @@ func (c *Controller) walkPVCs(host *chop.ChiHost, f func(pvc *v1.PersistentVolum
 	name := chopmodel.CreatePodName(host)
 	pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions())
 	if err != nil {
-		log.Errorf("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err)
+		log.M(host).A().Error("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err)
 		return
 	}
@@ -42,7 +40,7 @@
 		pvcName := volume.PersistentVolumeClaim.ClaimName
 		pvc, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(pvcName, newGetOptions())
 		if err != nil {
-			log.Errorf("FAIL get PVC %s/%s err:%v", namespace, pvcName, err)
+			log.M(host).A().Error("FAIL get PVC %s/%s err:%v", namespace, pvcName, err)
 			continue
 		}
@@ -56,7 +54,7 @@ func (c *Controller) walkActualPVCs(host *chop.ChiHost, f func(pvc *v1.Persisten
 	pvcList, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(newListOptions(labeler.GetSelectorHostScope(host)))
 	if err != nil {
-		log.Errorf("FAIL get list of PVC for host %s/%s err:%v", namespace, host.Name, err)
+		log.M(host).A().Error("FAIL get list of PVC for host %s/%s err:%v", namespace, host.Name, err)
 		return
 	}
@@ -72,7 +70,7 @@ func (c *Controller) walkPVs(host *chop.ChiHost, f func(pv *v1.PersistentVolume)
 	c.walkPVCs(host, func(pvc *v1.PersistentVolumeClaim) {
 		pv, err := c.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, newGetOptions())
 		if err != nil {
-			log.Errorf("FAIL get PV %s err:%v", pvc.Spec.VolumeName, err)
+			log.M(host).A().Error("FAIL get PV %s err:%v", pvc.Spec.VolumeName, err)
 			return
 		}
 		f(pv)
Error("FAILED to normalize CHI : %v", err) } @@ -193,32 +194,30 @@ func (w *worker) normalize(chi *chop.ClickHouseInstallation) *chop.ClickHouseIns // ensureFinalizer func (w *worker) ensureFinalizer(chi *chop.ClickHouseInstallation) { - namespace, name := util.NamespaceName(chi.ObjectMeta) - // Check whether finalizer is already listed in CHI if util.InArray(FinalizerName, chi.ObjectMeta.Finalizers) { - w.a.V(2).Info("ensureFinalizer(%s/%s): finalizer already installed", namespace, name) + w.a.V(2).M(chi).F().Info("finalizer already installed") } // No finalizer found - need to install it if err := w.c.installFinalizer(chi); err != nil { - w.a.V(1).Info("ensureFinalizer(%s/%s): unable to install finalizer. err: %v", namespace, name, err) + w.a.V(1).M(chi).A().Error("unable to install finalizer. err: %v", err) } - w.a.V(3).Info("ensureFinalizer(%s/%s): finalizer installed", namespace, name) + w.a.V(3).M(chi).F().Info("finalizer installed") } // updateCHI sync CHI which was already created earlier func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { - w.a.V(3).Info("updateCHI() - start") - defer w.a.V(3).Info("updateCHI() - end") + w.a.V(3).M(new).S().P() + defer w.a.V(3).M(new).E().P() update := (old != nil) && (new != nil) if update && (old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion) { - w.a.V(3).Info("updateCHI(%s/%s): ResourceVersion did not change: %s", new.Namespace, new.Name, new.ObjectMeta.ResourceVersion) // No need to react + w.a.V(3).M(new).F().Info("ResourceVersion did not change: %s", new.ObjectMeta.ResourceVersion) return nil } @@ -238,28 +237,30 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { if !actionPlan.HasActionsToDo() { // Nothing to do - no changes found - no need to react - w.a.V(3).Info("updateCHI(%s/%s) - ResourceVersion changed, but no actual changes found", new.Namespace, new.Name) + w.a.V(3).M(new).F().Info("ResourceVersion changed, but no actual changes found") return nil } // Write desired normalized CHI with initialized .Status, so it would be possible to monitor progress (&new.Status).ReconcileStart(actionPlan.GetRemovedHostsNum()) if err := w.c.updateCHIObjectStatus(new, false); err != nil { - w.a.V(1).Info("UNABLE to write normalized CHI (%s/%s). It can trigger update action again. Error: %q", new.Namespace, new.Name, err) + w.a.V(1).M(new).A().Error("UNABLE to write normalized CHI. Can trigger update action. Err: %q", err) return nil } w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileStarted). WithStatusAction(new). - Info("updateCHI(%s/%s) reconcile started", new.Namespace, new.Name) - w.a.V(2).Info("updateCHI(%s/%s) - action plan\n%s\n", new.Namespace, new.Name, actionPlan.String()) + M(new).F(). + Info("reconcile started") + w.a.V(2).M(new).F().Info("action plan\n%s\n", actionPlan.String()) if new.IsStopped() { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress). WithStatusAction(new). - Info("updateCHI(%s/%s) exclude CHI from monitoring", new.Namespace, new.Name) + M(new).F(). 
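
// ---------------------------------------------------------------------------
// NOTE (editor): this file, like the rest of the patch, switches from glog's
// Infof/Errorf to the chained announcer API: V(n) gates by verbosity, M(...)
// prefixes namespace/name metadata, F() prefixes the calling function, A()
// marks action/error lines, and S()/E() with P() emit start/end markers.
// A toy re-implementation of the idea (not the real pkg/announcer code) to
// show why call sites above can drop hand-written "updateCHI(%s/%s)" prefixes:
package main

import (
	"fmt"
	"runtime"
)

type announcer struct{ prefix string }

// M mimics attaching namespace/name metadata to every line.
func (a announcer) M(namespace, name string) announcer {
	a.prefix += fmt.Sprintf("%s/%s:", namespace, name)
	return a
}

// F mimics attaching the caller's function name.
func (a announcer) F() announcer {
	if pc, _, _, ok := runtime.Caller(1); ok {
		a.prefix += runtime.FuncForPC(pc).Name() + ":"
	}
	return a
}

// Info prints the accumulated prefix plus the message.
func (a announcer) Info(format string, args ...interface{}) {
	fmt.Printf(a.prefix+format+"\n", args...)
}

func main() {
	var log announcer
	log.M("default", "chi-demo").F().Info("reconcile started")
	// -> default/chi-demo:main.main:reconcile started
}
// ---------------------------------------------------------------------------
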
+ Info("exclude CHI from monitoring") w.c.deleteWatch(new.Namespace, new.Name) } @@ -291,13 +292,6 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { }, ) - new.WalkHosts(func(host *chop.ChiHost) error { - if update { - host.ReconcileAttributes.SetMigrate() - } - return nil - }) - new.WalkHosts(func(host *chop.ChiHost) error { if host.ReconcileAttributes.IsAdd() { // Already added @@ -312,13 +306,13 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { new.WalkHosts(func(host *chop.ChiHost) error { if host.ReconcileAttributes.IsAdd() { - w.a.Info("ADD host: %s", host.Address.ShortString()) + w.a.M(host).Info("ADD host: %s", host.Address.CompactString()) } else if host.ReconcileAttributes.IsModify() { - w.a.Info("MODIFY host: %s", host.Address.ShortString()) + w.a.M(host).Info("MODIFY host: %s", host.Address.CompactString()) } else if host.ReconcileAttributes.IsUnclear() { - w.a.Info("UNCLEAR host: %s", host.Address.ShortString()) + w.a.M(host).Info("UNCLEAR host: %s", host.Address.CompactString()) } else { - w.a.Info("UNTOUCH host: %s", host.Address.ShortString()) + w.a.M(host).Info("UNTOUCHED host: %s", host.Address.CompactString()) } return nil }) @@ -326,6 +320,7 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { if err := w.reconcile(new); err != nil { w.a.WithEvent(new, eventActionReconcile, eventReasonReconcileFailed). WithStatusError(new). + M(new).A(). Error("FAILED update: %v", err) return nil } @@ -334,25 +329,27 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress). WithStatusAction(new). - Info("updateCHI(%s/%s) remove scheduled for deletion items", new.Namespace, new.Name) + M(new).F(). + Info("remove items scheduled for deletion") actionPlan.WalkAdded( func(cluster *chop.ChiCluster) { }, func(shard *chop.ChiShard) { }, func(host *chop.ChiHost) { - if update { - w.a.V(1). - WithEvent(new, eventActionCreate, eventReasonCreateStarted). - WithStatusAction(new). - Info("Adding tables on shard/host:%d/%d cluster:%s", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName) - if err := w.schemer.HostCreateTables(host); err != nil { - w.a.Error("ERROR create tables on host %s. err: %v", host.Name, err) - } - } else { - w.a.V(1). - Info("As CHI is just created, not need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) - } + // + //if update { + // w.a.V(1). + // WithEvent(new, eventActionCreate, eventReasonCreateStarted). + // WithStatusAction(new). + // Info("Adding tables on shard/host:%d/%d cluster:%s", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName) + // if err := w.schemer.HostCreateTables(host); err != nil { + // w.a.Error("ERROR create tables on host %s. err: %v", host.Name, err) + // } + //} else { + // w.a.V(1). + // Info("As CHI is just created, not need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + //} }, ) @@ -360,7 +357,8 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress). WithStatusAction(new). - Info("updateCHI(%s/%s) remove scheduled for deletion items", new.Namespace, new.Name) + M(new).F(). 
+ Info("remove items scheduled for deletion") actionPlan.WalkRemoved( func(cluster *chop.ChiCluster) { _ = w.deleteCluster(cluster) @@ -377,7 +375,8 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress). WithStatusAction(new). - Info("updateCHI(%s/%s) add CHI to monitoring", new.Namespace, new.Name) + M(new).F(). + Info("add CHI to monitoring") w.c.updateWatch(new.Namespace, new.Name, chopmodel.CreatePodFQDNsOfCHI(new)) } @@ -388,15 +387,16 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileCompleted). WithStatusActions(new). - Info("updateCHI(%s/%s) reconcile completed", new.Namespace, new.Name) + M(new).F(). + Info("reconcile completed") return nil } // reconcile reconciles ClickHouseInstallation func (w *worker) reconcile(chi *chop.ClickHouseInstallation) error { - w.a.V(2).Info("reconcile() - start") - defer w.a.V(2).Info("reconcile() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() w.creator = chopmodel.NewCreator(w.c.chop, chi) return chi.WalkTillError( @@ -410,8 +410,8 @@ func (w *worker) reconcile(chi *chop.ClickHouseInstallation) error { // reconcileCHIAuxObjectsPreliminary reconciles CHI preliminary in order to ensure that ConfigMaps are in place func (w *worker) reconcileCHIAuxObjectsPreliminary(chi *chop.ClickHouseInstallation) error { - w.a.V(2).Info("reconcileCHIAuxObjectsPreliminary() - start") - defer w.a.V(2).Info("reconcileCHIAuxObjectsPreliminary() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() // 1. CHI Service if chi.IsStopped() { @@ -424,42 +424,46 @@ func (w *worker) reconcileCHIAuxObjectsPreliminary(chi *chop.ClickHouseInstallat } } - // 2. CHI ConfigMaps without update - create only - return w.reconcileCHIConfigMaps(chi, nil, false) + // 2. CHI common ConfigMap without update - create only + w.reconcileCHIConfigMapCommon(chi, nil, false) + // 3. 
CHI users ConfigMap + w.reconcileCHIConfigMapUsers(chi, nil, true) + + return nil } // reconcileCHIAuxObjectsFinal reconciles CHI global objects func (w *worker) reconcileCHIAuxObjectsFinal(chi *chop.ClickHouseInstallation) error { - w.a.V(2).Info("reconcileCHIAuxObjectsFinal() - start") - defer w.a.V(2).Info("reconcileCHIAuxObjectsFinal() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() // CHI ConfigMaps with update - return w.reconcileCHIConfigMaps(chi, nil, true) + return w.reconcileCHIConfigMapCommon(chi, nil, true) } -// reconcileCHIConfigMaps reconciles all CHI's ConfigMaps -func (w *worker) reconcileCHIConfigMaps(chi *chop.ClickHouseInstallation, options *chopmodel.ClickHouseConfigFilesGeneratorOptions, update bool) error { - // ConfigMap common for all resources in CHI - // contains several sections, mapped as separated chopConfig files, - // such as remote servers, zookeeper setup, etc +// reconcileCHIConfigMapCommon reconciles all CHI's common ConfigMap +func (w *worker) reconcileCHIConfigMapCommon(chi *chop.ClickHouseInstallation, options *chopmodel.ClickHouseConfigFilesGeneratorOptions, update bool) error { configMapCommon := w.creator.CreateConfigMapCHICommon(options) if err := w.reconcileConfigMap(chi, configMapCommon, update); err != nil { return err } + return nil +} +// reconcileCHIConfigMapUsers reconciles all CHI's users ConfigMap +func (w *worker) reconcileCHIConfigMapUsers(chi *chop.ClickHouseInstallation, options *chopmodel.ClickHouseConfigFilesGeneratorOptions, update bool) error { // ConfigMap common for all users resources in CHI configMapUsers := w.creator.CreateConfigMapCHICommonUsers() if err := w.reconcileConfigMap(chi, configMapUsers, update); err != nil { return err } - return nil } // reconcileCluster reconciles Cluster, excluding nested shards func (w *worker) reconcileCluster(cluster *chop.ChiCluster) error { - w.a.V(2).Info("reconcileCluster() - start") - defer w.a.V(2).Info("reconcileCluster() - end") + w.a.V(2).M(cluster).S().P() + defer w.a.V(2).M(cluster).E().P() // Add Cluster's Service service := w.creator.CreateServiceCluster(cluster) @@ -473,8 +477,8 @@ func (w *worker) reconcileCluster(cluster *chop.ChiCluster) error { // reconcileShard reconciles Shard, excluding nested replicas func (w *worker) reconcileShard(shard *chop.ChiShard) error { - w.a.V(2).Info("reconcileShard() - start") - defer w.a.V(2).Info("reconcileShard() - end") + w.a.V(2).M(shard).S().P() + defer w.a.V(2).M(shard).E().P() // Add Shard's Service service := w.creator.CreateServiceShard(shard) @@ -488,26 +492,27 @@ func (w *worker) reconcileShard(shard *chop.ChiShard) error { // reconcileHost reconciles ClickHouse host func (w *worker) reconcileHost(host *chop.ChiHost) error { - w.a.V(2).Info("reconcileHost() - start") - defer w.a.V(2).Info("reconcileHost() - end") + w.a.V(2).M(host).S().P() + defer w.a.V(2).M(host).E().P() w.a.V(1). - WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileStarted). - WithStatusAction(host.CHI). + WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileStarted). + WithStatusAction(host.GetCHI()). + M(host).F(). 
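
// ---------------------------------------------------------------------------
// NOTE (editor): a reading of the ConfigMap split above, stated here because
// the diff only shows the mechanics: reconcileCHIConfigMaps() is divided so
// that the users ConfigMap is reconciled with update=true already in the
// preliminary pass (plausibly so user/profile changes reach every host before
// any restarts), while the common ConfigMap (remote servers, zookeeper) is
// created early but only updated in reconcileCHIAuxObjectsFinal().
// Resulting call order, simplified:
//
//	reconcileCHIAuxObjectsPreliminary:
//		reconcileCHIConfigMapCommon(chi, nil, false) // create only
//		reconcileCHIConfigMapUsers(chi, nil, true)   // create or update
//	...per-cluster / per-shard / per-host reconcile...
//	reconcileCHIAuxObjectsFinal:
//		reconcileCHIConfigMapCommon(chi, nil, true)  // now with update
// ---------------------------------------------------------------------------
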
Info("Reconcile Host %s started", host.Name) // Create artifacts configMap := w.creator.CreateConfigMapHost(host) statefulSet := w.creator.CreateStatefulSet(host) service := w.creator.CreateServiceHost(host) - status := w.getStatefulSetStatus(host.StatefulSet) + (&host.ReconcileAttributes).SetStatus(w.getStatefulSetStatus(statefulSet, host)) - if err := w.excludeHost(host, status); err != nil { + if err := w.excludeHost(host); err != nil { return err } // Reconcile host's ConfigMap - if err := w.reconcileConfigMap(host.CHI, configMap, true); err != nil { + if err := w.reconcileConfigMap(host.GetCHI(), configMap, true); err != nil { return err } @@ -520,89 +525,152 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { w.reconcilePersistentVolumes(host) // Reconcile host's Service - if err := w.reconcileService(host.CHI, service); err != nil { + if err := w.reconcileService(host.GetCHI(), service); err != nil { return err } host.ReconcileAttributes.UnsetAdd() - if host.ReconcileAttributes.IsMigrate() { + if w.migrateTables(host) { w.a.V(1). - WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted). - WithStatusAction(host.CHI). + WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateStarted). + WithStatusAction(host.GetCHI()). + M(host).F(). Info("Adding tables on shard/host:%d/%d cluster:%s", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName) if err := w.schemer.HostCreateTables(host); err != nil { - w.a.Error("ERROR create tables on host %s. err: %v", host.Name, err) + w.a.M(host).A().Error("ERROR create tables on host %s. err: %v", host.Name, err) } } else { w.a.V(1). - Info("As CHI is just created, not need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + M(host).F(). + Info("No need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) } - if err := w.includeHost(host, status); err != nil { + if err := w.includeHost(host); err != nil { // If host is not ready - fallback return err } - host.ReconcileAttributes.SetReconciled() - w.a.V(1). WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Reconcile Host %s completed", host.Name) return nil } -// Exclude host from ClickHouse clusters -func (w *worker) excludeHost(host *chop.ChiHost, status StatefulSetStatus) error { - if w.waitExcludeHost(host, status) { +func (w *worker) migrateTables(host *chop.ChiHost) bool { + if host.GetCHI().IsStopped() { + return false + } + if host.ReconcileAttributes.GetStatus() == chop.StatefulSetStatusSame { + return false + } + return true +} + +// Exclude host from ClickHouse clusters if required +func (w *worker) excludeHost(host *chop.ChiHost) error { + if w.shouldExcludeHost(host) { w.a.V(1). + M(host).F(). Info("Exclude from cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) - options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). - SetRemoteServersGeneratorOptions(chopmodel.NewRemoteServersGeneratorOptions(). + w.excludeHostFromService(host) + w.excludeHostFromClickHouseCluster(host) + } + return nil +} + +// Always include host back to ClickHouse clusters +func (w *worker) includeHost(host *chop.ChiHost) error { + w.a.V(1). + M(host).F(). 
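
// ---------------------------------------------------------------------------
// NOTE (editor): reconcileHost() above now brackets the StatefulSet update
// with an exclude/include cycle: drop the host's "ready" label (so Services
// stop routing to it) and regenerate the cluster config without the host,
// update the StatefulSet, then re-add the host and label. A schematic of the
// cycle with toy types - the bodies are assumptions distilled from the
// helpers below, not the operator's API:
package main

import "fmt"

type host struct {
	name            string
	ready           bool // "ready" label => selected by Services
	inRemoteServers bool // present in remote_servers.xml
}

func excludeHost(h *host) {
	h.ready = false
	h.inRemoteServers = false
	fmt.Println(h.name, "excluded: out of Service endpoints and cluster config")
}

func includeHost(h *host) {
	h.inRemoteServers = true // cluster config is always restored
	h.ready = true           // label returns once the host is usable again
	fmt.Println(h.name, "included back")
}

func main() {
	h := &host{name: "chi-demo-0-0", ready: true, inRemoteServers: true}
	excludeHost(h)
	fmt.Println("... reconcile StatefulSet, wait for pod ...")
	includeHost(h)
}
// ---------------------------------------------------------------------------
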
+ Info("Include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + + w.includeHostIntoClickHouseCluster(host) + w.includeHostIntoService(host) + + return nil +} + +func (w *worker) excludeHostFromService(host *chop.ChiHost) { + w.c.deleteLabelReady(host) +} + +func (w *worker) includeHostIntoService(host *chop.ChiHost) { + w.c.appendLabelReady(host) +} + +// excludeHostFromClickHouseCluster excludes host from ClickHouse configuration +func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) { + // Specify in options to exclude host from ClickHouse config file + options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). + SetRemoteServersGeneratorOptions( + chopmodel.NewRemoteServersGeneratorOptions(). ExcludeHost(host). ExcludeReconcileAttributes( chop.NewChiHostReconcileAttributes().SetAdd(), ), - ) + ) - _ = w.reconcileCHIConfigMaps(host.CHI, options, true) // remove host from cluster config only if we are going to wait for exclusion + // Remove host from cluster config and wait for ClickHouse to pick-up the change + if w.waitExcludeHost(host) { + _ = w.reconcileCHIConfigMapCommon(host.GetCHI(), options, true) _ = w.waitHostNotInCluster(host) } +} - return nil +// includeHostIntoClickHouseCluster includes host to ClickHouse configuration +func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost) { + options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). + SetRemoteServersGeneratorOptions(chopmodel.NewRemoteServersGeneratorOptions(). + ExcludeReconcileAttributes( + chop.NewChiHostReconcileAttributes().SetAdd(), + ), + ) + // Add host to the cluster config (always) and wait for ClickHouse to pick-up the change + _ = w.reconcileCHIConfigMapCommon(host.GetCHI(), options, true) + if w.waitIncludeHost(host) { + _ = w.waitHostInCluster(host) + } } -// determines whether reconciler should wait for host to be excluded from/included into cluster -func (w *worker) waitExcludeHost(host *chop.ChiHost, status StatefulSetStatus) bool { - if (status == statefulSetStatusNew) || (status == statefulSetStatusSame) { - // No need to wait for new and non-modified StatefulSets +// shouldExcludeHost determines whether host to be excluded from cluster +func (w *worker) shouldExcludeHost(host *chop.ChiHost) bool { + status := host.ReconcileAttributes.GetStatus() + if (status == chop.StatefulSetStatusNew) || (status == chop.StatefulSetStatusSame) { + // No need to exclude for new and non-modified StatefulSets return false } if host.GetShard().HostsCount() == 1 { - // In case shard where current host is located has only one host (means no replication), no need to wait + // In case shard where current host is located has only one host (means no replication), no need to exclude return false } - if host.CHI.IsReconcilingPolicyWait() { - return true - } else if host.CHI.IsReconcilingPolicyNoWait() { - return false - } + return true +} - if !w.c.chop.Config().ReconcileWaitExclude { +// determines whether reconciler should wait for host to be excluded from cluster +func (w *worker) waitExcludeHost(host *chop.ChiHost) bool { + // Check CHI settings + switch { + case host.GetCHI().IsReconcilingPolicyWait(): + return true + case host.GetCHI().IsReconcilingPolicyNoWait(): return false } - return true + // Fallback to operator's settings + return w.c.chop.Config().ReconcileWaitExclude } -// determines whether reconciler should wait for host to be excluded from/included into cluster -func (w *worker) 
waitIncludeHost(host *chop.ChiHost, status StatefulSetStatus) bool { - if (status == statefulSetStatusNew) || (status == statefulSetStatusSame) { +// determines whether reconciler should wait for host to be included into cluster +func (w *worker) waitIncludeHost(host *chop.ChiHost) bool { + status := host.ReconcileAttributes.GetStatus() + if (status == chop.StatefulSetStatusNew) || (status == chop.StatefulSetStatusSame) { return false } @@ -611,41 +679,24 @@ func (w *worker) waitIncludeHost(host *chop.ChiHost, status StatefulSetStatus) b return false } - if host.CHI.IsReconcilingPolicyWait() { + // Check CHI settings + switch { + case host.GetCHI().IsReconcilingPolicyWait(): return true - } else if host.CHI.IsReconcilingPolicyNoWait() { + case host.GetCHI().IsReconcilingPolicyNoWait(): return false } - if w.c.chop.Config().ReconcileWaitInclude == false { - return false - } - - return true -} - -// Include host back to ClickHouse clusters -func (w *worker) includeHost(host *chop.ChiHost, status StatefulSetStatus) error { - w.a.V(1). - Info("Include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) - options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). - SetRemoteServersGeneratorOptions(chopmodel.NewRemoteServersGeneratorOptions(). - ExcludeReconcileAttributes( - chop.NewChiHostReconcileAttributes().SetAdd(), - ), - ) - _ = w.reconcileCHIConfigMaps(host.CHI, options, true) - if w.waitIncludeHost(host, status) { - _ = w.waitHostInCluster(host) - } - - return nil + // Fallback to operator's settings + return w.c.chop.Config().ReconcileWaitInclude } +// waitHostInCluster waits until host is a member of at least one ClickHouse cluster func (w *worker) waitHostInCluster(host *chop.ChiHost) error { return w.c.pollHost(host, nil, w.schemer.IsHostInCluster) } +// waitHostNotInCluster waits until host is not a member of any ClickHouse clusters func (w *worker) waitHostNotInCluster(host *chop.ChiHost) error { return w.c.pollHost(host, nil, func(host *chop.ChiHost) bool { return !w.schemer.IsHostInCluster(host) @@ -654,11 +705,10 @@ func (w *worker) waitHostNotInCluster(host *chop.ChiHost) error { // finalizeCHI func (w *worker) finalizeCHI(chi *chop.ClickHouseInstallation) error { - namespace, name := util.NamespaceName(chi.ObjectMeta) - w.a.V(3).Info("finalizeCHI(%s/%s) - start", namespace, name) - defer w.a.V(3).Info("finalizeCHI(%s/%s) - end", namespace, name) + w.a.V(3).M(chi).S().P() + defer w.a.V(3).M(chi).E().P() - cur, err := w.c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) + cur, err := w.c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Get(chi.Name, newGetOptions()) if (err != nil) || (cur == nil) { return nil } @@ -671,16 +721,16 @@ func (w *worker) finalizeCHI(chi *chop.ClickHouseInstallation) error { // Delete CHI (&chi.Status).DeleteStart() if err := w.c.updateCHIObjectStatus(chi, true); err != nil { - w.a.V(1).Info("UNABLE to write normalized CHI (%s/%s). err:%q", namespace, name, err) + w.a.V(1).M(chi).A().Error("UNABLE to write normalized CHI. 
err:%q", err) return nil } _ = w.deleteCHI(chi) // Uninstall finalizer - w.a.V(2).Info("finalizeCHI(%s/%s): uninstall finalizer", namespace, name) + w.a.V(2).M(chi).F().Info("uninstall finalizer") if err := w.c.uninstallFinalizer(chi); err != nil { - w.a.V(1).Info("finalizeCHI(%s/%s): unable to uninstall finalizer: err:%v", namespace, name, err) + w.a.V(1).M(chi).A().Error("unable to uninstall finalizer: err:%v", err) } return nil @@ -688,21 +738,23 @@ func (w *worker) finalizeCHI(chi *chop.ClickHouseInstallation) error { // deleteCHI deletes all kubernetes resources related to chi *chop.ClickHouseInstallation func (w *worker) deleteCHI(chi *chop.ClickHouseInstallation) error { - w.a.V(2).Info("deleteCHI() - start") - defer w.a.V(2).Info("deleteCHI() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() var err error w.a.V(1). WithEvent(chi, eventActionDelete, eventReasonDeleteStarted). WithStatusAction(chi). - Info("Delete CHI %s/%s started", chi.Namespace, chi.Name) + M(chi).F(). + Info("Delete CHI started") chi, err = w.normalizer.CreateTemplatedCHI(chi, true) if err != nil { w.a.WithEvent(chi, eventActionDelete, eventReasonDeleteFailed). WithStatusError(chi). - Error("Delete CHI %s/%s failed - unable to normalize: %q", chi.Namespace, chi.Name, err) + M(chi).A(). + Error("Delete CHI failed - unable to normalize: %q", err) return err } @@ -723,7 +775,8 @@ func (w *worker) deleteCHI(chi *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(chi, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(chi). - Info("Delete CHI %s/%s - completed", chi.Namespace, chi.Name) + M(chi).F(). + Info("Delete CHI completed") return nil } @@ -739,11 +792,13 @@ func (w *worker) deleteTables(host *chop.ChiHost) error { w.a.V(1). WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Deleted tables on host %s replica %d to shard %d in cluster %s", host.Name, host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) } else { w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteFailed). WithStatusError(host.CHI). + M(host).A(). Error("FAILED to delete tables on host %s with error %v", host.Name, err) } @@ -752,17 +807,19 @@ func (w *worker) deleteTables(host *chop.ChiHost) error { // deleteHost deletes all kubernetes resources related to replica *chop.ChiHost func (w *worker) deleteHost(host *chop.ChiHost) error { - w.a.V(2).Info("deleteHost() - start") - defer w.a.V(2).Info("deleteHost() - end") + w.a.V(2).M(host).S().Info(host.Address.HostName) + defer w.a.V(2).M(host).E().Info(host.Address.HostName) w.a.V(1). WithEvent(host.CHI, eventActionDelete, eventReasonDeleteStarted). WithStatusAction(host.CHI). + M(host).F(). Info("Delete host %s/%s - started", host.Address.ClusterName, host.Name) - if _, err := w.c.getStatefulSetByHost(host); err != nil { + if _, err := w.c.getStatefulSet(host); err != nil { w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Delete host %s/%s - completed StatefulSet not found - already deleted? err: %v", host.Address.ClusterName, host.Name, err) return nil @@ -787,10 +844,12 @@ func (w *worker) deleteHost(host *chop.ChiHost) error { w.a.V(1). WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(host.CHI). + M(host).F(). 
Info("Delete host %s/%s - completed", host.Address.ClusterName, host.Name) } else { w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteFailed). WithStatusError(host.CHI). + M(host).F(). Error("FAILED Delete host %s/%s - completed", host.Address.ClusterName, host.Name) } @@ -799,12 +858,13 @@ func (w *worker) deleteHost(host *chop.ChiHost) error { // deleteShard deletes all kubernetes resources related to shard *chop.ChiShard func (w *worker) deleteShard(shard *chop.ChiShard) error { - w.a.V(2).Info("deleteShard() - start") - defer w.a.V(2).Info("deleteShard() - end") + w.a.V(2).M(shard).S().P() + defer w.a.V(2).M(shard).E().P() w.a.V(1). WithEvent(shard.CHI, eventActionDelete, eventReasonDeleteStarted). WithStatusAction(shard.CHI). + M(shard).F(). Info("Delete shard %s/%s - started", shard.Address.Namespace, shard.Name) // Delete all replicas @@ -816,6 +876,7 @@ func (w *worker) deleteShard(shard *chop.ChiShard) error { w.a.V(1). WithEvent(shard.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(shard.CHI). + M(shard).F(). Info("Delete shard %s/%s - completed", shard.Address.Namespace, shard.Name) return nil @@ -823,12 +884,13 @@ func (w *worker) deleteShard(shard *chop.ChiShard) error { // deleteCluster deletes all kubernetes resources related to cluster *chop.ChiCluster func (w *worker) deleteCluster(cluster *chop.ChiCluster) error { - w.a.V(2).Info("deleteCluster() - start") - defer w.a.V(2).Info("deleteCluster() - end") + w.a.V(2).M(cluster).S().P() + defer w.a.V(2).M(cluster).E().P() w.a.V(1). WithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteStarted). WithStatusAction(cluster.CHI). + M(cluster).F(). Info("Delete cluster %s/%s - started", cluster.Address.Namespace, cluster.Name) // Delete all shards @@ -842,6 +904,7 @@ func (w *worker) deleteCluster(cluster *chop.ChiCluster) error { w.a.V(1). WithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(cluster.CHI). + M(cluster).F(). Info("Delete cluster %s/%s - completed", cluster.Address.Namespace, cluster.Name) return nil @@ -849,8 +912,8 @@ func (w *worker) deleteCluster(cluster *chop.ChiCluster) error { // createCHIFromObjectMeta func (w *worker) createCHIFromObjectMeta(objectMeta *meta.ObjectMeta) (*chop.ClickHouseInstallation, error) { - w.a.V(3).Info("createCHIFromObjectMeta() - start") - defer w.a.V(3).Info("createCHIFromObjectMeta() - end") + w.a.V(3).M(objectMeta).S().P() + defer w.a.V(3).M(objectMeta).E().P() chi, err := w.c.GetCHIByObjectMeta(objectMeta) if err != nil { @@ -867,8 +930,8 @@ func (w *worker) createCHIFromObjectMeta(objectMeta *meta.ObjectMeta) (*chop.Cli // createClusterFromObjectMeta func (w *worker) createClusterFromObjectMeta(objectMeta *meta.ObjectMeta) (*chop.ChiCluster, error) { - w.a.V(3).Info("createClusterFromObjectMeta() - start") - defer w.a.V(3).Info("createClusterFromObjectMeta() - end") + w.a.V(3).M(objectMeta).S().P() + defer w.a.V(3).M(objectMeta).E().P() clusterName, err := chopmodel.GetClusterNameFromObjectMeta(objectMeta) if err != nil { @@ -896,11 +959,13 @@ func (w *worker) updateConfigMap(chi *chop.ClickHouseInstallation, configMap *co w.a.V(1). WithEvent(chi, eventActionUpdate, eventReasonUpdateCompleted). WithStatusAction(chi). + M(chi).F(). Info("Update ConfigMap %s/%s", configMap.Namespace, configMap.Name) } else { w.a.WithEvent(chi, eventActionUpdate, eventReasonUpdateFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). 
Error("Update ConfigMap %s/%s failed with error %v", configMap.Namespace, configMap.Name, err) } @@ -915,11 +980,13 @@ func (w *worker) createConfigMap(chi *chop.ClickHouseInstallation, configMap *co w.a.V(1). WithEvent(chi, eventActionCreate, eventReasonCreateCompleted). WithStatusAction(chi). + M(chi).F(). Info("Create ConfigMap %s/%s", configMap.Namespace, configMap.Name) } else { w.a.WithEvent(chi, eventActionCreate, eventReasonCreateFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("Create ConfigMap %s/%s failed with error %v", configMap.Namespace, configMap.Name, err) } @@ -932,8 +999,8 @@ func (w *worker) reconcileConfigMap( configMap *core.ConfigMap, update bool, ) error { - w.a.V(2).Info("reconcileConfigMap() - start") - defer w.a.V(2).Info("reconcileConfigMap() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() // Check whether this object already exists in k8s curConfigMap, err := w.c.getConfigMap(&configMap.ObjectMeta, false) @@ -955,6 +1022,7 @@ func (w *worker) reconcileConfigMap( w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("FAILED to reconcile ConfigMap: %s CHI: %s ", configMap.Name, chi.Name) } @@ -986,7 +1054,7 @@ func (w *worker) updateService(chi *chop.ClickHouseInstallation, curService, new // Already have this port specified - reuse all internals, // due to limitations with auto-assigned values *newPort = *curPort - w.a.Info("reuse Port %d values", newPort.Port) + w.a.M(chi).F().Info("reuse Port %d values", newPort.Port) break } } @@ -1019,11 +1087,13 @@ func (w *worker) updateService(chi *chop.ClickHouseInstallation, curService, new w.a.V(1). WithEvent(chi, eventActionUpdate, eventReasonUpdateCompleted). WithStatusAction(chi). + M(chi).F(). Info("Update Service %s/%s", newService.Namespace, newService.Name) } else { w.a.WithEvent(chi, eventActionUpdate, eventReasonUpdateFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("Update Service %s/%s failed with error %v", newService.Namespace, newService.Name, err) } @@ -1038,11 +1108,13 @@ func (w *worker) createService(chi *chop.ClickHouseInstallation, service *core.S w.a.V(1). WithEvent(chi, eventActionCreate, eventReasonCreateCompleted). WithStatusAction(chi). + M(chi).F(). Info("Create Service %s/%s", service.Namespace, service.Name) } else { w.a.WithEvent(chi, eventActionCreate, eventReasonCreateFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("Create Service %s/%s failed with error %v", service.Namespace, service.Name, err) } @@ -1051,8 +1123,8 @@ func (w *worker) createService(chi *chop.ClickHouseInstallation, service *core.S // reconcileService reconciles core.Service func (w *worker) reconcileService(chi *chop.ClickHouseInstallation, service *core.Service) error { - w.a.V(2).Info("reconcileService() - start") - defer w.a.V(2).Info("reconcileService() - end") + w.a.V(2).M(chi).S().Info(service.Name) + defer w.a.V(2).M(chi).E().Info(service.Name) // Check whether this object already exists curService, err := w.c.getService(&service.ObjectMeta, false) @@ -1072,59 +1144,60 @@ func (w *worker) reconcileService(chi *chop.ClickHouseInstallation, service *cor w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). 
Error("FAILED to reconcile Service: %s CHI: %s ", service.Name, chi.Name) } return err } -type StatefulSetStatus string - -const ( - statefulSetStatusModified StatefulSetStatus = "modified" - statefulSetStatusNew StatefulSetStatus = "new" - statefulSetStatusSame StatefulSetStatus = "same" - statefulSetStatusUnknown StatefulSetStatus = "unknown" -) - -func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) StatefulSetStatus { - w.a.V(2).Info("getStatefulSetStatus() - start") - defer w.a.V(2).Info("getStatefulSetStatus() - end") +func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet, host *chop.ChiHost) chop.StatefulSetStatus { + w.a.V(2).M(host).S().Info(util.NamespaceNameString(statefulSet.ObjectMeta)) + defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(statefulSet.ObjectMeta)) // Check whether this object already exists in k8s curStatefulSet, err := w.c.getStatefulSet(&statefulSet.ObjectMeta, false) if curStatefulSet != nil { - if _cur, ok := curStatefulSet.Labels[chopmodel.LabelStatefulSetVersion]; ok { - if _new, _ok := statefulSet.Labels[chopmodel.LabelStatefulSetVersion]; _ok { - if _cur == _new { - w.a.Info("INFO StatefulSet ARE EQUAL no reconcile is actually needed") - return statefulSetStatusSame - } + // Try to perform label-based comparison + curLabel, curHasLabel := w.creator.GetStatefulSetVersion(curStatefulSet) + newLabel, newHasLabel := w.creator.GetStatefulSetVersion(statefulSet) + if curHasLabel && newHasLabel { + if curLabel == newLabel { + w.a.M(host).F().Info("INFO StatefulSet ARE EQUAL based on labels no reconcile is actually needed %s", util.NamespaceNameString(statefulSet.ObjectMeta)) + return chop.StatefulSetStatusSame + } else { + //if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { + // w.a.Info("INFO StatefulSet ARE EQUAL based on diff no reconcile is actually needed") + // // return chop.StatefulSetStatusSame + //} else { + // w.a.Info("INFO StatefulSet ARE DIFFERENT based on diff reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) + // // return chop.StatefulSetStatusModified + //} + w.a.M(host).F().Info("INFO StatefulSet ARE DIFFERENT based on labels reconcile needed %s", util.NamespaceNameString(statefulSet.ObjectMeta)) + return chop.StatefulSetStatusModified } } - if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { - w.a.Info("INFO StatefulSet ARE DIFFERENT reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) - return statefulSetStatusModified - } + // No labels to compare, we can not say for sure what exactly is going on + return chop.StatefulSetStatusUnknown } + // No cur StatefulSet available + if apierrors.IsNotFound(err) { - // StatefulSet not found - even during Update process - try to create it - return statefulSetStatusNew + return chop.StatefulSetStatusNew } - return statefulSetStatusUnknown + return chop.StatefulSetStatusUnknown } // reconcileStatefulSet reconciles apps.StatefulSet func (w *worker) reconcileStatefulSet(newStatefulSet *apps.StatefulSet, host *chop.ChiHost) error { - w.a.V(2).Info("reconcileStatefulSet() - start") - defer w.a.V(2).Info("reconcileStatefulSet() - end") + w.a.V(2).M(host).S().Info(util.NamespaceNameString(newStatefulSet.ObjectMeta)) + defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(newStatefulSet.ObjectMeta)) - status := w.getStatefulSetStatus(host.StatefulSet) - if status == statefulSetStatusSame { - defer w.a.V(2).Info("reconcileStatefulSet() - no 
need to reconcile the same StaetfulSet") + if host.ReconcileAttributes.GetStatus() == chop.StatefulSetStatusSame { + defer w.a.V(2).M(host).F().Info("no need to reconcile the same StatefulSet %s", util.NamespaceNameString(newStatefulSet.ObjectMeta)) return nil } @@ -1145,6 +1218,7 @@ func (w *worker) reconcileStatefulSet(newStatefulSet *apps.StatefulSet, host *ch w.a.WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileFailed). WithStatusAction(host.CHI). WithStatusError(host.CHI). + M(host).A(). Error("FAILED to reconcile StatefulSet: %s CHI: %s ", newStatefulSet.Name, host.CHI.Name) } @@ -1153,12 +1227,13 @@ func (w *worker) reconcileStatefulSet(newStatefulSet *apps.StatefulSet, host *ch // createStatefulSet func (w *worker) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.ChiHost) error { - w.a.V(2).Info("createStatefulSet() - start") - defer w.a.V(2).Info("createStatefulSet() - end") + w.a.V(2).M(host).S().Info(util.NamespaceNameString(statefulSet.ObjectMeta)) + defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(statefulSet.ObjectMeta)) w.a.V(1). WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted). WithStatusAction(host.CHI). + M(host).F(). Info("Create StatefulSet %s/%s - started", statefulSet.Namespace, statefulSet.Name) err := w.c.createStatefulSet(statefulSet, host) @@ -1170,11 +1245,13 @@ func (w *worker) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.Chi w.a.V(1). WithEvent(host.CHI, eventActionCreate, eventReasonCreateCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Create StatefulSet %s/%s - completed", statefulSet.Namespace, statefulSet.Name) } else { w.a.WithEvent(host.CHI, eventActionCreate, eventReasonCreateFailed). WithStatusAction(host.CHI). WithStatusError(host.CHI). + M(host).A(). Error("Create StatefulSet %s/%s - failed with error %v", statefulSet.Namespace, statefulSet.Name, err) } @@ -1183,8 +1260,8 @@ func (w *worker) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.Chi // updateStatefulSet func (w *worker) updateStatefulSet(curStatefulSet, newStatefulSet *apps.StatefulSet, host *chop.ChiHost) error { - w.a.V(2).Info("updateStatefulSet() - start") - defer w.a.V(2).Info("updateStatefulSet() - end") + w.a.V(2).M(host).S().Info(newStatefulSet.Name) + defer w.a.V(2).M(host).E().Info(newStatefulSet.Name) namespace := newStatefulSet.Namespace name := newStatefulSet.Name @@ -1192,6 +1269,7 @@ func (w *worker) updateStatefulSet(curStatefulSet, newStatefulSet *apps.Stateful w.a.V(1). WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted). WithStatusAction(host.CHI). + M(host).F(). Info("Update StatefulSet(%s/%s) - started", namespace, name) err := w.c.updateStatefulSet(curStatefulSet, newStatefulSet, host) @@ -1201,6 +1279,7 @@ func (w *worker) updateStatefulSet(curStatefulSet, newStatefulSet *apps.Stateful w.a.V(1). WithEvent(host.CHI, eventActionUpdate, eventReasonUpdateCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Update StatefulSet(%s/%s) - completed", namespace, name) return nil } @@ -1208,11 +1287,12 @@ func (w *worker) updateStatefulSet(curStatefulSet, newStatefulSet *apps.Stateful w.a.WithEvent(host.CHI, eventActionUpdate, eventReasonUpdateFailed). WithStatusAction(host.CHI). WithStatusError(host.CHI). + M(host).A(). 
Error("Update StatefulSet(%s/%s) - failed with error\n---\n%v\n--\nContinue with recreate", namespace, name, err) diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, newStatefulSet.Spec) - w.a.Info("StatefulSet.Spec diff:") - w.a.Info(util.MessageDiffString(diff, equal)) + w.a.M(host).Info("StatefulSet.Spec diff:") + w.a.M(host).Info(util.MessageDiffString(diff, equal)) err = w.c.deleteStatefulSet(host) err = w.reconcilePersistentVolumeClaims(host) @@ -1230,8 +1310,8 @@ func (w *worker) reconcilePersistentVolumes(host *chop.ChiHost) { // reconcilePersistentVolumeClaims func (w *worker) reconcilePersistentVolumeClaims(host *chop.ChiHost) error { namespace := host.Address.Namespace - w.a.V(2).Info("reconcilePersistentVolumeClaims for host %s/%s - start", namespace, host.Name) - defer w.a.V(2).Info("reconcilePersistentVolumeClaims for host %s/%s - end", namespace, host.Name) + w.a.V(2).M(host).S().Info("host %s/%s", namespace, host.Name) + defer w.a.V(2).M(host).E().Info("host %s/%s", namespace, host.Name) host.WalkVolumeMounts(func(volumeMount *core.VolumeMount) { volumeClaimTemplateName := volumeMount.Name @@ -1242,15 +1322,15 @@ func (w *worker) reconcilePersistentVolumeClaims(host *chop.ChiHost) error { } pvcName := chopmodel.CreatePVCName(host, volumeMount, volumeClaimTemplate) - w.a.V(2).Info("reconcile volumeMount (%s/%s/%s/%s) - start", namespace, host.Name, volumeMount.Name, pvcName) - defer w.a.V(2).Info("reconcile volumeMount (%s/%s/%s/%s) - end", namespace, host.Name, volumeMount.Name, pvcName) + w.a.V(2).M(host).Info("reconcile volumeMount (%s/%s/%s/%s) - start", namespace, host.Name, volumeMount.Name, pvcName) + defer w.a.V(2).M(host).Info("reconcile volumeMount (%s/%s/%s/%s) - end", namespace, host.Name, volumeMount.Name, pvcName) pvc, err := w.c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(pvcName, newGetOptions()) if err != nil { if apierrors.IsNotFound(err) { // This is not an error per se, means PVC is not created (yet)? 
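
// ---------------------------------------------------------------------------
// NOTE (editor): reconcileResource(), in the hunks that follow, grows a PVC
// in place when the desired storage request differs from the live object.
// The decision is a plain quantity comparison; a standalone sketch of that
// check using only apimachinery (assumed vendored in this repo), with the
// actual PVC Update() call stubbed by a print:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	current := resource.MustParse("10Gi")
	desired := resource.MustParse("20Gi")

	if current.Cmp(desired) == 0 {
		// Equal requests - nothing to reconcile.
		fmt.Println("requests equal, skip update")
		return
	}

	// Unequal requests - copy the desired value over, then the worker calls
	// kubeClient.CoreV1().PersistentVolumeClaims(ns).Update(pvc).
	current = desired
	fmt.Printf("patched storage request to %s\n", current.String())
}
// ---------------------------------------------------------------------------
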
} else { - w.a.Error("ERROR unable to get PVC(%s/%s) err: %v", namespace, pvcName, err) + w.a.M(host).A().Error("ERROR unable to get PVC(%s/%s) err: %v", namespace, pvcName, err) } return } @@ -1291,8 +1371,8 @@ func (w *worker) reconcileResource( desiredResourceList core.ResourceList, resourceName core.ResourceName, ) { - w.a.V(2).Info("reconcileResource(%s/%s/%s) - start", pvc.Namespace, pvc.Name, resourceName) - defer w.a.V(2).Info("reconcileResource(%s/%s/%s) - end", pvc.Namespace, pvc.Name, resourceName) + w.a.V(2).M(pvc).Info("reconcileResource(%s/%s/%s) - start", pvc.Namespace, pvc.Name, resourceName) + defer w.a.V(2).M(pvc).Info("reconcileResource(%s/%s/%s) - end", pvc.Namespace, pvc.Name, resourceName) var ok bool if (pvcResourceList == nil) || (desiredResourceList == nil) { @@ -1312,11 +1392,11 @@ func (w *worker) reconcileResource( return } - w.a.V(2).Info("reconcileResource(%s/%s/%s) - unequal requests, want to update", pvc.Namespace, pvc.Name, resourceName) + w.a.V(2).M(pvc).Info("reconcileResource(%s/%s/%s) - unequal requests, want to update", pvc.Namespace, pvc.Name, resourceName) pvcResourceList[resourceName] = desiredResourceList[resourceName] _, err := w.c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(pvc) if err != nil { - w.a.Error("unable to reconcileResource(%s/%s/%s) err: %v", pvc.Namespace, pvc.Name, resourceName, err) + w.a.M(pvc).A().Error("unable to reconcileResource(%s/%s/%s) err: %v", pvc.Namespace, pvc.Name, resourceName, err) return } } diff --git a/pkg/model/builder/xml/xml.go b/pkg/model/builder/xml/xml.go index 9e4765fa4..68a332fd2 100644 --- a/pkg/model/builder/xml/xml.go +++ b/pkg/model/builder/xml/xml.go @@ -91,7 +91,7 @@ func normalizePath(prefix, path string) string { } } -// addBranch ensures branch esists and assign value to the last tagged node +// addBranch ensures branch exists and assign value to the last tagged node func (n *xmlNode) addBranch(tags []string, setting *chiv1.Setting) { node := n for _, tag := range tags { diff --git a/pkg/model/ch_config_const.go b/pkg/model/ch_config_const.go index 07e482205..aacd8e1e1 100644 --- a/pkg/model/ch_config_const.go +++ b/pkg/model/ch_config_const.go @@ -85,6 +85,7 @@ const ( chDefaultInterserverHTTPPortName = "interserver" chDefaultInterserverHTTPPortNumber = int32(9009) ) + const ( zkDefaultPort = 2181 // zkDefaultRootTemplate specifies default ZK root - /clickhouse/{namespace}/{chi name} diff --git a/pkg/model/ch_config_generator.go b/pkg/model/ch_config_generator.go index b5c7a429b..abcc7f31a 100644 --- a/pkg/model/ch_config_generator.go +++ b/pkg/model/ch_config_generator.go @@ -17,6 +17,7 @@ package model import ( "bytes" "fmt" + chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" xmlbuilder "github.com/altinity/clickhouse-operator/pkg/model/builder/xml" "github.com/altinity/clickhouse-operator/pkg/util" diff --git a/pkg/model/clickhouse/connection.go b/pkg/model/clickhouse/connection.go index 971bde728..37e83310f 100644 --- a/pkg/model/clickhouse/connection.go +++ b/pkg/model/clickhouse/connection.go @@ -20,8 +20,7 @@ import ( "fmt" "time" - log "github.com/golang/glog" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" _ "github.com/mailru/go-clickhouse" ) @@ -32,18 +31,17 @@ type CHConnection struct { } func NewConnection(params *CHConnectionParams) *CHConnection { - // DO not perform connection immediately, do it in lazy manner + // Do not establish connection immediately, do it in a lazy manner return 
&CHConnection{ params: params, } } func (c *CHConnection) connect() { - - log.V(2).Infof("Establishing connection: %s", c.params.GetDSNWithHiddenCredentials()) + log.V(2).Info("Establishing connection: %s", c.params.GetDSNWithHiddenCredentials()) dbConnection, err := databasesql.Open("clickhouse", c.params.GetDSN()) if err != nil { - log.V(1).Infof("FAILED Open(%s) %v", c.params.GetDSNWithHiddenCredentials(), err) + log.V(1).A().Error("FAILED Open(%s). Err: %v", c.params.GetDSNWithHiddenCredentials(), err) return } @@ -52,7 +50,7 @@ func (c *CHConnection) connect() { defer cancel() if err := dbConnection.PingContext(ctx); err != nil { - log.V(1).Infof("FAILED Ping(%s) %v", c.params.GetDSNWithHiddenCredentials(), err) + log.V(1).A().Error("FAILED Ping(%s). Err: %v", c.params.GetDSNWithHiddenCredentials(), err) _ = dbConnection.Close() return } @@ -62,7 +60,7 @@ func (c *CHConnection) connect() { func (c *CHConnection) ensureConnected() bool { if c.conn != nil { - log.V(2).Infof("Already connected: %s", c.params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Already connected: %s", c.params.GetDSNWithHiddenCredentials()) return true } @@ -71,34 +69,6 @@ func (c *CHConnection) ensureConnected() bool { return c.conn != nil } -// Query -type Query struct { - ctx context.Context - cancelFunc context.CancelFunc - - Rows *databasesql.Rows -} - -// Close -func (q *Query) Close() { - if q == nil { - return - } - - if q.Rows != nil { - err := q.Rows.Close() - q.Rows = nil - if err != nil { - log.V(1).Infof("UNABLE to close rows. err: %v", err) - } - } - - if q.cancelFunc != nil { - q.cancelFunc() - q.cancelFunc = nil - } -} - // Query runs given sql query func (c *CHConnection) Query(sql string) (*Query, error) { if len(sql) == 0 { @@ -110,7 +80,7 @@ func (c *CHConnection) Query(sql string) (*Query, error) { if !c.ensureConnected() { cancel() s := fmt.Sprintf("FAILED connect(%s) for SQL: %s", c.params.GetDSNWithHiddenCredentials(), sql) - log.V(1).Info(s) + log.V(1).A().Error(s) return nil, fmt.Errorf(s) } @@ -118,17 +88,13 @@ func (c *CHConnection) Query(sql string) (*Query, error) { if err != nil { cancel() s := fmt.Sprintf("FAILED Query(%s) %v for SQL: %s", c.params.GetDSNWithHiddenCredentials(), err, sql) - log.V(1).Info(s) + log.V(1).A().Error(s) return nil, err } - log.V(2).Infof("clickhouse.QueryContext():'%s'", sql) + log.V(2).Info("clickhouse.QueryContext():'%s'", sql) - return &Query{ - ctx: ctx, - cancelFunc: cancel, - Rows: rows, - }, nil + return NewQuery(ctx, cancel, rows), nil } // Exec runs given sql query @@ -142,18 +108,18 @@ func (c *CHConnection) Exec(sql string) error { if !c.ensureConnected() { s := fmt.Sprintf("FAILED connect(%s) for SQL: %s", c.params.GetDSNWithHiddenCredentials(), sql) - log.V(1).Info(s) + log.V(1).A().Error(s) return fmt.Errorf(s) } _, err := c.conn.ExecContext(ctx, sql) if err != nil { - log.V(1).Infof("FAILED Exec(%s) %v for SQL: %s", c.params.GetDSNWithHiddenCredentials(), err, sql) + log.V(1).A().Error("FAILED Exec(%s) %v for SQL: %s", c.params.GetDSNWithHiddenCredentials(), err, sql) return err } - log.V(2).Infof("clickhouse.Exec():\n", sql) + log.V(2).F().Info("\n%s", sql) return nil } diff --git a/pkg/model/clickhouse/pool.go b/pkg/model/clickhouse/pool.go index 1d3fdd71f..459a074b6 100644 --- a/pkg/model/clickhouse/pool.go +++ b/pkg/model/clickhouse/pool.go @@ -15,8 +15,7 @@ package clickhouse import ( - log "github.com/golang/glog" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" "sync" ) @@ -32,7 +31,7 @@ 
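
// ---------------------------------------------------------------------------
// NOTE (editor): GetPooledDBConnection() below keeps one CHConnection per DSN
// in a package-level sync.Map and deliberately re-reads the map after the
// slow path ("Double check for race condition"). The same shape in isolation,
// with toy types; sync.Map.LoadOrStore collapses the check-store-check
// sequence into one call and is shown here as the design alternative:
package main

import (
	"fmt"
	"sync"
)

type conn struct{ dsn string }

var pool sync.Map // dsn -> *conn

func getPooled(dsn string) *conn {
	// Fast path: already pooled.
	if c, ok := pool.Load(dsn); ok {
		return c.(*conn)
	}
	// Slow path: LoadOrStore makes the race harmless - exactly one stored
	// value wins and every racing caller receives that same value.
	c, _ := pool.LoadOrStore(dsn, &conn{dsn: dsn})
	return c.(*conn)
}

func main() {
	a := getPooled("tcp://ch-0:9000")
	b := getPooled("tcp://ch-0:9000")
	fmt.Println(a == b) // true: one pooled connection per DSN
}
// ---------------------------------------------------------------------------
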
func GetPooledDBConnection(params *CHConnectionParams) *CHConnection { key := makePoolKey(params) if connection, existed := dbConnectionPool.Load(key); existed { - log.V(2).Infof("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } @@ -43,16 +42,16 @@ func GetPooledDBConnection(params *CHConnectionParams) *CHConnection { // Double check for race condition if connection, existed := dbConnectionPool.Load(key); existed { - log.V(2).Infof("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } - log.V(2).Infof("Add connection to the pool: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Add connection to the pool: %s", params.GetDSNWithHiddenCredentials()) dbConnectionPool.Store(key, NewConnection(params)) // Fetch from the pool if connection, existed := dbConnectionPool.Load(key); existed { - log.V(2).Infof("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } diff --git a/pkg/model/clickhouse/query.go b/pkg/model/clickhouse/query.go new file mode 100644 index 000000000..de6ecc5ff --- /dev/null +++ b/pkg/model/clickhouse/query.go @@ -0,0 +1,58 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clickhouse + +import ( + "context" + databasesql "database/sql" + + log "github.com/altinity/clickhouse-operator/pkg/announcer" +) + +// Query +type Query struct { + ctx context.Context + cancelFunc context.CancelFunc + Rows *databasesql.Rows +} + +// NewQuery +func NewQuery(ctx context.Context, cancelFunc context.CancelFunc, rows *databasesql.Rows) *Query { + return &Query{ + ctx: ctx, + cancelFunc: cancelFunc, + Rows: rows, + } +} + +// Close +func (q *Query) Close() { + if q == nil { + return + } + + if q.Rows != nil { + err := q.Rows.Close() + q.Rows = nil + if err != nil { + log.A().Error("UNABLE to close rows. 
Err: %v", err) + } + } + + if q.cancelFunc != nil { + q.cancelFunc() + q.cancelFunc = nil + } +} diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 95e3519dc..192d7d0a5 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -18,13 +18,12 @@ import ( "fmt" // "net/url" - log "github.com/golang/glog" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" "github.com/altinity/clickhouse-operator/pkg/util" @@ -35,17 +34,16 @@ type Creator struct { chi *chiv1.ClickHouseInstallation chConfigFilesGenerator *ClickHouseConfigFilesGenerator labeler *Labeler + a log.Announcer } -func NewCreator( - chop *chop.CHOp, - chi *chiv1.ClickHouseInstallation, -) *Creator { +func NewCreator(chop *chop.CHOp, chi *chiv1.ClickHouseInstallation) *Creator { return &Creator{ chop: chop, chi: chi, chConfigFilesGenerator: NewClickHouseConfigFilesGenerator(NewClickHouseConfigGenerator(chi), chop.Config()), labeler: NewLabeler(chop, chi), + a: log.M(chi), } } @@ -53,7 +51,7 @@ func NewCreator( func (c *Creator) CreateServiceCHI() *corev1.Service { serviceName := CreateCHIServiceName(c.chi) - log.V(1).Infof("CreateServiceCHI(%s/%s)", c.chi.Namespace, serviceName) + c.a.V(1).F().Info("%s/%s", c.chi.Namespace, serviceName) if template, ok := c.chi.GetCHIServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -61,7 +59,7 @@ func (c *Creator) CreateServiceCHI() *corev1.Service { c.chi.Namespace, serviceName, c.labeler.getLabelsServiceCHI(), - c.labeler.getSelectorCHIScope(), + c.labeler.getSelectorCHIScopeReady(), ) } else { // Incorrect/unknown .templates.ServiceTemplate specified @@ -88,7 +86,7 @@ func (c *Creator) CreateServiceCHI() *corev1.Service { TargetPort: intstr.FromString(chDefaultTCPPortName), }, }, - Selector: c.labeler.getSelectorCHIScope(), + Selector: c.labeler.getSelectorCHIScopeReady(), Type: corev1.ServiceTypeLoadBalancer, ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal, }, @@ -100,7 +98,7 @@ func (c *Creator) CreateServiceCHI() *corev1.Service { func (c *Creator) CreateServiceCluster(cluster *chiv1.ChiCluster) *corev1.Service { serviceName := CreateClusterServiceName(cluster) - log.V(1).Infof("CreateServiceCluster(%s/%s)", cluster.Address.Namespace, serviceName) + c.a.V(1).F().Info("%s/%s", cluster.Address.Namespace, serviceName) if template, ok := cluster.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -108,7 +106,7 @@ func (c *Creator) CreateServiceCluster(cluster *chiv1.ChiCluster) *corev1.Servic cluster.Address.Namespace, serviceName, c.labeler.getLabelsServiceCluster(cluster), - c.labeler.getSelectorClusterScope(cluster), + c.labeler.getSelectorClusterScopeReady(cluster), ) } else { return nil @@ -119,7 +117,7 @@ func (c *Creator) CreateServiceCluster(cluster *chiv1.ChiCluster) *corev1.Servic func (c *Creator) CreateServiceShard(shard *chiv1.ChiShard) *corev1.Service { serviceName := CreateShardServiceName(shard) - log.V(1).Infof("CreateServiceShard(%s/%s)", shard.Address.Namespace, serviceName) + c.a.V(1).F().Info("%s/%s", shard.Address.Namespace, serviceName) if template, ok := shard.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return 
c.createServiceFromTemplate( @@ -127,7 +125,7 @@ func (c *Creator) CreateServiceShard(shard *chiv1.ChiShard) *corev1.Service { shard.Address.Namespace, serviceName, c.labeler.getLabelsServiceShard(shard), - c.labeler.getSelectorShardScope(shard), + c.labeler.getSelectorShardScopeReady(shard), ) } else { return nil @@ -139,7 +137,7 @@ func (c *Creator) CreateServiceHost(host *chiv1.ChiHost) *corev1.Service { serviceName := CreateStatefulSetServiceName(host) statefulSetName := CreateStatefulSetName(host) - log.V(1).Infof("CreateServiceHost(%s/%s) for Set %s", host.Address.Namespace, serviceName, statefulSetName) + c.a.V(1).F().Info("%s/%s for Set %s", host.Address.Namespace, serviceName, statefulSetName) if template, ok := host.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -193,12 +191,11 @@ func (c *Creator) verifyServiceTemplatePorts(template *chiv1.ChiServiceTemplate) for i := range template.Spec.Ports { servicePort := &template.Spec.Ports[i] if (servicePort.Port < 1) || (servicePort.Port > 65535) { - msg := fmt.Sprintf("verifyServiceTemplatePorts(%s) INCORRECT PORT: %d ", template.Name, servicePort.Port) - log.V(1).Infof(msg) + msg := fmt.Sprintf("template:%s INCORRECT PORT:%d", template.Name, servicePort.Port) + c.a.V(1).F().Warning(msg) return fmt.Errorf(msg) } } - return nil } @@ -313,13 +310,35 @@ func (c *Creator) CreateStatefulSet(host *chiv1.ChiHost) *apps.StatefulSet { c.setupStatefulSetPodTemplate(statefulSet, host) c.setupStatefulSetVolumeClaimTemplates(statefulSet, host) + c.setupStatefulSetVersion(statefulSet) - statefulSet.Labels = util.MergeStringMapsOverwrite(statefulSet.Labels, map[string]string{LabelStatefulSetVersion: util.Fingerprint(statefulSet)}) host.StatefulSet = statefulSet return statefulSet } +// setupStatefulSetVersion +// TODO property of the labeler? +func (c *Creator) setupStatefulSetVersion(statefulSet *apps.StatefulSet) { + statefulSet.Labels = util.MergeStringMapsOverwrite( + statefulSet.Labels, + map[string]string{ + LabelObjectVersion: util.Fingerprint(statefulSet), + }, + ) + c.a.V(2).F().Info("StatefulSet(%s/%s)\n%s", statefulSet.Namespace, statefulSet.Name, util.Dump(statefulSet)) +} + +// GetStatefulSetVersion +// TODO property of the labeler? 
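
// ---------------------------------------------------------------------------
// NOTE (editor): setupStatefulSetVersion() above and GetStatefulSetVersion()
// right below are the two halves of the version-label scheme that
// getStatefulSetStatus() in worker.go relies on: fingerprint the desired
// object, store the fingerprint under LabelObjectVersion, and later compare
// labels instead of deep-diffing Specs. The mechanism in miniature, with an
// fnv hash standing in for util.Fingerprint (whose algorithm this patch does
// not show):
package main

import (
	"fmt"
	"hash/fnv"
)

const labelObjectVersion = "clickhouse.altinity.com/object-version"

// fingerprint hashes a serialized spec into a short comparable string.
func fingerprint(spec string) string {
	h := fnv.New64a()
	h.Write([]byte(spec))
	return fmt.Sprintf("%x", h.Sum64())
}

func main() {
	cur := map[string]string{labelObjectVersion: fingerprint("replicas: 2")}
	desired := map[string]string{labelObjectVersion: fingerprint("replicas: 3")}

	if cur[labelObjectVersion] == desired[labelObjectVersion] {
		fmt.Println("StatefulSetStatusSame: skip reconcile")
	} else {
		fmt.Println("StatefulSetStatusModified: reconcile needed")
	}
}
// ---------------------------------------------------------------------------
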
+func (c *Creator) GetStatefulSetVersion(statefulSet *apps.StatefulSet) (string, bool) { + if statefulSet == nil { + return "", false + } + label, ok := statefulSet.Labels[LabelObjectVersion] + return label, ok +} + // PreparePersistentVolume func (c *Creator) PreparePersistentVolume(pv *corev1.PersistentVolume, host *chiv1.ChiHost) *corev1.PersistentVolume { pv.Labels = util.MergeStringMapsOverwrite(pv.Labels, c.labeler.getLabelsHostScope(host, false)) @@ -386,7 +405,7 @@ func (c *Creator) personalizeStatefulSetTemplate(statefulSet *apps.StatefulSet, // In case we have default LogVolumeClaimTemplate specified - need to append log container to Pod Template if host.Templates.LogVolumeClaimTemplate != "" { addContainer(&statefulSet.Spec.Template.Spec, newDefaultLogContainer()) - log.V(1).Infof("setupStatefulSetPodTemplate() add log container for statefulSet %s", statefulSetName) + c.a.V(1).F().Info("add log container for statefulSet %s", statefulSetName) } } @@ -400,11 +419,11 @@ func (c *Creator) getPodTemplate(host *chiv1.ChiHost) *chiv1.ChiPodTemplate { // Host references known PodTemplate // Make local copy of this PodTemplate, in order not to spoil the original common-used template podTemplate = podTemplate.DeepCopy() - log.V(1).Infof("getPodTemplate() statefulSet %s use custom template %s", statefulSetName, podTemplate.Name) + c.a.V(1).F().Info("statefulSet %s use custom template %s", statefulSetName, podTemplate.Name) } else { // Host references UNKNOWN PodTemplate, will use default one podTemplate = c.newDefaultPodTemplate(statefulSetName) - log.V(1).Infof("getPodTemplate() statefulSet %s use default generated template", statefulSetName) + c.a.V(1).F().Info("statefulSet %s use default generated template", statefulSetName) } // Here we have local copy of Pod Template, to be used to create StatefulSet @@ -493,7 +512,7 @@ func (c *Creator) statefulSetApplyPodTemplate( ObjectMeta: metav1.ObjectMeta{ Name: template.Name, Labels: util.MergeStringMapsOverwrite( - c.labeler.getLabelsHostScope(host, true), + c.labeler.getLabelsHostScopeReady(host, true), template.ObjectMeta.Labels, ), Annotations: util.MergeStringMapsOverwrite( @@ -649,14 +668,14 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 3. Specified (by volumeClaimTemplateName) VolumeClaimTemplate has to be available as well if _, ok := c.chi.GetVolumeClaimTemplate(volumeClaimTemplateName); !ok { // Incorrect/unknown .templates.VolumeClaimTemplate specified - log.V(1).Infof("Can not find volumeClaimTemplate %s. Volume claim can not be mounted", volumeClaimTemplateName) + c.a.V(1).F().Warning("Can not find volumeClaimTemplate %s. Volume claim can not be mounted", volumeClaimTemplateName) return nil } // 4. Specified container has to be available container := getContainerByName(statefulSet, containerName) if container == nil { - log.V(1).Infof("Can not find container %s. Volume claim can not be mounted", containerName) + c.a.V(1).F().Warning("Can not find container %s. Volume claim can not be mounted", containerName) return nil } @@ -675,8 +694,8 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 1. 
Check whether this VolumeClaimTemplate is already listed in VolumeMount of this container if volumeMount.Name == existingVolumeMount.Name { // This .templates.VolumeClaimTemplate is already used in VolumeMount - log.V(1).Infof( - "setupStatefulSetApplyVolumeClaim(%s) container %s volumeClaimTemplateName %s already used", + c.a.V(1).F().Warning( + "StatefulSet:%s container:%s volumeClaimTemplateName:%s already used", statefulSet.Name, container.Name, volumeMount.Name, @@ -687,8 +706,8 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 2. Check whether `mountPath` (say '/var/lib/clickhouse') is already mounted if volumeMount.MountPath == existingVolumeMount.MountPath { // `mountPath` (say /var/lib/clickhouse) is already mounted - log.V(1).Infof( - "setupStatefulSetApplyVolumeClaim(%s) container %s mountPath %s already used", + c.a.V(1).F().Warning( + "StatefulSet:%s container:%s mountPath:%s already used", statefulSet.Name, container.Name, volumeMount.MountPath, @@ -709,7 +728,8 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( ) } - log.V(1).Infof("setupStatefulSetApplyVolumeClaim(%s) container %s mounted %s on %s", + c.a.V(1).F().Info( + "StatefulSet:%s container:%s mounted %s on %s", statefulSet.Name, container.Name, volumeMount.Name, @@ -829,26 +849,28 @@ func newDefaultLivenessProbe() *corev1.Probe { Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/ping", - Port: intstr.Parse(chDefaultHTTPPortName), + Port: intstr.Parse(chDefaultHTTPPortName), // What if it is not a default? }, }, - InitialDelaySeconds: 10, + InitialDelaySeconds: 60, PeriodSeconds: 3, + FailureThreshold: 10, } } // newDefaultReadinessProbe func (c *Creator) newDefaultReadinessProbe() *corev1.Probe { - return &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/replicas_status", - Port: intstr.Parse(chDefaultHTTPPortName), - }, - }, - InitialDelaySeconds: 10, - PeriodSeconds: 3, - } + return nil + // return &corev1.Probe{ + // Handler: corev1.Handler{ + // HTTPGet: &corev1.HTTPGetAction{ + // Path: "/replicas_status", + // Port: intstr.Parse(chDefaultHTTPPortName), + // }, + // }, + // InitialDelaySeconds: 10, + // PeriodSeconds: 3, + // } } // newDefaultClickHouseContainer returns default ClickHouse Container diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index 9e34a0063..2d6481cd0 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -16,17 +16,21 @@ package model import ( "fmt" + + "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + kublabels "k8s.io/apimachinery/pkg/labels" + "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com" chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" "github.com/altinity/clickhouse-operator/pkg/util" - "k8s.io/api/core/v1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" - kublabels "k8s.io/apimachinery/pkg/labels" ) const ( // Kubernetes labels + LabelReadyName = clickhousealtinitycom.GroupName + "/ready" + LabelReadyValue = "yes" LabelAppName = clickhousealtinitycom.GroupName + "/app" LabelAppValue = "chop" LabelCHOP = clickhousealtinitycom.GroupName + "/chop" @@ -58,7 +62,7 @@ const ( // Supplementary service labels - used to cooperate with k8s LabelZookeeperConfigVersion = clickhousealtinitycom.GroupName + "/zookeeper-version" LabelSettingsConfigVersion = clickhousealtinitycom.GroupName + "/settings-version" - LabelStatefulSetVersion = clickhousealtinitycom.GroupName + 
"/statefulset-version" + LabelObjectVersion = clickhousealtinitycom.GroupName + "/object-version" ) // Labeler is an entity which can label CHI artifacts @@ -160,6 +164,11 @@ func (l *Labeler) getSelectorCHIScope() map[string]string { } } +// getSelectorCHIScopeReady gets labels to select a ready-labelled CHI-scoped object +func (l *Labeler) getSelectorCHIScopeReady() map[string]string { + return l.appendReadyLabels(l.getSelectorCHIScope()) +} + // getLabelsClusterScope gets labels for Cluster-scoped object func (l *Labeler) getLabelsClusterScope(cluster *chi.ChiCluster) map[string]string { // Combine generated labels and CHI-provided labels @@ -182,6 +191,11 @@ func (l *Labeler) getSelectorClusterScope(cluster *chi.ChiCluster) map[string]st } } +// getSelectorClusterScope gets labels to select a ready-labelled Cluster-scoped object +func (l *Labeler) getSelectorClusterScopeReady(cluster *chi.ChiCluster) map[string]string { + return l.appendReadyLabels(l.getSelectorClusterScope(cluster)) +} + // getLabelsShardScope gets labels for Shard-scoped object func (l *Labeler) getLabelsShardScope(shard *chi.ChiShard) map[string]string { // Combine generated labels and CHI-provided labels @@ -206,6 +220,11 @@ func (l *Labeler) getSelectorShardScope(shard *chi.ChiShard) map[string]string { } } +// getSelectorShardScope gets labels to select a ready-labelled Shard-scoped object +func (l *Labeler) getSelectorShardScopeReady(shard *chi.ChiShard) map[string]string { + return l.appendReadyLabels(l.getSelectorShardScope(shard)) +} + // getLabelsHostScope gets labels for Host-scoped object func (l *Labeler) getLabelsHostScope(host *chi.ChiHost, applySupplementaryServiceLabels bool) map[string]string { // Combine generated labels and CHI-provided labels @@ -236,6 +255,11 @@ func (l *Labeler) getLabelsHostScope(host *chi.ChiHost, applySupplementaryServic return l.appendCHILabels(labels) } +// getLabelsHostScopeReady gets labels for Host-scoped object including Ready label +func (l *Labeler) getLabelsHostScopeReady(host *chi.ChiHost, applySupplementaryServiceLabels bool) map[string]string { + return l.appendReadyLabels(l.getLabelsHostScope(host, applySupplementaryServiceLabels)) +} + // getSelectorShardScope gets labels to select a Host-scoped object func (l *Labeler) GetSelectorHostScope(host *chi.ChiHost) map[string]string { // Do not include CHI-provided labels @@ -256,6 +280,13 @@ func (l *Labeler) appendCHILabels(dst map[string]string) map[string]string { return util.MergeStringMapsOverwrite(dst, l.chi.Labels) } +// appendReadyLabels appends "Ready" label to labels set +func (l *Labeler) appendReadyLabels(dst map[string]string) map[string]string { + return util.MergeStringMapsOverwrite(dst, map[string]string{ + LabelReadyName: LabelReadyValue, + }) +} + // getAnnotationsHostScope gets annotations for Host-scoped object func (l *Labeler) getAnnotationsHostScope(host *chi.ChiHost) map[string]string { // We may want to append some annotations in here @@ -439,3 +470,23 @@ func GetClusterNameFromObjectMeta(meta *meta.ObjectMeta) (string, error) { } return meta.Labels[LabelClusterName], nil } + +// AppendLabelReady adds "ready" label with value = UTC now +func AppendLabelReady(meta *meta.ObjectMeta) { + if meta == nil { + return + } + util.MergeStringMapsOverwrite( + meta.Labels, + map[string]string{ + LabelReadyName: LabelReadyValue, + }) +} + +// DeleteLabelReady deletes "ready" label +func DeleteLabelReady(meta *meta.ObjectMeta) { + if meta == nil { + return + } + util.MapDeleteKeys(meta.Labels, 
LabelReadyName) +} diff --git a/pkg/model/namer.go b/pkg/model/namer.go index 2c9221ddb..5eec606c6 100644 --- a/pkg/model/namer.go +++ b/pkg/model/namer.go @@ -16,13 +16,14 @@ package model import ( "fmt" - "k8s.io/api/core/v1" "strconv" "strings" + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/util" - apps "k8s.io/api/apps/v1" ) const ( diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index 4bb46ef56..e985ccca1 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -20,16 +20,14 @@ import ( "fmt" "strings" - log "github.com/golang/glog" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" + chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/chop" + "github.com/altinity/clickhouse-operator/pkg/util" "gopkg.in/d4l3k/messagediff.v1" "k8s.io/api/core/v1" v12 "k8s.io/apimachinery/pkg/apis/meta/v1" - - chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" - "github.com/altinity/clickhouse-operator/pkg/chop" - "github.com/altinity/clickhouse-operator/pkg/util" ) // Normalizer @@ -71,12 +69,16 @@ func (n *Normalizer) CreateTemplatedCHI(chi *chiv1.ClickHouseInstallation, withD var useTemplates []chiv1.ChiUseTemplate - for _, template := range n.chop.Config().FindAutoTemplates() { - useTemplates = append(useTemplates, chiv1.ChiUseTemplate{ - Name: template.Name, - Namespace: template.Namespace, - UseType: useTypeMerge, - }) + if autoTemplates := n.chop.Config().FindAutoTemplates(); len(autoTemplates) > 0 { + log.V(2).M(chi).F().Info("Found auto-templates num: %d", len(autoTemplates)) + for _, template := range autoTemplates { + log.V(3).M(chi).F().Info("Adding auto-template to merge list: %s/%s ", template.Name, template.Namespace) + useTemplates = append(useTemplates, chiv1.ChiUseTemplate{ + Name: template.Name, + Namespace: template.Namespace, + UseType: useTypeMerge, + }) + } } if len(chi.Spec.UseTemplates) > 0 { @@ -91,10 +93,10 @@ func (n *Normalizer) CreateTemplatedCHI(chi *chiv1.ClickHouseInstallation, withD for i := range useTemplates { useTemplate := &useTemplates[i] if template := n.chop.Config().FindTemplate(useTemplate, chi.Namespace); template == nil { - log.V(1).Infof("UNABLE to find template %s/%s referenced in useTemplates. Skip it.", useTemplate.Namespace, useTemplate.Name) + log.V(1).M(chi).A().Warning("UNABLE to find template %s/%s referenced in useTemplates. 
Skip it.", useTemplate.Namespace, useTemplate.Name) } else { (&n.chi.Spec).MergeFrom(&template.Spec, chiv1.MergeTypeOverrideByNonEmptyValues) - log.V(2).Infof("Merge template %s/%s referenced in useTemplates", useTemplate.Namespace, useTemplate.Name) + log.V(2).M(chi).F().Info("Merge template %s/%s referenced in useTemplates", useTemplate.Namespace, useTemplate.Name) } } @@ -182,7 +184,7 @@ func (n *Normalizer) getHostTemplate(host *chiv1.ChiHost) *chiv1.ChiHostTemplate hostTemplate, ok := host.GetHostTemplate() if ok { // Host references known HostTemplate - log.V(2).Infof("getHostTemplate() statefulSet %s use custom host template %s", statefulSetName, hostTemplate.Name) + log.V(2).M(host).F().Info("StatefulSet %s uses custom hostTemplate %s", statefulSetName, hostTemplate.Name) return hostTemplate } @@ -203,7 +205,7 @@ func (n *Normalizer) getHostTemplate(host *chiv1.ChiHost) *chiv1.ChiHostTemplate hostTemplate = newDefaultHostTemplate(statefulSetName) } - log.V(3).Infof("getHostTemplate() statefulSet %s use default host template", statefulSetName) + log.V(3).M(host).F().Info("StatefulSet %s use default hostTemplate", statefulSetName) return hostTemplate } @@ -1280,8 +1282,9 @@ func (n *Normalizer) normalizeConfigurationUsers(users *chiv1.Settings) { } _, okPasswordSHA256 := (*users)[username+"/password_sha256_hex"] - // if SHA256 is not set, initialize it from the password - if pass != "" && !okPasswordSHA256 { + _, okPasswordDoubleSHA1 := (*users)[username+"/password_double_sha1_hex"] + // if SHA256 or DoubleSHA1 are not set, initialize SHA256 from the password + if pass != "" && !okPasswordSHA256 && !okPasswordDoubleSHA1 { pass_sha256 := sha256.Sum256([]byte(pass)) (*users)[username+"/password_sha256_hex"] = chiv1.NewScalarSetting(hex.EncodeToString(pass_sha256[:])) okPasswordSHA256 = true diff --git a/pkg/model/schemer.go b/pkg/model/schemer.go index b055ac3a2..c7a9aa589 100644 --- a/pkg/model/schemer.go +++ b/pkg/model/schemer.go @@ -19,9 +19,8 @@ import ( "strings" "github.com/MakeNowJust/heredoc" - log "github.com/golang/glog" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/model/clickhouse" "github.com/altinity/clickhouse-operator/pkg/util" @@ -71,18 +70,18 @@ func (s *Schemer) getObjectListFromClickHouse(endpoints []string, sql string) ([ // Fetch data from any of specified services var query *clickhouse.Query = nil for _, endpoint := range endpoints { - log.V(1).Infof("Run query on: %s of %v", endpoint, endpoints) + log.V(1).Info("Run query on: %s of %v", endpoint, endpoints) query, err = s.getCHConnection(endpoint).Query(sql) if err == nil { // One of specified services returned result, no need to iterate more break } else { - log.V(1).Infof("Run query on: %s of %v FAILED skip to next. err: %v", endpoint, endpoints, err) + log.V(1).A().Warning("FAILED to run query on: %s of %v skip to next. 
err: %v", endpoint, endpoints, err) } } if err != nil { - log.V(1).Infof("Run query FAILED on all %v", endpoints) + log.V(1).A().Error("FAILED to run query on all endpoints %v", endpoints) return nil, nil, err } @@ -95,7 +94,7 @@ func (s *Schemer) getObjectListFromClickHouse(endpoints []string, sql string) ([ names = append(names, name) statements = append(statements, statement) } else { - log.V(1).Infof("UNABLE to scan row err: %v", err) + log.V(1).A().Error("UNABLE to scan row err: %v", err) } } @@ -108,7 +107,7 @@ func (s *Schemer) getCreateDistributedObjects(host *chop.ChiHost) ([]string, []s hosts := CreatePodFQDNsOfCluster(host.GetCluster()) nHosts := len(hosts) if nHosts <= 1 { - log.V(1).Info("Single host in a cluster. Nothing to create a schema from.") + log.V(1).M(host).F().Info("Single host in a cluster. Nothing to create a schema from.") return nil, nil, nil } @@ -123,7 +122,7 @@ func (s *Schemer) getCreateDistributedObjects(host *chop.ChiHost) ([]string, []s // remove new host from the list. See https://stackoverflow.com/questions/37334119/how-to-delete-an-element-from-a-slice-in-golang hosts[hostIndex] = hosts[nHosts-1] hosts = hosts[:nHosts-1] - log.V(1).Infof("Extracting distributed table definitions from hosts: %v", hosts) + log.V(1).M(host).F().Info("Extracting distributed table definitions from hosts: %v", hosts) cluster_tables := fmt.Sprintf("remote('%s', system, tables)", strings.Join(hosts, ",")) @@ -172,28 +171,28 @@ func (s *Schemer) getCreateDistributedObjects(host *chop.ChiHost) ([]string, []s cluster_tables, )) - log.V(1).Infof("fetch dbs list") - log.V(1).Infof("dbs sql\n%v", sqlDBs) + log.V(1).M(host).F().Info("fetch dbs list") + log.V(1).M(host).F().Info("dbs sql\n%v", sqlDBs) names1, sqlStatements1, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfCHI(host.GetCHI()), sqlDBs) - log.V(1).Infof("names1:") + log.V(1).M(host).F().Info("names1:") for _, v := range names1 { - log.V(1).Infof("names1: %s", v) + log.V(1).M(host).F().Info("names1: %s", v) } - log.V(1).Infof("sql1:") + log.V(1).M(host).F().Info("sql1:") for _, v := range sqlStatements1 { - log.V(1).Infof("sql1: %s", v) + log.V(1).M(host).F().Info("sql1: %s", v) } - log.V(1).Infof("fetch table list") - log.V(1).Infof("tbl sql\n%v", sqlTables) + log.V(1).M(host).F().Info("fetch table list") + log.V(1).M(host).F().Info("tbl sql\n%v", sqlTables) names2, sqlStatements2, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfCHI(host.GetCHI()), sqlTables) - log.V(1).Infof("names2:") + log.V(1).M(host).F().Info("names2:") for _, v := range names2 { - log.V(1).Infof("names2: %s", v) + log.V(1).M(host).F().Info("names2: %s", v) } - log.V(1).Infof("sql2:") + log.V(1).M(host).F().Info("sql2:") for _, v := range sqlStatements2 { - log.V(1).Infof("sql2: %s", v) + log.V(1).M(host).F().Info("sql2: %s", v) } return append(names1, names2...), append(sqlStatements1, sqlStatements2...), nil @@ -214,19 +213,19 @@ func (s *Schemer) getCreateReplicaObjects(host *chop.ChiHost) ([]string, []strin } } if shard == nil { - log.V(1).Info("Can not find shard for replica") + log.V(1).M(host).F().Info("Can not find shard for replica") return nil, nil, nil } replicas := CreatePodFQDNsOfShard(shard) nReplicas := len(replicas) if nReplicas <= 1 { - log.V(1).Info("Single replica in a shard. Nothing to create a schema from.") + log.V(1).M(host).F().Info("Single replica in a shard. Nothing to create a schema from.") return nil, nil, nil } // remove new replica from the list. 
See https://stackoverflow.com/questions/37334119/how-to-delete-an-element-from-a-slice-in-golang replicas[replicaIndex] = replicas[nReplicas-1] replicas = replicas[:nReplicas-1] - log.V(1).Infof("Extracting replicated table definitions from %v", replicas) + log.V(1).M(host).F().Info("Extracting replicated table definitions from %v", replicas) system_tables := fmt.Sprintf("remote('%s', system, tables)", strings.Join(replicas, ",")) @@ -274,28 +273,28 @@ func (s *Schemer) hostGetDropTables(host *chop.ChiHost) ([]string, []string, err // HostDeleteTables func (s *Schemer) HostDeleteTables(host *chop.ChiHost) error { tableNames, dropTableSQLs, _ := s.hostGetDropTables(host) - log.V(1).Infof("Drop tables: %v as %v", tableNames, dropTableSQLs) + log.V(1).M(host).F().Info("Drop tables: %v as %v", tableNames, dropTableSQLs) return s.hostApplySQLs(host, dropTableSQLs, false) } // HostCreateTables func (s *Schemer) HostCreateTables(host *chop.ChiHost) error { - log.V(1).Infof("Migrating schema objects to host %s", host.Address.HostName) + log.V(1).M(host).F().Info("Migrating schema objects to host %s", host.Address.HostName) var err1, err2 error if names, createSQLs, err := s.getCreateReplicaObjects(host); err == nil { if len(createSQLs) > 0 { - log.V(1).Infof("Creating replica objects at %s: %v", host.Address.HostName, names) - log.V(1).Infof("\n%v", createSQLs) + log.V(1).M(host).F().Info("Creating replica objects at %s: %v", host.Address.HostName, names) + log.V(1).M(host).F().Info("\n%v", createSQLs) err1 = s.hostApplySQLs(host, createSQLs, true) } } if names, createSQLs, err := s.getCreateDistributedObjects(host); err == nil { if len(createSQLs) > 0 { - log.V(1).Infof("Creating distributed objects at %s: %v", host.Address.HostName, names) - log.V(1).Infof("\n%v", createSQLs) + log.V(1).M(host).F().Info("Creating distributed objects at %s: %v", host.Address.HostName, names) + log.V(1).M(host).F().Info("\n%v", createSQLs) err2 = s.hostApplySQLs(host, createSQLs, true) } } @@ -310,7 +309,7 @@ func (s *Schemer) HostCreateTables(host *chop.ChiHost) error { return nil } -// IsHostInCluster +// IsHostInCluster checks whether host is a member of at least one ClickHouse cluster func (s *Schemer) IsHostInCluster(host *chop.ChiHost) bool { sqls := []string{heredoc.Docf( `SELECT throwIf(count()=0) FROM system.clusters WHERE cluster='%s' AND is_local`, @@ -362,7 +361,7 @@ func (s *Schemer) applySQLs(hosts []string, sqls []string, retry bool) error { for _, host := range hosts { conn := s.getCHConnection(host) if conn == nil { - log.V(1).Infof("Unable to get conn to host %s", host) + log.V(1).M(host).F().Warning("Unable to get conn to host %s", host) continue } err := util.Retry(maxTries, "Applying sqls", func() error { @@ -374,7 +373,7 @@ func (s *Schemer) applySQLs(hosts []string, sqls []string, retry bool) error { } err := conn.Exec(sql) if err != nil && strings.Contains(err.Error(), "Code: 253,") && strings.Contains(sql, "CREATE TABLE") { - log.V(1).Info("Replica is already in ZooKeeper. Trying ATTACH TABLE instead") + log.V(1).M(host).F().Info("Replica is already in ZooKeeper. 
Trying ATTACH TABLE instead") sqlAttach := strings.ReplaceAll(sql, "CREATE TABLE", "ATTACH TABLE") err = conn.Exec(sqlAttach) } @@ -389,7 +388,9 @@ func (s *Schemer) applySQLs(hosts []string, sqls []string, retry bool) error { return errors[0] } return nil - }) + }, + log.V(1).M(host).F().Info, + ) if err != nil { errors = append(errors, err) diff --git a/pkg/util/dump.go b/pkg/util/dump.go new file mode 100644 index 000000000..a63f63eed --- /dev/null +++ b/pkg/util/dump.go @@ -0,0 +1,24 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import dumper "github.com/sanity-io/litter" + +func Dump(obj interface{}) string { + d := dumper.Options{ + Separator: " ", + } + return d.Sdump(obj) +} diff --git a/pkg/util/fingerprint.go b/pkg/util/fingerprint.go index 495539e94..46e51ecfd 100644 --- a/pkg/util/fingerprint.go +++ b/pkg/util/fingerprint.go @@ -15,5 +15,5 @@ package util func Fingerprint(obj interface{}) string { - return HashIntoString(serialize(obj)) + return HashIntoString(serializeRepeatable(obj)) } diff --git a/pkg/util/fs.go b/pkg/util/fs.go index 89a4e7026..7e0843089 100644 --- a/pkg/util/fs.go +++ b/pkg/util/fs.go @@ -15,9 +15,6 @@ package util import ( - log "github.com/golang/glog" - // log "k8s.io/klog" - "io/ioutil" "os" "path/filepath" @@ -55,7 +52,6 @@ func ReadFilesIntoMap(path string, isOurFile func(string) bool) map[string]strin file := matches[i] if isOurFile(file) { // Pick our files only - log.V(2).Infof("Reading file %s\n", file) if content, err := ioutil.ReadFile(file); (err == nil) && (len(content) > 0) { // File content read successfully and file has some content if files == nil { diff --git a/pkg/util/hash.go b/pkg/util/hash.go index 209e2a0fa..977d333db 100644 --- a/pkg/util/hash.go +++ b/pkg/util/hash.go @@ -21,9 +21,12 @@ import ( "encoding/hex" "fmt" "hash/fnv" + + dumper "github.com/sanity-io/litter" + // "github.com/davecgh/go-spew/spew" ) -func serialize(obj interface{}) []byte { +func serializeUnrepeatable(obj interface{}) []byte { b := bytes.Buffer{} encoder := gob.NewEncoder(&b) err := encoder.Encode(obj) @@ -34,6 +37,15 @@ func serialize(obj interface{}) []byte { return b.Bytes() } +func serializeRepeatable(obj interface{}) []byte { + //s := spew.NewDefaultConfig() + //s.SortKeys = true + d := dumper.Options{ + Separator: " ", + } + return []byte(d.Sdump(obj)) +} + func HashIntoString(b []byte) string { hasher := sha1.New() hasher.Write(b) diff --git a/pkg/util/k8s.go b/pkg/util/k8s.go index 2aa712f3e..97f85e517 100644 --- a/pkg/util/k8s.go +++ b/pkg/util/k8s.go @@ -20,6 +20,10 @@ func NamespaceName(meta v1.ObjectMeta) (string, string) { return meta.Namespace, meta.Name } +func NamespaceNameString(meta v1.ObjectMeta) string { + return meta.Namespace + "/" + meta.Name +} + // IsAnnotationToBeSkipped checks whether an annotation should be skipped func IsAnnotationToBeSkipped(annotation string) bool { switch annotation { diff --git a/pkg/util/map.go 
b/pkg/util/map.go
index 4a1d664eb..80184e555 100644
--- a/pkg/util/map.go
+++ b/pkg/util/map.go
@@ -108,6 +108,48 @@ func MergeStringMapsPreserve(dst, src map[string]string, keys ...string) map[str
 	}
 }
 
+// SubtractStringMaps subtracts "delta" from "base" by keys
+func SubtractStringMaps(base, delta map[string]string) map[string]string {
+	if len(delta) == 0 {
+		// Nothing to delete
+		return base
+	}
+	if len(base) == 0 {
+		// Nowhere to delete from
+		return base
+	}
+
+	// Extract keys from delta and delete them from base
+	for key := range delta {
+		if _, ok := base[key]; ok {
+			delete(base, key)
+		}
+	}
+
+	return base
+}
+
+// MapDeleteKeys deletes multiple keys from the map
+func MapDeleteKeys(base map[string]string, keys ...string) map[string]string {
+	if len(keys) == 0 {
+		// Nothing to delete
+		return base
+	}
+	if len(base) == 0 {
+		// Nowhere to delete from
+		return base
+	}
+
+	// Delete the specified keys from base
+	for _, key := range keys {
+		if _, ok := base[key]; ok {
+			delete(base, key)
+		}
+	}
+
+	return base
+}
+
 // MapHasKeys checks whether map has all keys from specified list
 func MapHasKeys(m map[string]string, keys ...string) bool {
 	for _, needle := range keys {
diff --git a/pkg/util/retry.go b/pkg/util/retry.go
index 143b3114c..a4db22570 100644
--- a/pkg/util/retry.go
+++ b/pkg/util/retry.go
@@ -15,14 +15,11 @@ package util
 
 import (
-	log "github.com/golang/glog"
-	// log "k8s.io/klog"
-
 	"time"
 )
 
 // Retry
-func Retry(tries int, desc string, f func() error) error {
+func Retry(tries int, desc string, f func() error, log func(format string, args ...interface{})) error {
 	var err error
 	for try := 1; try <= tries; try++ {
 		err = f()
@@ -30,7 +27,7 @@ func Retry(tries int, desc string, f func() error) error {
 			// All ok, no need to retry more
 			if try > 1 {
 				// Done, but after some retries, this is not 'clean'
-				log.V(1).Infof("DONE attempt %d of %d: %s", try, tries, desc)
+				log("DONE attempt %d of %d: %s", try, tries, desc)
 			}
 			return nil
 		}
@@ -38,16 +35,16 @@ func Retry(tries int, desc string, f func() error) error {
 		if try < tries {
 			// Try failed, need to sleep and retry
 			seconds := try * 5
-			log.V(1).Infof("FAILED attempt %d of %d, sleep %d sec and retry: %s", try, tries, seconds, desc)
+			log("FAILED attempt %d of %d, sleep %d sec and retry: %s", try, tries, seconds, desc)
 			select {
 			case <-time.After(time.Duration(seconds) * time.Second):
 			}
 		} else if tries == 1 {
 			// On single try do not put so much emotion. It just failed and user is not intended to retry
-			log.V(1).Infof("FAILED single try. No retries will be made for %s", desc)
+			log("FAILED single try. No retries will be made for %s", desc)
 		} else {
 			// On last try no need to wait more
-			log.V(1).Infof("FAILED AND ABORT. All %d attempts: %s", tries, desc)
+			log("FAILED AND ABORT. All %d attempts: %s", tries, desc)
 		}
 	}
diff --git a/pkg/util/runtime.go b/pkg/util/runtime.go
new file mode 100644
index 000000000..d34bb0b98
--- /dev/null
+++ b/pkg/util/runtime.go
@@ -0,0 +1,47 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
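An aside between hunks (the runtime.go license header resumes below): the reworked `util.Retry` above no longer imports a logger, so each call site injects a printf-style function, just as the schemer.go hunk did by passing `log.V(1).M(host).F().Info`. A usage sketch under the new signature; the `pingClickHouse` callback is a stand-in, not something from this patch:

```go
import (
	log "github.com/altinity/clickhouse-operator/pkg/announcer"
	"github.com/altinity/clickhouse-operator/pkg/util"
)

// connectWithRetry runs the supplied probe up to 3 times with the package's
// try*5s backoff; progress messages go through the injected logger instead of
// a hardwired glog dependency.
func connectWithRetry(pingClickHouse func() error) error {
	return util.Retry(3, "ping ClickHouse", pingClickHouse, log.V(1).Info)
}
```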
+// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "path" + "runtime" + "strings" +) + +func Caller(skip string) (string, int, string) { + pc := make([]uintptr, 7) + n := runtime.Callers(2, pc) + frames := runtime.CallersFrames(pc[:n]) + for { + frame, more := frames.Next() + // frame.File = /tmp/sandbox469341579/prog.go + // frame.Line = 28 + // frame.Function = main.Announcer.Info + + // file = prog.go + file := path.Base(frame.File) + // function = Info + function := path.Base(strings.Replace(frame.Function, ".", "/", -1)) + + if file != skip { + return file, frame.Line, function + } + + if !more { + break + } + } + return "", 0, "" +} diff --git a/release b/release index 54d1a4f2a..c37136a84 100644 --- a/release +++ b/release @@ -1 +1 @@ -0.13.0 +0.13.5 diff --git a/tests/configs/test-017-multi-version.yaml b/tests/configs/test-017-multi-version.yaml index 09ae4f01e..df122b7bf 100644 --- a/tests/configs/test-017-multi-version.yaml +++ b/tests/configs/test-017-multi-version.yaml @@ -25,7 +25,7 @@ spec: - templates: podTemplate: v20.8 files: - remove_database_ordinary.xml: | + users.d/remove_database_ordinary.xml: | diff --git a/tests/kubectl.py b/tests/kubectl.py index 26fe99873..1d3d8c1e7 100644 --- a/tests/kubectl.py +++ b/tests/kubectl.py @@ -4,7 +4,7 @@ import manifest import util -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module +from testflows.core import TestScenario, Name, When, Then, Given, And, main, Module from testflows.asserts import error from testflows.connect import Shell diff --git a/tests/requirements.txt b/tests/requirements.txt index 28afadbc5..0aeafdbee 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,3 +1,2 @@ -testflows>=1.3.44,<1.4 -testflows.core>=1.3,<1.4 +testflows==1.6.72 PyYAML \ No newline at end of file diff --git a/tests/test.py b/tests/test.py index b8a1e5792..146a79705 100644 --- a/tests/test.py +++ b/tests/test.py @@ -4,7 +4,7 @@ import test_clickhouse import util -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE, args, Fail, Error +from testflows.core import TestScenario, Name, When, Then, Given, And, main, Scenario, Module, TE, args, Fail, Error from testflows.asserts import error if main(): @@ -37,6 +37,7 @@ # python3 tests/test.py --only operator* xfails = { + "/main/operator/test_009. Test operator upgrade": [(Fail, "May fail due to label changes")], "/main/operator/test_022. Test that chi with broken image can be deleted": [(Error, "Not supported yet. Timeout")], "/main/operator/test_024. 
Test annotations for various template types/PV annotations should be populated": [(Fail, "Not supported yet")], } @@ -49,7 +50,7 @@ test_operator.test_006, test_operator.test_007, test_operator.test_008, - (test_operator.test_009, {"version_from": "0.12.0"}), + (test_operator.test_009, {"version_from": "0.13.0"}), test_operator.test_010, test_operator.test_011, test_operator.test_011_1, @@ -63,10 +64,10 @@ test_operator.test_019, test_operator.test_020, test_operator.test_021, - test_operator.test_022, test_operator.test_023, test_operator.test_024, test_operator.test_025, + test_operator.test_022, # this should go last while failing ] run_tests = all_tests @@ -76,9 +77,9 @@ for t in run_tests: if callable(t): - run(test=t) + Scenario(test=t)() else: - run(test=t[0], args=t[1]) + Scenario(test=t[0], args=t[1])() # python3 tests/test.py --only clickhouse* with Module("clickhouse"): @@ -93,4 +94,4 @@ # run_test = [test_ch_002] for t in run_test: - run(test=t) + Scenario(test=t)() diff --git a/tests/test_clickhouse.py b/tests/test_clickhouse.py index 01b0189d7..43ddccdab 100644 --- a/tests/test_clickhouse.py +++ b/tests/test_clickhouse.py @@ -3,13 +3,13 @@ import settings from test_operator import require_zookeeper -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE +from testflows.core import TestScenario, Name, When, Then, Given, And, main, Scenario, Module, TE from testflows.asserts import error @TestScenario @Name("test_ch_001. Insert quorum") -def test_ch_001(): +def test_ch_001(self): require_zookeeper() create_and_check( @@ -114,7 +114,7 @@ def test_ch_001(): @TestScenario @Name("test_ch_002. Row-level security") -def test_ch_002(): +def test_ch_002(self): create_and_check( "configs/test-ch-002-row-level.yaml", { diff --git a/tests/test_metrics_exporter.py b/tests/test_metrics_exporter.py index 412c1c40e..597cebfaa 100644 --- a/tests/test_metrics_exporter.py +++ b/tests/test_metrics_exporter.py @@ -2,7 +2,7 @@ import re import json -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE +from testflows.core import TestScenario, Name, When, Then, Given, And, main, Scenario, Module, TE from testflows.asserts import error import kubectl @@ -17,18 +17,18 @@ def set_metrics_exporter_version(version, ns=settings.operator_namespace): @TestScenario @Name("Check metrics server setup and version") -def test_metrics_exporter_setup(): +def test_metrics_exporter_setup(self): with Given("clickhouse-operator is installed"): assert kubectl.get_count("pod", ns='--all-namespaces', label="-l app=clickhouse-operator") > 0, error() - with And(f"Set metrics-exporter version {settings.operator_version}"): + with Then(f"Set metrics-exporter version {settings.operator_version}"): set_metrics_exporter_version(settings.operator_version) @TestScenario @Name("Check metrics server state after reboot") -def test_metrics_exporter_reboot(): +def test_metrics_exporter_reboot(self): def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_retries=10): - with And(f"metrics-exporter /chi enpoint result should return {expect_result}"): + with Then(f"metrics-exporter /chi enpoint result should return {expect_result}"): for i in range(1, max_retries): # check /metrics for try to refresh monitored instances kubectl.launch( @@ -89,9 +89,9 @@ def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_re @TestScenario @Name("Check metrics server help with different clickhouse version") -def 
test_metrics_exporter_with_multiple_clickhouse_version(): +def test_metrics_exporter_with_multiple_clickhouse_version(self): def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, max_retries=10): - with And(f"metrics-exporter /metrics enpoint result should match with {expect_result}"): + with Then(f"metrics-exporter /metrics enpoint result should match with {expect_result}"): for i in range(1, max_retries): out = kubectl.launch( f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/metrics", @@ -127,9 +127,9 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma config=config, check={ "object_counts": { - "statefulset": 4, - "pod": 4, - "service": 5, + "statefulset": 2, + "pod": 2, + "service": 3, }, "do_not_delete": True, }) @@ -139,9 +139,6 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma '# TYPE chi_clickhouse_metric_VersionInteger gauge': True, 'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-0-0': True, 'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-1-0': True, - 'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-2-0': True, - 'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-3-0': True, - }) with Then("check empty /metrics after delete namespace"): @@ -159,4 +156,4 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma test_metrics_exporter_with_multiple_clickhouse_version, ] for t in test_cases: - run(test=t, flags=TE) + Scenario(test=t, flags=TE)() diff --git a/tests/test_operator.py b/tests/test_operator.py index 39a23d966..a3f682da1 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -6,13 +6,13 @@ import util import manifest -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE +from testflows.core import TestScenario, Name, When, Then, Given, And, main, Scenario, Module, TE from testflows.asserts import error @TestScenario @Name("test_001. 1 node") -def test_001(): +def test_001(self): kubectl.create_and_check( config="configs/test-001.yaml", check={ @@ -28,7 +28,7 @@ def test_001(): @TestScenario @Name("test_002. useTemplates for pod, volume templates, and distribution") -def test_002(): +def test_002(self): kubectl.create_and_check( config="configs/test-002-tpl.yaml", check={ @@ -49,7 +49,7 @@ def test_002(): @TestScenario @Name("test_003. 4 nodes with custom layout definition") -def test_003(): +def test_003(self): kubectl.create_and_check( config="configs/test-003-complex-layout.yaml", check={ @@ -64,7 +64,7 @@ def test_003(): @TestScenario @Name("test_004. Compatibility test if old syntax with volumeClaimTemplate is still supported") -def test_004(): +def test_004(self): kubectl.create_and_check( config="configs/test-004-tpl.yaml", check={ @@ -78,7 +78,7 @@ def test_004(): @TestScenario @Name("test_005. Test manifest created by ACM") -def test_005(): +def test_005(self): kubectl.create_and_check( config="configs/test-005-acm.yaml", check={ @@ -93,7 +93,7 @@ def test_005(): @TestScenario @Name("test_006. 
Test clickhouse version upgrade from one version to another using podTemplate change") -def test_006(): +def test_006(self): old_version = "yandex/clickhouse-server:20.8.6.6" new_version = "yandex/clickhouse-server:20.8.7.15" with Then("Create initial position"): @@ -128,7 +128,7 @@ def test_006(): @TestScenario @Name("test_007. Test template with custom clickhouse ports") -def test_007(): +def test_007(self): kubectl.create_and_check( config="configs/test-007-custom-ports.yaml", check={ @@ -160,11 +160,12 @@ def test_operator_upgrade(config, version_from, version_to=settings.operator_ver with When(f"upgrade operator to {version_to}"): set_operator_version(version_to, timeout=120) - time.sleep(5) + time.sleep(10) kubectl.wait_chi_status(chi, "Completed", retries=6) kubectl.wait_objects(chi, {"statefulset": 1, "pod": 1, "service": 2}) - new_start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0", ".status.startTime") - assert start_time == new_start_time + with Then("ClickHouse pods should not be restarted"): + new_start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0", ".status.startTime") + assert start_time == new_start_time kubectl.delete_chi(chi) @@ -209,7 +210,7 @@ def test_operator_restart(config, version=settings.operator_version): @TestScenario @Name("test_008. Test operator restart") -def test_008(): +def test_008(self): with Then("Test simple chi for operator restart"): test_operator_restart("configs/test-008-operator-restart-1.yaml") with Then("Test advanced chi for operator restart"): @@ -218,7 +219,7 @@ def test_008(): @TestScenario @Name("test_009. Test operator upgrade") -def test_009(version_from="0.11.0", version_to=settings.operator_version): +def test_009(self, version_from="0.11.0", version_to=settings.operator_version): with Then("Test simple chi for operator upgrade"): test_operator_upgrade("configs/test-009-operator-upgrade-1.yaml", version_from, version_to) with Then("Test advanced chi for operator upgrade"): @@ -254,7 +255,7 @@ def require_zookeeper(): @TestScenario @Name("test_010. Test zookeeper initialization") -def test_010(): +def test_010(self): set_operator_version(settings.operator_version) require_zookeeper() @@ -278,7 +279,7 @@ def test_010(): @TestScenario @Name("test_011. Test user security and network isolation") -def test_011(): +def test_011(self): with Given("test-011-secured-cluster.yaml and test-011-insecured-cluster.yaml"): kubectl.create_and_check( config="configs/test-011-secured-cluster.yaml", @@ -385,7 +386,7 @@ def test_011(): @TestScenario @Name("test_011_1. Test default user security") -def test_011_1(): +def test_011_1(self): with Given("test-011-secured-default.yaml with password_sha256_hex for default user"): kubectl.create_and_check( config="configs/test-011-secured-default.yaml", @@ -443,7 +444,7 @@ def test_011_1(): @TestScenario @Name("test_012. Test service templates") -def test_012(): +def test_012(self): kubectl.create_and_check( config="configs/test-012-service-template.yaml", check={ @@ -489,7 +490,7 @@ def test_012(): @TestScenario @Name("test_013. Test adding shards and creating local and distributed tables automatically") -def test_013(): +def test_013(self): config = "configs/test-013-add-shards-1.yaml" chi = manifest.get_chi_name(util.get_full_path(config)) cluster = "default" @@ -624,7 +625,7 @@ def test_013(): @TestScenario @Name("test_014. 
Test that replication works") -def test_014(): +def test_014(self): require_zookeeper() create_table = """ @@ -784,7 +785,7 @@ def test_014(): @TestScenario @Name("test_015. Test circular replication with hostNetwork") -def test_015(): +def test_015(self): kubectl.create_and_check( config="configs/test-015-host-network.yaml", check={ @@ -824,7 +825,7 @@ def test_015(): @TestScenario @Name("test_016. Test advanced settings options") -def test_016(): +def test_016(self): chi = "test-016-settings" kubectl.create_and_check( config="configs/test-016-settings-01.yaml", @@ -943,7 +944,7 @@ def test_016(): @TestScenario @Name("test_017. Test deployment of multiple versions in a cluster") -def test_017(): +def test_017(self): pod_count = 2 kubectl.create_and_check( config="configs/test-017-multi-version.yaml", @@ -981,7 +982,7 @@ def test_017(): @TestScenario @Name("test_018. Test that configuration is properly updated") -def test_018(): # Obsolete, covered by test_016 +def test_018(self): # Obsolete, covered by test_016 kubectl.create_and_check( config="configs/test-018-configmap.yaml", check={ @@ -1018,7 +1019,7 @@ def test_018(): # Obsolete, covered by test_016 @TestScenario @Name("test_019. Test that volume is correctly retained and can be re-attached") -def test_019(): +def test_019(self): require_zookeeper() config="configs/test-019-retain-volume.yaml" @@ -1062,7 +1063,7 @@ def test_019(): ) with Then("PVC should be re-mounted"): - with And("Non-replicated table should have data"): + with Then("Non-replicated table should have data"): out = clickhouse.query(chi, sql="select a from t1") assert out == "1" with And("Replicated table should have data"): @@ -1096,7 +1097,7 @@ def test_019(): ) with Then("Data should be in place"): - with And("Non-replicated table should have data"): + with Then("Non-replicated table should have data"): out = clickhouse.query(chi, sql="select a from t1") assert out == "1" with And("Replicated table should have data"): @@ -1109,7 +1110,8 @@ def test_019(): @TestScenario @Name("test_020. Test multi-volume configuration") -def test_020(config="configs/test-020-multi-volume.yaml"): +def test_020(self): + config="configs/test-020-multi-volume.yaml" chi = manifest.get_chi_name(util.get_full_path(config)) kubectl.create_and_check( config=config, @@ -1143,7 +1145,9 @@ def test_020(config="configs/test-020-multi-volume.yaml"): @TestScenario @Name("test_021. Test rescaling storage") -def test_021(config="configs/test-021-rescale-volume-01.yaml"): +def test_021(self): + config = "configs/test-021-rescale-volume-01.yaml" + with Given("Default storage class is expandable"): default_storage_class = kubectl.get_default_storage_class() assert default_storage_class is not None @@ -1197,6 +1201,7 @@ def test_021(config="configs/test-021-rescale-volume-01.yaml"): size = kubectl.get_pvc_size("disk1-chi-test-021-rescale-volume-simple-0-0-0") assert size == "200Mi" kubectl.wait_object("pvc", "disk2-chi-test-021-rescale-volume-simple-0-0-0") + kubectl.wait_field("pvc", "disk2-chi-test-021-rescale-volume-simple-0-0-0", ".status.phase", "Bound") size = kubectl.get_pvc_size("disk2-chi-test-021-rescale-volume-simple-0-0-0") assert size == "50Mi" @@ -1204,8 +1209,12 @@ def test_021(config="configs/test-021-rescale-volume-01.yaml"): kubectl.wait_pod_status("chi-test-021-rescale-volume-simple-0-0-0", "Running") # ClickHouse requires some time to mount volume. Race conditions. # TODO: wait for proper pod state and check the liveness probe probably. 
This is better than waiting - time.sleep(10) - out = clickhouse.query(chi, "SELECT count() FROM system.disks") + for i in range(8): + out = clickhouse.query(chi, "SELECT count() FROM system.disks") + if out == "2": + break + with Then(f"Not ready yet. Wait for {1< +
+
+Sanity: The Headless CMS Construction Kit
+
+---
+
+Litter is named for the fact that it outputs *literals*, which you *litter* your output with. As a side benefit, all Litter output is syntactically correct Go. You can use Litter to emit data during debug, and it's also really nice for "snapshot data" in unit tests, since it produces consistent, sorted output. Litter was inspired by [Spew](https://github.com/davecgh/go-spew), but focuses on terseness and readability.
+
+### Basic example
+
+This:
+
+```go
+type Person struct {
+	Name   string
+	Age    int
+	Parent *Person
+}
+
+litter.Dump(Person{
+	Name: "Bob",
+	Age:  20,
+	Parent: &Person{
+		Name: "Jane",
+		Age:  50,
+	},
+})
+```
+
+will output:
+
+```
+Person{
+	Name: "Bob",
+	Age: 20,
+	Parent: &Person{
+		Name: "Jane",
+		Age: 50,
+	},
+}
+```
+
+### Use in tests
+
+Litter is a great alternative to JSON or YAML for providing "snapshots" or example data. For example:
+
+```go
+func TestSearch(t *testing.T) {
+	result := DoSearch()
+
+	actual := litterOpts.Sdump(result)
+	expected, err := ioutil.ReadFile("testdata.txt")
+	if err != nil {
+		// First run, write test data since it doesn't exist
+		if !os.IsNotExist(err) {
+			t.Error(err)
+		}
+		ioutil.WriteFile("testdata.txt", []byte(actual), 0644)
+		expected = []byte(actual)
+	}
+	if string(expected) != actual {
+		t.Errorf("Expected %s, got %s", expected, actual)
+	}
+}
+```
+
+The first run will use Litter to write the data to `testdata.txt`. On subsequent runs, the test will compare the data. Since Litter always provides a consistent view of a value, you can compare the strings directly.
+
+### Circular references
+
+Litter detects circular references or aliasing, and will replace additional references to the same object with aliases. For example:
+
+```go
+type Circular struct {
+	Self *Circular
+}
+
+selfref := Circular{}
+selfref.Self = &selfref
+
+litter.Dump(selfref)
+```
+
+will output:
+
+```
+Circular { // p0
+	Self: p0,
+}
+```
+
+## Installation
+
+```bash
+$ go get -u github.com/sanity-io/litter
+```
+
+## Quick start
+
+Add this import line to the file you're working in:
+
+```go
+import "github.com/sanity-io/litter"
+```
+
+To dump a variable with full newlines, indentation, type, and aliasing information, use `Dump` or `Sdump`:
+
+```go
+litter.Dump(myVar1)
+str := litter.Sdump(myVar1)
+```
+
+### `litter.Dump(value, ...)`
+
+Dumps the data structure to STDOUT.
+
+### `litter.Sdump(value, ...)`
+
+Returns the dump as a string
+
+## Configuration
+
+You can configure litter globally by modifying the default `litter.Config`
+
+```go
+// Strip all package names from types
+litter.Config.StripPackageNames = true
+
+// Hide private struct fields from dumped structs
+litter.Config.HidePrivateFields = true
+
+// Hide fields matched with given regexp if it is not nil. It is set up to hide fields generated with protoc-gen-go
+litter.Config.FieldExclusions = regexp.MustCompile(`^(XXX_.*)$`)
+
+// Sets a "home" package. The package name will be stripped from all its types
+litter.Config.HomePackage = "mypackage"
+
+// Sets separator used when multiple arguments are passed to Dump() or Sdump().
+litter.Config.Separator = "\n"
+
+// Use compact output: strip newlines and other unnecessary whitespace
+litter.Config.Compact = true
+```
+
+### `litter.Options`
+
+Allows you to configure a local configuration of litter to allow for proper compartmentalization of state at the expense of some comfort:
+
+``` go
+	sq := litter.Options {
+		HidePrivateFields: true,
+		HomePackage: "thispack",
+		Separator: " ",
+	}
+
+	sq.Dump("dumped", "with", "local", "settings")
+```
+
+## Custom dumpers
+
+Implement the interface Dumper on your types to take control of how your type is dumped.
+
+``` go
+type Dumper interface {
+	LitterDump(w io.Writer)
+}
+```
+
+Just write your custom dump to the provided stream, using multiple lines divided by `"\n"` if you need. Litter
+might indent your output according to context, and optionally decorate your first line with a pointer comment
+where appropriate.
+
+A couple of examples from the test suite:
+
+``` go
+type CustomMultiLineDumper struct {}
+
+func (cmld *CustomMultiLineDumper) LitterDump(w io.Writer) {
+	w.Write([]byte("{\n multi\n line\n}"))
+}
+
+type CustomSingleLineDumper int
+
+func (csld CustomSingleLineDumper) LitterDump(w io.Writer) {
+	w.Write([]byte("<custom>"))
+}
+```
diff --git a/vendor/github.com/sanity-io/litter/dump.go b/vendor/github.com/sanity-io/litter/dump.go
new file mode 100644
index 000000000..54d0fafe6
--- /dev/null
+++ b/vendor/github.com/sanity-io/litter/dump.go
@@ -0,0 +1,504 @@
+package litter
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+var (
+	packageNameStripperRegexp = regexp.MustCompile(`\b[a-zA-Z_]+[a-zA-Z_0-9]+\.`)
+	compactTypeRegexp         = regexp.MustCompile(`\s*([,;{}()])\s*`)
+)
+
+// Dumper is the interface for implementing custom dumper for your types.
+type Dumper interface {
+	LitterDump(w io.Writer)
+}
+
+// Options represents configuration options for litter
+type Options struct {
+	Compact           bool
+	StripPackageNames bool
+	HidePrivateFields bool
+	HideZeroValues    bool
+	FieldExclusions   *regexp.Regexp
+	FieldFilter       func(reflect.StructField, reflect.Value) bool
+	HomePackage       string
+	Separator         string
+	StrictGo          bool
+
+	// DisablePointerReplacement, if true, disables the replacing of pointer data with variable names
+	// when it's safe. This is useful for diffing two structures, where pointer variables would cause
+	// false changes. However, circular graphs are still detected and elided to avoid infinite output.
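An editorial aside before the struct continues: the doc comment above is what makes litter usable for diffing. With pointer replacement enabled (the default), the second occurrence of a shared pointer dumps as a short alias such as `p0`, so two structurally equal values can render differently. A small illustration of the flag, using only the API visible in this vendored file:

```go
package main

import "github.com/sanity-io/litter"

type Node struct{ V int }

func main() {
	shared := &Node{V: 1}
	pair := []*Node{shared, shared} // the same pointer twice, but no cycle

	// Default behaviour: the second element is elided to an alias (p0).
	litter.Dump(pair)

	// With replacement disabled, both elements are dumped in full, which makes
	// textual diffs between two dumps meaningful; cycles would still be elided.
	opts := litter.Options{DisablePointerReplacement: true}
	opts.Dump(pair)
}
```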
+ DisablePointerReplacement bool +} + +// Config is the default config used when calling Dump +var Config = Options{ + StripPackageNames: false, + HidePrivateFields: true, + FieldExclusions: regexp.MustCompile(`^(XXX_.*)$`), // XXX_ is a prefix of fields generated by protoc-gen-go + Separator: " ", +} + +type dumpState struct { + w io.Writer + depth int + config *Options + pointers ptrmap + visitedPointers ptrmap + parentPointers ptrmap + currentPointerName string + homePackageRegexp *regexp.Regexp +} + +func (s *dumpState) write(b []byte) { + if _, err := s.w.Write(b); err != nil { + panic(err) + } +} + +func (s *dumpState) writeString(str string) { + s.write([]byte(str)) +} + +func (s *dumpState) indent() { + if !s.config.Compact { + s.write(bytes.Repeat([]byte(" "), s.depth)) + } +} + +func (s *dumpState) newlineWithPointerNameComment() { + if name := s.currentPointerName; name != "" { + if s.config.Compact { + s.write([]byte(fmt.Sprintf("/*%s*/", name))) + } else { + s.write([]byte(fmt.Sprintf(" // %s\n", name))) + } + s.currentPointerName = "" + return + } + if !s.config.Compact { + s.write([]byte("\n")) + } +} + +func (s *dumpState) dumpType(v reflect.Value) { + typeName := v.Type().String() + if s.config.StripPackageNames { + typeName = packageNameStripperRegexp.ReplaceAllLiteralString(typeName, "") + } else if s.homePackageRegexp != nil { + typeName = s.homePackageRegexp.ReplaceAllLiteralString(typeName, "") + } + if s.config.Compact { + typeName = compactTypeRegexp.ReplaceAllString(typeName, "$1") + } + s.write([]byte(typeName)) +} + +func (s *dumpState) dumpSlice(v reflect.Value) { + s.dumpType(v) + numEntries := v.Len() + if numEntries == 0 { + s.write([]byte("{}")) + if s.config.Compact { + s.write([]byte(";")) + } + s.newlineWithPointerNameComment() + return + } + s.write([]byte("{")) + s.newlineWithPointerNameComment() + s.depth++ + for i := 0; i < numEntries; i++ { + s.indent() + s.dumpVal(v.Index(i)) + if !s.config.Compact || i < numEntries-1 { + s.write([]byte(",")) + } + s.newlineWithPointerNameComment() + } + s.depth-- + s.indent() + s.write([]byte("}")) +} + +func (s *dumpState) dumpStruct(v reflect.Value) { + dumpPreamble := func() { + s.dumpType(v) + s.write([]byte("{")) + s.newlineWithPointerNameComment() + s.depth++ + } + preambleDumped := false + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + vtf := vt.Field(i) + if s.config.HidePrivateFields && vtf.PkgPath != "" || s.config.FieldExclusions != nil && s.config.FieldExclusions.MatchString(vtf.Name) { + continue + } + if s.config.FieldFilter != nil && !s.config.FieldFilter(vtf, v.Field(i)) { + continue + } + if s.config.HideZeroValues && isZeroValue(v.Field(i)) { + continue + } + if !preambleDumped { + dumpPreamble() + preambleDumped = true + } + s.indent() + s.write([]byte(vtf.Name)) + if s.config.Compact { + s.write([]byte(":")) + } else { + s.write([]byte(": ")) + } + s.dumpVal(v.Field(i)) + if !s.config.Compact || i < numFields-1 { + s.write([]byte(",")) + } + s.newlineWithPointerNameComment() + } + if preambleDumped { + s.depth-- + s.indent() + s.write([]byte("}")) + } else { + // There were no fields dumped + s.dumpType(v) + s.write([]byte("{}")) + } +} + +func (s *dumpState) dumpMap(v reflect.Value) { + s.dumpType(v) + s.write([]byte("{")) + s.newlineWithPointerNameComment() + s.depth++ + keys := v.MapKeys() + sort.Sort(mapKeySorter{ + keys: keys, + options: s.config, + }) + numKeys := len(keys) + for i, key := range keys { + s.indent() + s.dumpVal(key) + if s.config.Compact { + 
s.write([]byte(":")) + } else { + s.write([]byte(": ")) + } + s.dumpVal(v.MapIndex(key)) + if !s.config.Compact || i < numKeys-1 { + s.write([]byte(",")) + } + s.newlineWithPointerNameComment() + } + s.depth-- + s.indent() + s.write([]byte("}")) +} + +func (s *dumpState) dumpFunc(v reflect.Value) { + parts := strings.Split(runtime.FuncForPC(v.Pointer()).Name(), "/") + name := parts[len(parts)-1] + + // Anonymous function + if strings.Count(name, ".") > 1 { + s.dumpType(v) + } else { + if s.config.StripPackageNames { + name = packageNameStripperRegexp.ReplaceAllLiteralString(name, "") + } else if s.homePackageRegexp != nil { + name = s.homePackageRegexp.ReplaceAllLiteralString(name, "") + } + if s.config.Compact { + name = compactTypeRegexp.ReplaceAllString(name, "$1") + } + s.write([]byte(name)) + } +} + +func (s *dumpState) dumpCustom(v reflect.Value) { + // Run the custom dumper buffering the output + buf := new(bytes.Buffer) + dumpFunc := v.MethodByName("LitterDump") + dumpFunc.Call([]reflect.Value{reflect.ValueOf(buf)}) + + // Dump the type + s.dumpType(v) + + if s.config.Compact { + s.write(buf.Bytes()) + return + } + + // Now output the dump taking care to apply the current indentation-level + // and pointer name comments. + var err error + firstLine := true + for err == nil { + var lineBytes []byte + lineBytes, err = buf.ReadBytes('\n') + line := strings.TrimRight(string(lineBytes), " \n") + + if err != nil && err != io.EOF { + break + } + // Do not indent first line + if firstLine { + firstLine = false + } else { + s.indent() + } + s.write([]byte(line)) + + // At EOF we're done + if err == io.EOF { + return + } + s.newlineWithPointerNameComment() + } + panic(err) +} + +func (s *dumpState) dump(value interface{}) { + if value == nil { + printNil(s.w) + return + } + v := reflect.ValueOf(value) + s.dumpVal(v) +} + +func (s *dumpState) descendIntoPossiblePointer(value reflect.Value, f func()) { + canonicalize := true + if isPointerValue(value) { + ptr := value.Pointer() + + // If elision disabled, and this is not a circular reference, don't canonicalize + if s.config.DisablePointerReplacement && s.parentPointers.add(ptr) { + canonicalize = false + } + + // Add to stack of pointers we're recursively descending into + s.parentPointers.add(ptr) + defer s.parentPointers.remove(ptr) + } + + if !canonicalize { + pointerName, _ := s.pointerNameFor(value) + s.currentPointerName = pointerName + f() + return + } + + pointerName, firstVisit := s.pointerNameFor(value) + if pointerName == "" { + f() + return + } + if firstVisit { + s.currentPointerName = pointerName + f() + return + } + s.write([]byte(pointerName)) +} + +func (s *dumpState) dumpVal(value reflect.Value) { + if value.Kind() == reflect.Ptr && value.IsNil() { + s.write([]byte("nil")) + return + } + + v := deInterface(value) + kind := v.Kind() + + // Handle custom dumpers + dumperType := reflect.TypeOf((*Dumper)(nil)).Elem() + if v.Type().Implements(dumperType) { + s.descendIntoPossiblePointer(v, func() { + s.dumpCustom(v) + }) + return + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
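A quick note before the switch resumes below: `dumpMap` above sorts map keys before dumping, so repeated dumps of the same value are byte-identical regardless of Go's randomized map iteration order. That determinism is exactly what `pkg/util`'s new `serializeRepeatable`/`Fingerprint` relies on. A tiny demonstration (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/sanity-io/litter"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	// Keys are sorted during the dump, so the output is stable across runs --
	// the reason Fingerprint was switched from gob encoding to a litter dump.
	fmt.Println(litter.Sdump(m))                    // keys always come out as a, b, c
	fmt.Println(litter.Sdump(m) == litter.Sdump(m)) // true
}
```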
+		s.write([]byte("<invalid>"))
+
+	case reflect.Bool:
+		printBool(s.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(s.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(s.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(s.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(s.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(s.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(s.w, v.Complex(), 64)
+
+	case reflect.String:
+		s.write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Slice:
+		if v.IsNil() {
+			printNil(s.w)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		s.descendIntoPossiblePointer(v, func() {
+			s.dumpSlice(v)
+		})
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			printNil(s.w)
+		}
+
+	case reflect.Ptr:
+		s.descendIntoPossiblePointer(v, func() {
+			if s.config.StrictGo {
+				s.writeString(fmt.Sprintf("(func(v %s) *%s { return &v })(", v.Elem().Type(), v.Elem().Type()))
+				s.dumpVal(v.Elem())
+				s.writeString(")")
+			} else {
+				s.writeString("&")
+				s.dumpVal(v.Elem())
+			}
+		})
+
+	case reflect.Map:
+		s.descendIntoPossiblePointer(v, func() {
+			s.dumpMap(v)
+		})
+
+	case reflect.Struct:
+		s.dumpStruct(v)
+
+	case reflect.Func:
+		s.dumpFunc(v)
+
+	default:
+		if v.CanInterface() {
+			s.writeString(fmt.Sprintf("%v", v.Interface()))
+		} else {
+			s.writeString(fmt.Sprintf("%v", v.String()))
+		}
+	}
+}
+
+// registers that the value has been visited and checks to see if it is one of the
+// pointers we will see multiple times. If it is, it returns a temporary name for this
+// pointer. It also returns a boolean value indicating whether this is the first time
+// this name is returned so the caller can decide whether the contents of the pointer
+// has been dumped before or not.
+func (s *dumpState) pointerNameFor(v reflect.Value) (string, bool) {
+	if isPointerValue(v) {
+		ptr := v.Pointer()
+		if info, ok := s.pointers[ptr]; ok {
+			firstVisit := s.visitedPointers.add(ptr)
+			return fmt.Sprintf("p%d", info.order), firstVisit
+		}
+	}
+	return "", false
+}
+
+// prepares a new state object for dumping the provided value
+func newDumpState(value interface{}, options *Options, writer io.Writer) *dumpState {
+	result := &dumpState{
+		config:   options,
+		pointers: mapReusedPointers(reflect.ValueOf(value)),
+		w:        writer,
+	}
+
+	if options.HomePackage != "" {
+		result.homePackageRegexp = regexp.MustCompile(fmt.Sprintf("\\b%s\\.", options.HomePackage))
+	}
+
+	return result
+}
+
+// Dump a value to stdout
+func Dump(value ...interface{}) {
+	(&Config).Dump(value...)
+}
+
+// Sdump dumps a value to a string
+func Sdump(value ...interface{}) string {
+	return (&Config).Sdump(value...)
+}
+
+// Dump a value to stdout according to the options
+func (o Options) Dump(values ...interface{}) {
+	for i, value := range values {
+		state := newDumpState(value, &o, os.Stdout)
+		if i > 0 {
+			state.write([]byte(o.Separator))
+		}
+		state.dump(value)
+	}
+	_, _ = os.Stdout.Write([]byte("\n"))
+}
+
+// Sdump dumps a value to a string according to the options
+func (o Options) Sdump(values ...interface{}) string {
+	buf := new(bytes.Buffer)
+	for i, value := range values {
+		if i > 0 {
+			_, _ = buf.Write([]byte(o.Separator))
+		}
+		state := newDumpState(value, &o, buf)
+		state.dump(value)
+	}
+	return buf.String()
+}
+
+type mapKeySorter struct {
+	keys    []reflect.Value
+	options *Options
+}
+
+func (s mapKeySorter) Len() int {
+	return len(s.keys)
+}
+
+func (s mapKeySorter) Swap(i, j int) {
+	s.keys[i], s.keys[j] = s.keys[j], s.keys[i]
+}
+
+func (s mapKeySorter) Less(i, j int) bool {
+	ibuf := new(bytes.Buffer)
+	jbuf := new(bytes.Buffer)
+	newDumpState(s.keys[i], s.options, ibuf).dumpVal(s.keys[i])
+	newDumpState(s.keys[j], s.options, jbuf).dumpVal(s.keys[j])
+	return ibuf.String() < jbuf.String()
+}
diff --git a/vendor/github.com/sanity-io/litter/go.mod b/vendor/github.com/sanity-io/litter/go.mod
new file mode 100644
index 000000000..c1c20c939
--- /dev/null
+++ b/vendor/github.com/sanity-io/litter/go.mod
@@ -0,0 +1,9 @@
+module github.com/sanity-io/litter
+
+go 1.14
+
+require (
+	github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b // indirect
+	github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0 // indirect
+	github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312
+)
diff --git a/vendor/github.com/sanity-io/litter/go.sum b/vendor/github.com/sanity-io/litter/go.sum
new file mode 100644
index 000000000..800ae0053
--- /dev/null
+++ b/vendor/github.com/sanity-io/litter/go.sum
@@ -0,0 +1,6 @@
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b h1:XxMZvQZtTXpWMNWK82vdjCLCe7uGMFXdTsJH0v3Hkvw=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0 h1:GD+A8+e+wFkqje55/2fOVnZPkoDIu1VooBWfNrnY8Uo=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312 h1:UsFdQ3ZmlzS0BqZYGxvYaXvFGUbCmPGy8DM7qWJJiIQ=
+github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
diff --git a/vendor/github.com/sanity-io/litter/pointers.go b/vendor/github.com/sanity-io/litter/pointers.go
new file mode 100644
index 000000000..cbb417933
--- /dev/null
+++ b/vendor/github.com/sanity-io/litter/pointers.go
@@ -0,0 +1,126 @@
+package litter
+
+import (
+	"reflect"
+	"sort"
+)
+
+// mapReusedPointers takes a structure, and recursively maps all pointers mentioned in the tree,
+// detecting circular references, and providing a list of all pointers that were referenced at
+// least twice by the provided structure.
+func mapReusedPointers(v reflect.Value) ptrmap {
+	pm := &pointerVisitor{}
+	pm.consider(v)
+	return pm.reused
+}
+
+// A map of pointers.
+type (
+	ptrinfo struct {
+		order int
+	}
+	ptrmap map[uintptr]ptrinfo
+)
+
+// Returns true if the map contains the pointer.
+func (pm *ptrmap) contains(p uintptr) bool {
+	if *pm != nil {
+		_, ok := (*pm)[p]
+		return ok
+	}
+	return false
+}
+
+// Removes a pointer.
+func (pm *ptrmap) remove(p uintptr) {
+	if *pm != nil {
+		delete(*pm, p)
+	}
+}
+
+// Adds a pointer.
+func (pm *ptrmap) add(p uintptr) bool {
+	if pm.contains(p) {
+		return false
+	}
+	pm.put(p)
+	return true
+}
+
+// Adds a pointer (slow path).
+func (pm *ptrmap) put(p uintptr) {
+	if *pm == nil {
+		*pm = make(map[uintptr]ptrinfo, 31)
+	}
+	(*pm)[p] = ptrinfo{order: len(*pm)}
+}
+
+type pointerVisitor struct {
+	pointers ptrmap
+	reused   ptrmap
+}
+
+// consider recursively considers v and each of its children, updating the map according to the
+// semantics of mapReusedPointers.
+func (pv *pointerVisitor) consider(v reflect.Value) {
+	if v.Kind() == reflect.Invalid {
+		return
+	}
+	if isPointerValue(v) && v.Pointer() != 0 { // pointer is 0 for unexported fields
+		if pv.tryAddPointer(v.Pointer()) {
+			// No use descending inside this value, since it has been seen before and all its descendants
+			// have been considered
+			return
+		}
+	}
+
+	// Now descend into any children of this value
+	switch v.Kind() {
+	case reflect.Slice, reflect.Array:
+		numEntries := v.Len()
+		for i := 0; i < numEntries; i++ {
+			pv.consider(v.Index(i))
+		}
+
+	case reflect.Interface:
+		pv.consider(v.Elem())
+
+	case reflect.Ptr:
+		pv.consider(v.Elem())
+
+	case reflect.Map:
+		keys := v.MapKeys()
+		sort.Sort(mapKeySorter{
+			keys:    keys,
+			options: &Config,
+		})
+		for _, key := range keys {
+			pv.consider(v.MapIndex(key))
+		}
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		for i := 0; i < numFields; i++ {
+			pv.consider(v.Field(i))
+		}
+	}
+}
+
+// tryAddPointer adds p to the pointer map and updates reused. It returns true if the pointer
+// has been seen before.
+func (pv *pointerVisitor) tryAddPointer(p uintptr) bool {
+	// Is this already known to be reused?
+	if pv.reused.contains(p) {
+		return true
+	}
+
+	// Have we seen it once before?
+	if pv.pointers.contains(p) {
+		// Add it to the register of pointers we have seen more than once
+		pv.reused.add(p)
+		return true
+	}
+
+	// This pointer was new to us
+	pv.pointers.add(p)
+	return false
+}
diff --git a/vendor/github.com/sanity-io/litter/print.go b/vendor/github.com/sanity-io/litter/print.go
new file mode 100644
index 000000000..700646dd3
--- /dev/null
+++ b/vendor/github.com/sanity-io/litter/print.go
@@ -0,0 +1,44 @@
+package litter
+
+import (
+	"io"
+	"strconv"
+)
+
+func printBool(w io.Writer, value bool) {
+	if value {
+		w.Write([]byte("true"))
+		return
+	}
+	w.Write([]byte("false"))
+}
+
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	w.Write([]byte("complex"))
+	printInt(w, int64(floatPrecision*2), 10)
+	r := real(c)
+	w.Write([]byte("("))
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write([]byte("+"))
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write([]byte("i)"))
+}
+
+func printNil(w io.Writer) {
+	w.Write([]byte("nil"))
+}
diff --git a/vendor/github.com/sanity-io/litter/util.go b/vendor/github.com/sanity-io/litter/util.go
new file mode 100644
index 000000000..58be4751e
--- /dev/null
+++ b/vendor/github.com/sanity-io/litter/util.go
@@ -0,0 +1,28 @@
+package litter
+
+import (
+	"reflect"
+)
+
+// deInterface returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func deInterface(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+func isPointerValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+		return true
+	}
+	return false
+}
+
+func isZeroValue(v reflect.Value) bool {
+	return (isPointerValue(v) && v.IsNil()) ||
+		(v.IsValid() && v.CanInterface() && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()))
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 774eb3824..e8eadfc4e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -70,6 +70,8 @@ github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
 # github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a
 github.com/r3labs/diff
+# github.com/sanity-io/litter v1.3.0
+github.com/sanity-io/litter
 # github.com/satori/go.uuid v1.1.0
 github.com/satori/go.uuid
 # github.com/spf13/pflag v1.0.5
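
The exported entry points at the bottom of the vendored dump.go are the package's public API: Dump and Sdump go through the shared litter.Config, while Options.Dump and Options.Sdump apply per-call settings and join successive values with Options.Separator. Below is a minimal usage sketch; the ClusterSpec type is invented for illustration, and only option fields visible in the vendored source (Compact, Separator) are set.

package main

import (
	"fmt"

	"github.com/sanity-io/litter"
)

// ClusterSpec is a hypothetical stand-in for an operator configuration struct.
type ClusterSpec struct {
	Name     string
	Replicas int
}

func main() {
	// Package-level Dump writes to stdout through the shared litter.Config.
	litter.Dump(ClusterSpec{Name: "dev", Replicas: 2})

	// Options.Sdump renders to a string; successive values are separated
	// by Options.Separator, as implemented in dump.go above.
	opts := litter.Options{Compact: true, Separator: " "}
	fmt.Println(opts.Sdump(
		ClusterSpec{Name: "dev", Replicas: 2},
		ClusterSpec{Name: "prod", Replicas: 6},
	))
}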
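dumpVal special-cases values whose type implements the package's Dumper interface: dumpCustom invokes the value's LitterDump method into a buffer, prints the type, then re-indents the buffered lines and applies pointer-name comments. A sketch of a custom dumper follows, assuming the interface shape LitterDump(w io.Writer); the Secret type is hypothetical.

package main

import (
	"io"

	"github.com/sanity-io/litter"
)

// Secret is a hypothetical wrapper whose contents must not leak into dumps.
type Secret string

// LitterDump implements the litter Dumper interface; dumpVal detects it via
// reflection and routes the value through dumpCustom instead of the default
// string handling.
func (s Secret) LitterDump(w io.Writer) {
	io.WriteString(w, `"<redacted>"`)
}

func main() {
	// Prints the type name followed by the custom payload, roughly:
	//   Secret"<redacted>"
	// (the type name may be package-qualified depending on configuration).
	litter.Dump(Secret("ch-password"))
}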
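mapKeySorter at the end of dump.go orders map keys by comparing their dumped string forms, and consider in pointers.go sorts with it as well, so pointer numbering (and, by the same mechanism, map rendering) is deterministic rather than subject to Go's randomized map iteration. For example:

package main

import "github.com/sanity-io/litter"

func main() {
	m := map[string]int{"b": 2, "c": 3, "a": 1}
	// Keys are rendered in the sorted order of their dumped form
	// ("a", "b", "c"), so repeated dumps of the same map are identical
	// and therefore diffable.
	litter.Dump(m)
}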
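pointers.go exists so that dump.go can cope with shared and circular references: mapReusedPointers pre-scans the value, pointerNameFor hands out names p0, p1, ... for pointers referenced more than once, and descendIntoPossiblePointer emits the name instead of descending into the same target again. The sketch below shows the effect on a cyclic value; the rendering in the comment is approximate.

package main

import "github.com/sanity-io/litter"

// Node is an illustrative self-referential type.
type Node struct {
	Name string
	Next *Node
}

func main() {
	n := &Node{Name: "loop"}
	n.Next = n // cycle: n references itself

	// Instead of recursing forever, the second occurrence of n is replaced
	// by its pointer name, yielding output roughly of the form:
	//   &Node{ Name: "loop", Next: p0, } // p0
	litter.Dump(n)
}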