From 2764c9c218bb1660df6130d3d8b2dbca2eebc732 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 21 Jan 2021 12:14:05 +0300 Subject: [PATCH 01/78] 0.13.5 --- release | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release b/release index 54d1a4f2a..c37136a84 100644 --- a/release +++ b/release @@ -1 +1 @@ -0.13.0 +0.13.5 From 2e2ff16ff91bf99ff2e9b9c9d0ed54660a50fa47 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 21 Jan 2021 12:15:41 +0300 Subject: [PATCH 02/78] env: build yamls --- .../dev/clickhouse-operator-install-dev.yaml | 18 +++++++++--------- ...clickhouse-operator-install-deployment.yaml | 18 +++++++++--------- .../operator/clickhouse-operator-install.yaml | 18 +++++++++--------- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/deploy/dev/clickhouse-operator-install-dev.yaml b/deploy/dev/clickhouse-operator-install-dev.yaml index c4113af18..11ff2b733 100644 --- a/deploy/dev/clickhouse-operator-install-dev.yaml +++ b/deploy/dev/clickhouse-operator-install-dev.yaml @@ -1473,7 +1473,7 @@ subjects: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-files # apiVersion: v1 @@ -1611,7 +1611,7 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-confd-files # apiVersion: v1 @@ -1626,7 +1626,7 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-configd-files # apiVersion: v1 @@ -1682,7 +1682,7 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-templatesd-files # apiVersion: v1 @@ -1781,7 +1781,7 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # 
etc-clickhouse-operator-usersd-files # apiVersion: v1 @@ -1830,8 +1830,8 @@ data: # Possible Template Parameters: # # dev -# altinity/clickhouse-operator:0.13.0 -# altinity/metrics-exporter:0.13.0 +# altinity/clickhouse-operator:0.13.5 +# altinity/metrics-exporter:0.13.5 # # Setup Deployment for clickhouse-operator # Deployment would be created in kubectl-specified namespace @@ -1874,7 +1874,7 @@ spec: name: etc-clickhouse-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.13.0 + image: altinity/clickhouse-operator:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -1939,7 +1939,7 @@ spec: resource: limits.memory - name: metrics-exporter - image: altinity/metrics-exporter:0.13.0 + image: altinity/metrics-exporter:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder diff --git a/deploy/operator/clickhouse-operator-install-deployment.yaml b/deploy/operator/clickhouse-operator-install-deployment.yaml index 7f9e9a444..bd5cd9a9d 100644 --- a/deploy/operator/clickhouse-operator-install-deployment.yaml +++ b/deploy/operator/clickhouse-operator-install-deployment.yaml @@ -1,7 +1,7 @@ # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-files # apiVersion: v1 @@ -138,7 +138,7 @@ data: # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-confd-files # apiVersion: v1 @@ -152,7 +152,7 @@ data: # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-configd-files # apiVersion: v1 @@ -207,7 +207,7 @@ data: # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-templatesd-files # apiVersion: v1 @@ -305,7 +305,7 @@ data: 
# Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-usersd-files # apiVersion: v1 @@ -353,8 +353,8 @@ data: # Possible Template Parameters: # # - -# altinity/clickhouse-operator:0.13.0 -# altinity/metrics-exporter:0.13.0 +# altinity/clickhouse-operator:0.13.5 +# altinity/metrics-exporter:0.13.5 # # Setup Deployment for clickhouse-operator # Deployment would be created in kubectl-specified namespace @@ -396,7 +396,7 @@ spec: name: etc-clickhouse-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.13.0 + image: altinity/clickhouse-operator:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -461,7 +461,7 @@ spec: resource: limits.memory - name: metrics-exporter - image: altinity/metrics-exporter:0.13.0 + image: altinity/metrics-exporter:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder diff --git a/deploy/operator/clickhouse-operator-install.yaml b/deploy/operator/clickhouse-operator-install.yaml index a34a5b4c8..cf765609d 100644 --- a/deploy/operator/clickhouse-operator-install.yaml +++ b/deploy/operator/clickhouse-operator-install.yaml @@ -1473,7 +1473,7 @@ subjects: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-files # apiVersion: v1 @@ -1611,7 +1611,7 @@ data: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-confd-files # apiVersion: v1 @@ -1626,7 +1626,7 @@ data: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-configd-files # apiVersion: v1 @@ -1682,7 +1682,7 @@ data: # Possible Template Parameters: # # kube-system -# 
altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-templatesd-files # apiVersion: v1 @@ -1781,7 +1781,7 @@ data: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 +# altinity/clickhouse-operator:0.13.5 # etc-clickhouse-operator-usersd-files # apiVersion: v1 @@ -1830,8 +1830,8 @@ data: # Possible Template Parameters: # # kube-system -# altinity/clickhouse-operator:0.13.0 -# altinity/metrics-exporter:0.13.0 +# altinity/clickhouse-operator:0.13.5 +# altinity/metrics-exporter:0.13.5 # # Setup Deployment for clickhouse-operator # Deployment would be created in kubectl-specified namespace @@ -1874,7 +1874,7 @@ spec: name: etc-clickhouse-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.13.0 + image: altinity/clickhouse-operator:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -1939,7 +1939,7 @@ spec: resource: limits.memory - name: metrics-exporter - image: altinity/metrics-exporter:0.13.0 + image: altinity/metrics-exporter:0.13.5 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder From f515fcc6d1b2f96115f5b2c85274a54d59b0d8ca Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 21 Jan 2021 12:58:42 +0300 Subject: [PATCH 03/78] dev: unify getStatefulSet* functions --- pkg/controller/chi/deleter.go | 4 ++-- pkg/controller/chi/getter.go | 17 ++++++++++++++++- pkg/controller/chi/worker.go | 28 ++++++++++++++++++---------- pkg/model/schemer.go | 2 +- 4 files changed, 37 insertions(+), 14 deletions(-) diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go index 1f35f9c99..4bb0568e6 100644 --- a/pkg/controller/chi/deleter.go +++ b/pkg/controller/chi/deleter.go @@ -116,7 +116,7 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error { log.V(1).Infof("deleteStatefulSet(%s/%s)", namespace, name) - statefulSet, err := 
c.getStatefulSetByHost(host) + statefulSet, err := c.getStatefulSet(host) if err != nil { if apierrors.IsNotFound(err) { log.V(1).Infof("NEUTRAL not found StatefulSet %s/%s", namespace, name) @@ -154,7 +154,7 @@ func (c *Controller) syncStatefulSet(host *chop.ChiHost) { for { // TODO // There should be better way to sync cache - if _, err := c.getStatefulSetByHost(host); err == nil { + if _, err := c.getStatefulSet(host); err == nil { log.V(2).Infof("cache NOT yet synced") time.Sleep(15 * time.Second) } else { diff --git a/pkg/controller/chi/getter.go b/pkg/controller/chi/getter.go index 047a1414c..0d090bb1d 100644 --- a/pkg/controller/chi/getter.go +++ b/pkg/controller/chi/getter.go @@ -124,9 +124,24 @@ func (c *Controller) getService(objMeta *meta.ObjectMeta, byNameOnly bool) (*cor return nil, fmt.Errorf("too much objects found %d expecting 1", len(objects)) } +// getStatefulSet gets StatefulSet +func (c *Controller) getStatefulSet(obj interface{}, byName ...bool) (*apps.StatefulSet, error) { + switch typedObj := obj.(type) { + case *meta.ObjectMeta: + var b bool + if len(byName) > 0 { + b = byName[0] + } + return c.getStatefulSetByMeta(typedObj, b) + case *chop.ChiHost: + return c.getStatefulSetByHost(typedObj) + } + return nil, fmt.Errorf("unknown type") +} + // getStatefulSet gets StatefulSet either by namespaced name or by labels // TODO review byNameOnly params -func (c *Controller) getStatefulSet(objMeta *meta.ObjectMeta, byNameOnly bool) (*apps.StatefulSet, error) { +func (c *Controller) getStatefulSetByMeta(objMeta *meta.ObjectMeta, byNameOnly bool) (*apps.StatefulSet, error) { get := c.statefulSetLister.StatefulSets(objMeta.Namespace).Get list := c.statefulSetLister.StatefulSets(objMeta.Namespace).List var objects []*apps.StatefulSet diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index 5ec446b94..b1ad10846 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -554,25 +554,31 @@ func (w *worker) 
reconcileHost(host *chop.ChiHost) error { return nil } -// Exclude host from ClickHouse clusters func (w *worker) excludeHost(host *chop.ChiHost, status StatefulSetStatus) error { if w.waitExcludeHost(host, status) { w.a.V(1). Info("Exclude from cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + w.excludeHostFromClickHouseCluster(host) + } - options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). - SetRemoteServersGeneratorOptions(chopmodel.NewRemoteServersGeneratorOptions(). + return nil +} + +// excludeHostFromClickHouseCluster excludes host from all ClickHouse clusters +func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) { + // Specify in options to exclude host from ClickHouse config file + options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). + SetRemoteServersGeneratorOptions( + chopmodel.NewRemoteServersGeneratorOptions(). ExcludeHost(host). ExcludeReconcileAttributes( chop.NewChiHostReconcileAttributes().SetAdd(), ), - ) - - _ = w.reconcileCHIConfigMaps(host.CHI, options, true) // remove host from cluster config only if we are going to wait for exclusion - _ = w.waitHostNotInCluster(host) - } + ) - return nil + // Remove host from cluster config and wait for ClickHouse to pick-up the change + _ = w.reconcileCHIConfigMaps(host.CHI, options, true) + _ = w.waitHostNotInCluster(host) } // determines whether reconciler should wait for host to be excluded from/included into cluster @@ -642,10 +648,12 @@ func (w *worker) includeHost(host *chop.ChiHost, status StatefulSetStatus) error return nil } +// waitHostInCluster waits until host is a member of at least one ClickHouse cluster func (w *worker) waitHostInCluster(host *chop.ChiHost) error { return w.c.pollHost(host, nil, w.schemer.IsHostInCluster) } +// waitHostNotInCluster waits until host is not a member of any ClickHouse clusters func (w *worker) waitHostNotInCluster(host *chop.ChiHost) error { return 
w.c.pollHost(host, nil, func(host *chop.ChiHost) bool { return !w.schemer.IsHostInCluster(host) @@ -760,7 +768,7 @@ func (w *worker) deleteHost(host *chop.ChiHost) error { WithStatusAction(host.CHI). Info("Delete host %s/%s - started", host.Address.ClusterName, host.Name) - if _, err := w.c.getStatefulSetByHost(host); err != nil { + if _, err := w.c.getStatefulSet(host); err != nil { w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(host.CHI). Info("Delete host %s/%s - completed StatefulSet not found - already deleted? err: %v", diff --git a/pkg/model/schemer.go b/pkg/model/schemer.go index b055ac3a2..98fa84d34 100644 --- a/pkg/model/schemer.go +++ b/pkg/model/schemer.go @@ -310,7 +310,7 @@ func (s *Schemer) HostCreateTables(host *chop.ChiHost) error { return nil } -// IsHostInCluster +// IsHostInCluster checks whether host is a member of at least one ClickHouse cluster func (s *Schemer) IsHostInCluster(host *chop.ChiHost) bool { sqls := []string{heredoc.Docf( `SELECT throwIf(count()=0) FROM system.clusters WHERE cluster='%s' AND is_local`, From 11cc86bd76d4ff28668dbcd876ef2bf4016c85fc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 21 Jan 2021 13:33:33 +0300 Subject: [PATCH 04/78] dev: add pod getter --- pkg/controller/chi/getter.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/controller/chi/getter.go b/pkg/controller/chi/getter.go index 0d090bb1d..8301b0bcd 100644 --- a/pkg/controller/chi/getter.go +++ b/pkg/controller/chi/getter.go @@ -196,6 +196,20 @@ func (c *Controller) getStatefulSetByHost(host *chop.ChiHost) (*apps.StatefulSet return c.statefulSetLister.StatefulSets(namespace).Get(name) } +// getPod gets pod for host or StatefulSet +func (c *Controller) getPod(obj interface{}) (*core.Pod, error) { + var name, namespace string + switch typedObj := obj.(type) { + case *chop.ChiHost: + name = chopmodel.CreatePodName(obj) + namespace = typedObj.Address.Namespace + case 
*apps.StatefulSet: + name = chopmodel.CreatePodName(obj) + namespace = typedObj.Namespace + } + return c.podLister.Pods(namespace).Get(name) +} + // GetCHIByObjectMeta gets CHI by namespaced name func (c *Controller) GetCHIByObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ClickHouseInstallation, error) { chiName, err := chopmodel.GetCHINameFromObjectMeta(objectMeta) From 53541486417bcb3e40a91ae8a242d394bf6cd8dd Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 21 Jan 2021 14:56:37 +0300 Subject: [PATCH 05/78] dev: introduce map reducer functions --- pkg/controller/chi/getter.go | 2 +- pkg/util/map.go | 42 ++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chi/getter.go b/pkg/controller/chi/getter.go index 8301b0bcd..37d4fd660 100644 --- a/pkg/controller/chi/getter.go +++ b/pkg/controller/chi/getter.go @@ -200,7 +200,7 @@ func (c *Controller) getStatefulSetByHost(host *chop.ChiHost) (*apps.StatefulSet func (c *Controller) getPod(obj interface{}) (*core.Pod, error) { var name, namespace string switch typedObj := obj.(type) { - case *chop.ChiHost: + case *chop.ChiHost: name = chopmodel.CreatePodName(obj) namespace = typedObj.Address.Namespace case *apps.StatefulSet: diff --git a/pkg/util/map.go b/pkg/util/map.go index 4a1d664eb..80184e555 100644 --- a/pkg/util/map.go +++ b/pkg/util/map.go @@ -108,6 +108,48 @@ func MergeStringMapsPreserve(dst, src map[string]string, keys ...string) map[str } } +// SubtractStringMaps subtracts "delta" from "base" by keys +func SubtractStringMaps(base, delta map[string]string) map[string]string { + if len(delta) == 0 { + // Nothing to delete + return base + } + if len(base) == 0 { + // Nowhere to delete from + return base + } + + // Extract keys from delta and delete them from base + for _, key := range delta { + if _, ok := base[key]; ok { + delete(base, key) + } + } + + return base +} + +// MapDeleteKeys deletes multiple keys from the map +func MapDeleteKeys(base 
map[string]string, keys ...string) map[string]string { + if len(keys) == 0 { + // Nothing to delete + return base + } + if len(base) == 0 { + // Nowhere to delete from + return base + } + + // Extract delete keys from base + for _, key := range keys { + if _, ok := base[key]; ok { + delete(base, key) + } + } + + return base +} + // MapHasKeys checks whether map has all keys from specified list func MapHasKeys(m map[string]string, keys ...string) bool { for _, needle := range keys { From fa5ff529f4617707362fadc7d8e335d15ad520c2 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 21 Jan 2021 15:03:43 +0300 Subject: [PATCH 06/78] dev: reconcider own labeler --- pkg/controller/chi/labeler.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go index 900924016..1100b1fb7 100644 --- a/pkg/controller/chi/labeler.go +++ b/pkg/controller/chi/labeler.go @@ -15,6 +15,7 @@ package chi import ( + "github.com/altinity/clickhouse-operator/pkg/util" log "github.com/golang/glog" // log "k8s.io/klog" "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -133,7 +134,14 @@ func (c *Controller) labelMyObjectsTree() { } } +// addLabels adds app and version labels func (c *Controller) addLabels(meta *v1.ObjectMeta) { - meta.Labels[model.LabelAppName] = model.LabelAppValue - meta.Labels[model.LabelCHOP] = c.chop.Version + util.MergeStringMapsOverwrite( + meta.Labels, + // Add the following labels + map[string]string{ + model.LabelAppName: model.LabelAppValue, + model.LabelCHOP: c.chop.Version, + }, + ) } From 118b506fdf6e942cf8a7be75c719dd2ef2ae33fd Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 21 Jan 2021 15:33:15 +0300 Subject: [PATCH 07/78] dev: ready labels operators --- pkg/model/labeler.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index 9e34a0063..1653b76ad 100644 --- a/pkg/model/labeler.go +++ 
b/pkg/model/labeler.go @@ -23,10 +23,12 @@ import ( "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" kublabels "k8s.io/apimachinery/pkg/labels" + "time" ) const ( // Kubernetes labels + LabelReadyName = clickhousealtinitycom.GroupName + "/ready" LabelAppName = clickhousealtinitycom.GroupName + "/app" LabelAppValue = "chop" LabelCHOP = clickhousealtinitycom.GroupName + "/chop" @@ -439,3 +441,23 @@ func GetClusterNameFromObjectMeta(meta *meta.ObjectMeta) (string, error) { } return meta.Labels[LabelClusterName], nil } + +// AddLabelReady adds "ready" label with value = UTC now +func AddLabelReady(meta *meta.ObjectMeta) { + if meta == nil { + return + } + util.MergeStringMapsOverwrite( + meta.Labels, + map[string]string{ + LabelReadyName: time.Now().UTC().String(), + }) +} + +// DeleteLabelReady deletes "ready" label +func DeleteLabelReady(meta *meta.ObjectMeta) { + if meta == nil { + return + } + util.MapDeleteKeys(meta.Labels, LabelReadyName) +} From 5ab3e9b5103ce5367146b43a2e75e95fd2c790b9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 22 Jan 2021 11:34:33 +0300 Subject: [PATCH 08/78] dev: continue with ready labels --- pkg/controller/chi/getter.go | 10 +++++++--- pkg/controller/chi/pods.go | 20 ++++++++++++++++++++ pkg/model/creator.go | 8 ++++---- pkg/model/labeler.go | 30 ++++++++++++++++++++++++++---- 4 files changed, 57 insertions(+), 11 deletions(-) diff --git a/pkg/controller/chi/getter.go b/pkg/controller/chi/getter.go index 37d4fd660..81c70b58d 100644 --- a/pkg/controller/chi/getter.go +++ b/pkg/controller/chi/getter.go @@ -124,7 +124,9 @@ func (c *Controller) getService(objMeta *meta.ObjectMeta, byNameOnly bool) (*cor return nil, fmt.Errorf("too much objects found %d expecting 1", len(objects)) } -// getStatefulSet gets StatefulSet +// getStatefulSet gets StatefulSet. Accepted types: +// 1. *meta.ObjectMeta +// 2. 
*chop.ChiHost func (c *Controller) getStatefulSet(obj interface{}, byName ...bool) (*apps.StatefulSet, error) { switch typedObj := obj.(type) { case *meta.ObjectMeta: @@ -196,7 +198,9 @@ func (c *Controller) getStatefulSetByHost(host *chop.ChiHost) (*apps.StatefulSet return c.statefulSetLister.StatefulSets(namespace).Get(name) } -// getPod gets pod for host or StatefulSet +// getPod gets pod for host or StatefulSet. Accepted types: +// 1. *apps.StatefulSet +// 2. *chop.ChiHost func (c *Controller) getPod(obj interface{}) (*core.Pod, error) { var name, namespace string switch typedObj := obj.(type) { @@ -207,7 +211,7 @@ func (c *Controller) getPod(obj interface{}) (*core.Pod, error) { name = chopmodel.CreatePodName(obj) namespace = typedObj.Namespace } - return c.podLister.Pods(namespace).Get(name) + return c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions()) } // GetCHIByObjectMeta gets CHI by namespaced name diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index 21f17d7c0..659a1e148 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -24,6 +24,26 @@ import ( chopmodel "github.com/altinity/clickhouse-operator/pkg/model" ) +func (c *Controller) appendLabelReady(host *chop.ChiHost) error { + pod, err := c.getPod(host) + if err != nil { + return err + } + chopmodel.AppendLabelReady(&pod.ObjectMeta) + _, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod) + return err +} + +func (c *Controller) deleteLabelReady(host *chop.ChiHost) error { + pod, err := c.getPod(host) + if err != nil { + return err + } + chopmodel.DeleteLabelReady(&pod.ObjectMeta) + _, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod) + return err +} + func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Container)) { namespace := host.Address.Namespace name := chopmodel.CreatePodName(host) diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 95e3519dc..ab06d85a8 100644 --- a/pkg/model/creator.go 
+++ b/pkg/model/creator.go @@ -61,7 +61,7 @@ func (c *Creator) CreateServiceCHI() *corev1.Service { c.chi.Namespace, serviceName, c.labeler.getLabelsServiceCHI(), - c.labeler.getSelectorCHIScope(), + c.labeler.getSelectorCHIScopeReady(), ) } else { // Incorrect/unknown .templates.ServiceTemplate specified @@ -88,7 +88,7 @@ func (c *Creator) CreateServiceCHI() *corev1.Service { TargetPort: intstr.FromString(chDefaultTCPPortName), }, }, - Selector: c.labeler.getSelectorCHIScope(), + Selector: c.labeler.getSelectorCHIScopeReady(), Type: corev1.ServiceTypeLoadBalancer, ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal, }, @@ -108,7 +108,7 @@ func (c *Creator) CreateServiceCluster(cluster *chiv1.ChiCluster) *corev1.Servic cluster.Address.Namespace, serviceName, c.labeler.getLabelsServiceCluster(cluster), - c.labeler.getSelectorClusterScope(cluster), + c.labeler.getSelectorClusterScopeReady(cluster), ) } else { return nil @@ -127,7 +127,7 @@ func (c *Creator) CreateServiceShard(shard *chiv1.ChiShard) *corev1.Service { shard.Address.Namespace, serviceName, c.labeler.getLabelsServiceShard(shard), - c.labeler.getSelectorShardScope(shard), + c.labeler.getSelectorShardScopeReady(shard), ) } else { return nil diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index 1653b76ad..59da6cf34 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -23,12 +23,12 @@ import ( "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" kublabels "k8s.io/apimachinery/pkg/labels" - "time" ) const ( // Kubernetes labels LabelReadyName = clickhousealtinitycom.GroupName + "/ready" + LabelReadyValue = "yes" LabelAppName = clickhousealtinitycom.GroupName + "/app" LabelAppValue = "chop" LabelCHOP = clickhousealtinitycom.GroupName + "/chop" @@ -162,6 +162,11 @@ func (l *Labeler) getSelectorCHIScope() map[string]string { } } +// getSelectorCHIScopeReady gets labels to select a ready-labelled CHI-scoped object +func (l *Labeler) getSelectorCHIScopeReady() 
map[string]string { + return l.appendReadyLabels(l.getSelectorCHIScope()) +} + // getLabelsClusterScope gets labels for Cluster-scoped object func (l *Labeler) getLabelsClusterScope(cluster *chi.ChiCluster) map[string]string { // Combine generated labels and CHI-provided labels @@ -184,6 +189,11 @@ func (l *Labeler) getSelectorClusterScope(cluster *chi.ChiCluster) map[string]st } } +// getSelectorClusterScope gets labels to select a ready-labelled Cluster-scoped object +func (l *Labeler) getSelectorClusterScopeReady(cluster *chi.ChiCluster) map[string]string { + return l.appendReadyLabels(l.getSelectorClusterScope(cluster)) +} + // getLabelsShardScope gets labels for Shard-scoped object func (l *Labeler) getLabelsShardScope(shard *chi.ChiShard) map[string]string { // Combine generated labels and CHI-provided labels @@ -208,6 +218,11 @@ func (l *Labeler) getSelectorShardScope(shard *chi.ChiShard) map[string]string { } } +// getSelectorShardScope gets labels to select a ready-labelled Shard-scoped object +func (l *Labeler) getSelectorShardScopeReady(shard *chi.ChiShard) map[string]string { + return l.appendReadyLabels(l.getSelectorShardScope(shard)) +} + // getLabelsHostScope gets labels for Host-scoped object func (l *Labeler) getLabelsHostScope(host *chi.ChiHost, applySupplementaryServiceLabels bool) map[string]string { // Combine generated labels and CHI-provided labels @@ -258,6 +273,13 @@ func (l *Labeler) appendCHILabels(dst map[string]string) map[string]string { return util.MergeStringMapsOverwrite(dst, l.chi.Labels) } +// appendReadyLabels appends "Ready" label to labels set +func (l *Labeler) appendReadyLabels(dst map[string]string) map[string]string { + return util.MergeStringMapsOverwrite(dst, map[string]string{ + LabelReadyName: LabelReadyValue, + }) +} + // getAnnotationsHostScope gets annotations for Host-scoped object func (l *Labeler) getAnnotationsHostScope(host *chi.ChiHost) map[string]string { // We may want to append some annotations in here @@ 
-442,15 +464,15 @@ func GetClusterNameFromObjectMeta(meta *meta.ObjectMeta) (string, error) { return meta.Labels[LabelClusterName], nil } -// AddLabelReady adds "ready" label with value = UTC now -func AddLabelReady(meta *meta.ObjectMeta) { +// AppendLabelReady adds "ready" label with value = UTC now +func AppendLabelReady(meta *meta.ObjectMeta) { if meta == nil { return } util.MergeStringMapsOverwrite( meta.Labels, map[string]string{ - LabelReadyName: time.Now().UTC().String(), + LabelReadyName: LabelReadyValue, }) } From beeace87ec82be8a4f5212e42e5306505994f9e9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 22 Jan 2021 11:34:46 +0300 Subject: [PATCH 09/78] dev: create tables on host --- pkg/controller/chi/worker.go | 71 +++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index b1ad10846..58182dd1e 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -341,18 +341,19 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { func(shard *chop.ChiShard) { }, func(host *chop.ChiHost) { - if update { - w.a.V(1). - WithEvent(new, eventActionCreate, eventReasonCreateStarted). - WithStatusAction(new). - Info("Adding tables on shard/host:%d/%d cluster:%s", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName) - if err := w.schemer.HostCreateTables(host); err != nil { - w.a.Error("ERROR create tables on host %s. err: %v", host.Name, err) - } - } else { - w.a.V(1). - Info("As CHI is just created, not need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) - } + // + //if update { + // w.a.V(1). + // WithEvent(new, eventActionCreate, eventReasonCreateStarted). + // WithStatusAction(new). 
+ // Info("Adding tables on shard/host:%d/%d cluster:%s", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName) + // if err := w.schemer.HostCreateTables(host); err != nil { + // w.a.Error("ERROR create tables on host %s. err: %v", host.Name, err) + // } + //} else { + // w.a.V(1). + // Info("As CHI is just created, not need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + //} }, ) @@ -558,12 +559,17 @@ func (w *worker) excludeHost(host *chop.ChiHost, status StatefulSetStatus) error if w.waitExcludeHost(host, status) { w.a.V(1). Info("Exclude from cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + w.excludeHostFromService(host) w.excludeHostFromClickHouseCluster(host) } return nil } +func (w *worker) excludeHostFromService(host *chop.ChiHost) { + w.c.deleteLabelReady(host) +} + // excludeHostFromClickHouseCluster excludes host from all ClickHouse clusters func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) { // Specify in options to exclude host from ClickHouse config file @@ -581,7 +587,7 @@ func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) { _ = w.waitHostNotInCluster(host) } -// determines whether reconciler should wait for host to be excluded from/included into cluster +// determines whether reconciler should wait for host to be excluded from cluster func (w *worker) waitExcludeHost(host *chop.ChiHost, status StatefulSetStatus) bool { if (status == statefulSetStatusNew) || (status == statefulSetStatusSame) { // No need to wait for new and non-modified StatefulSets @@ -593,20 +599,19 @@ func (w *worker) waitExcludeHost(host *chop.ChiHost, status StatefulSetStatus) b return false } - if host.CHI.IsReconcilingPolicyWait() { + // Check CHI settings + switch { + case host.CHI.IsReconcilingPolicyWait(): return true - } else if 
host.CHI.IsReconcilingPolicyNoWait() { + case host.CHI.IsReconcilingPolicyNoWait(): return false } - if !w.c.chop.Config().ReconcileWaitExclude { - return false - } - - return true + // Fallback to operator's settings + return w.c.chop.Config().ReconcileWaitExclude } -// determines whether reconciler should wait for host to be excluded from/included into cluster +// determines whether reconciler should wait for host to be included into cluster func (w *worker) waitIncludeHost(host *chop.ChiHost, status StatefulSetStatus) bool { if (status == statefulSetStatusNew) || (status == statefulSetStatusSame) { return false @@ -617,23 +622,29 @@ func (w *worker) waitIncludeHost(host *chop.ChiHost, status StatefulSetStatus) b return false } - if host.CHI.IsReconcilingPolicyWait() { + // Check CHI settings + switch { + case host.CHI.IsReconcilingPolicyWait(): return true - } else if host.CHI.IsReconcilingPolicyNoWait() { + case host.CHI.IsReconcilingPolicyNoWait(): return false } - if w.c.chop.Config().ReconcileWaitInclude == false { - return false - } - - return true + // Fallback to operator's settings + return w.c.chop.Config().ReconcileWaitInclude } // Include host back to ClickHouse clusters func (w *worker) includeHost(host *chop.ChiHost, status StatefulSetStatus) error { w.a.V(1). Info("Include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + w.includeHostIntoClickHouseCluster(host, status) + w.includeHostIntoService(host) + + return nil +} + +func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost, status StatefulSetStatus) { options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). SetRemoteServersGeneratorOptions(chopmodel.NewRemoteServersGeneratorOptions(). 
ExcludeReconcileAttributes( @@ -644,8 +655,10 @@ func (w *worker) includeHost(host *chop.ChiHost, status StatefulSetStatus) error if w.waitIncludeHost(host, status) { _ = w.waitHostInCluster(host) } +} - return nil +func (w *worker) includeHostIntoService(host *chop.ChiHost) { + w.c.appendLabelReady(host) } // waitHostInCluster waits until host is a member of at least one ClickHouse cluster From f53d5affc64121ed48a5daf2b7afd025eb5e0cdc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 22 Jan 2021 16:24:52 +0300 Subject: [PATCH 10/78] dev: split announcer into independent and controller-dependent parties and use it --- cmd/metrics_exporter/app/metrics_exporter.go | 2 +- cmd/operator/app/clickhouse_operator.go | 2 +- pkg/announcer/announcer.go | 129 ++++++++++++ .../clickhouse.altinity.com/v1/type_chi.go | 5 +- .../v1/type_config_chop.go | 30 +-- .../v1/type_settings.go | 13 -- .../v1/type_templates.go | 16 +- pkg/apis/metrics/clickhouse_fetcher.go | 3 +- pkg/apis/metrics/exporter.go | 4 +- pkg/chop/chop.go | 23 +- pkg/chop/config_manager.go | 25 ++- pkg/chop/kube_machinery.go | 12 +- pkg/controller/chi/announcer.go | 198 +++++++++++++----- pkg/controller/chi/controller.go | 108 +++++----- pkg/controller/chi/creator.go | 37 ++-- pkg/controller/chi/deleter.go | 73 ++++--- pkg/controller/chi/event.go | 11 +- pkg/controller/chi/labeler.go | 23 +- pkg/controller/chi/pods.go | 8 +- pkg/controller/chi/poller.go | 36 ++-- pkg/controller/chi/volumes.go | 12 +- pkg/controller/chi/worker.go | 2 +- pkg/model/ch_config_generator.go | 1 + pkg/model/clickhouse/connection.go | 20 +- pkg/model/clickhouse/pool.go | 11 +- pkg/model/creator.go | 29 ++- pkg/model/labeler.go | 8 +- pkg/model/namer.go | 5 +- pkg/model/normalizer.go | 34 +-- pkg/model/schemer.go | 53 +++-- pkg/util/fs.go | 4 - pkg/util/retry.go | 13 +- tests/test.py | 2 +- 33 files changed, 569 insertions(+), 383 deletions(-) create mode 100644 pkg/announcer/announcer.go diff --git 
a/cmd/metrics_exporter/app/metrics_exporter.go b/cmd/metrics_exporter/app/metrics_exporter.go index a24267e39..af64a69aa 100644 --- a/cmd/metrics_exporter/app/metrics_exporter.go +++ b/cmd/metrics_exporter/app/metrics_exporter.go @@ -95,7 +95,7 @@ func Run() { // Create operator instance chop := chop.GetCHOp(chopClient, chopConfigFile) chop.SetupLog() - chop.Config().WriteToLog() + log.Info(chop.Config().String(true)) exporter := metrics.StartMetricsREST( metrics.NewCHAccessInfo( diff --git a/cmd/operator/app/clickhouse_operator.go b/cmd/operator/app/clickhouse_operator.go index 26b6a0962..272124abf 100644 --- a/cmd/operator/app/clickhouse_operator.go +++ b/cmd/operator/app/clickhouse_operator.go @@ -100,7 +100,7 @@ func Run() { // Create operator instance chop := chop.GetCHOp(chopClient, chopConfigFile) chop.SetupLog() - chop.Config().WriteToLog() + log.Info(chop.Config().String(true)) log.V(1).Infof("Log options parsed\n") diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go new file mode 100644 index 000000000..fcfa966c3 --- /dev/null +++ b/pkg/announcer/announcer.go @@ -0,0 +1,129 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package announcer + +import ( + log "github.com/golang/glog" +) + +// Announcer handles all log/event/status messages going outside of controller/worker +type Announcer struct { + v log.Level + + // writeLog specifies whether to write log file + writeLog bool +} + +// announcer which would be used in top-level functions, can be called as default +var announcer Announcer + +// init creates default announcer +func init() { + announcer = New() +} + +// New creates new announcer +func New() Announcer { + return Announcer{ + writeLog: true, + } +} + +// V is inspired by log.V() +func (a Announcer) V(level log.Level) Announcer { + b := a + b.v = level + b.writeLog = true + return b +} + +// V is inspired by log.V() +func V(level log.Level) Announcer { + return announcer.V(level) +} + +// Info is inspired by log.Infof() +func (a Announcer) Info(format string, args ...interface{}) { + // Produce classic log line + if a.writeLog { + if a.v > 0 { + if len(args) > 0 { + log.V(a.v).Infof(format, args...) + } else { + log.V(a.v).Info(format) + } + } else { + if len(args) > 0 { + log.Infof(format, args...) + } else { + log.Info(format) + } + } + } +} + +// Info is inspired by log.Infof() +func Info(format string, args ...interface{}) { + announcer.Info(format, args...) +} + +// Warning is inspired by log.Warningf() +func (a Announcer) Warning(format string, args ...interface{}) { + // Produce classic log line + if a.writeLog { + if len(args) > 0 { + log.Warningf(format, args...) + } else { + log.Warning(format) + } + } +} + +// Warning is inspired by log.Warningf() +func Warning(format string, args ...interface{}) { + announcer.Warning(format, args...) +} + +// Error is inspired by log.Errorf() +func (a Announcer) Error(format string, args ...interface{}) { + // Produce classic log line + if a.writeLog { + if len(args) > 0 { + log.Errorf(format, args...)
+ } else { + log.Error(format) + } + } +} + +// Error is inspired by log.Errorf() +func Error(format string, args ...interface{}) { + announcer.Error(format, args...) +} + +// Fatal is inspired by log.Fatalf() +func (a Announcer) Fatal(format string, args ...interface{}) { + // Write and exit + if len(args) > 0 { + log.Fatalf(format, args...) + } else { + log.Fatal(format) + } +} + +// Fatal is inspired by log.Fatalf() +func Fatal(format string, args ...interface{}) { + announcer.Fatal(format, args...) +} diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index f4c6fa76e..70d3b233d 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -15,10 +15,11 @@ package v1 import ( - "github.com/altinity/clickhouse-operator/pkg/util" - "github.com/altinity/clickhouse-operator/pkg/version" "math" "strings" + + "github.com/altinity/clickhouse-operator/pkg/util" + "github.com/altinity/clickhouse-operator/pkg/version" ) // fillStatus fills .Status diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go index c8b23f472..4e8f0fad3 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go @@ -16,11 +16,11 @@ package v1 import ( "bytes" + "fmt" "os" "strings" "time" - log "github.com/golang/glog" // log "k8s.io/klog" "github.com/imdario/mergo" @@ -151,21 +151,23 @@ type OperatorConfig struct { } // MergeFrom merges -func (config *OperatorConfig) MergeFrom(from *OperatorConfig, _type MergeType) { +func (config *OperatorConfig) MergeFrom(from *OperatorConfig, _type MergeType) error { switch _type { case MergeTypeFillEmptyValues: if err := mergo.Merge(config, *from); err != nil { - log.V(1).Infof("FAIL merge config Error: %q", err) + return fmt.Errorf("FAIL merge config Error: %q", err) } case 
MergeTypeOverrideByNonEmptyValues: if err := mergo.Merge(config, *from, mergo.WithOverride); err != nil { - log.V(1).Infof("FAIL merge config Error: %q", err) + return fmt.Errorf("FAIL merge config Error: %q", err) } } + + return nil } // readCHITemplates build OperatorConfig.CHITemplate from template files content -func (config *OperatorConfig) readCHITemplates() { +func (config *OperatorConfig) readCHITemplates() (errs []error) { // Read CHI template files config.CHITemplateFiles = util.ReadFilesIntoMap(config.CHITemplatesPath, config.isCHITemplateExt) @@ -174,11 +176,13 @@ func (config *OperatorConfig) readCHITemplates() { template := new(ClickHouseInstallation) if err := yaml.Unmarshal([]byte(config.CHITemplateFiles[filename]), template); err != nil { // Unable to unmarshal - skip incorrect template - log.V(1).Infof("FAIL readCHITemplates() unable to unmarshal file %s Error: %q", filename, err) + errs = append(errs, fmt.Errorf("FAIL readCHITemplates() unable to unmarshal file %s Error: %q", filename, err)) continue } config.enlistCHITemplate(template) } + + return } // enlistCHITemplate inserts template into templates catalog @@ -187,7 +191,6 @@ func (config *OperatorConfig) enlistCHITemplate(template *ClickHouseInstallation config.CHITemplates = make([]*ClickHouseInstallation, 0) } config.CHITemplates = append(config.CHITemplates, template) - log.V(1).Infof("enlistCHITemplate(%s/%s)", template.Namespace, template.Name) } // unlistCHITemplate removes template from templates catalog @@ -196,11 +199,9 @@ func (config *OperatorConfig) unlistCHITemplate(template *ClickHouseInstallation return } - log.V(1).Infof("unlistCHITemplate(%s/%s)", template.Namespace, template.Name) // Nullify found template entry for _, _template := range config.CHITemplates { if (_template.Name == template.Name) && (_template.Namespace == template.Namespace) { - log.V(1).Infof("unlistCHITemplate(%s/%s) - found, unlisting", template.Namespace, template.Name) // TODO normalize 
//config.CHITemplates[i] = nil _template.Name = "" @@ -226,7 +227,6 @@ func (config *OperatorConfig) FindTemplate(use *ChiUseTemplate, namespace string if use.Namespace != "" { // With fully-specified use template direct (full name) only match is applicable, and it is not possible // This is strange situation, however - log.V(1).Infof("STRANGE FindTemplate(%s/%s) - unexpected position", use.Namespace, use.Name) return nil } @@ -249,7 +249,6 @@ func (config *OperatorConfig) FindAutoTemplates() []*ClickHouseInstallation { res = append(res, _template) } } - log.V(3).Infof("Found %d auto templates", len(res)) return res } @@ -452,10 +451,6 @@ func (config *OperatorConfig) applyDefaultWatchNamespace() { // readClickHouseCustomConfigFiles reads all extra user-specified ClickHouse config files func (config *OperatorConfig) readClickHouseCustomConfigFiles() { - log.V(0).Infof("Read Common Config files from folder: %s", config.CHCommonConfigsPath) - log.V(0).Infof("Read Host Config files from folder: %s", config.CHHostConfigsPath) - log.V(0).Infof("Read Users Config files from folder: %s", config.CHUsersConfigsPath) - config.CHCommonConfigs = util.ReadFilesIntoMap(config.CHCommonConfigsPath, config.isCHConfigExt) config.CHHostConfigs = util.ReadFilesIntoMap(config.CHHostConfigsPath, config.isCHConfigExt) config.CHUsersConfigs = util.ReadFilesIntoMap(config.CHUsersConfigsPath, config.isCHConfigExt) @@ -542,11 +537,6 @@ func (config *OperatorConfig) String(hideCredentials bool) string { return b.String() } -// WriteToLog writes OperatorConfig into log -func (config *OperatorConfig) WriteToLog() { - log.V(1).Infof("OperatorConfig:\n%s", config.String(true)) -} - // TODO unify with GetInformerNamespace // IsWatchedNamespace returns whether specified namespace is in a list of watched func (config *OperatorConfig) IsWatchedNamespace(namespace string) bool { diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go 
index 5950a2653..571b6c632 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go @@ -23,9 +23,6 @@ import ( "sort" "strconv" "strings" - - log "github.com/golang/glog" - // log "k8s.io/klog" ) const ( @@ -177,7 +174,6 @@ func unmarshalScalar(untyped interface{}) (string, bool) { typeOf := reflect.TypeOf(untyped) if typeOf == nil { // Unable to determine type of the value - log.V(3).Infof("unmarshalScalar() typeOf==nil") return "", false } @@ -220,12 +216,9 @@ func unmarshalScalar(untyped interface{}) (string, bool) { knownType = true } - str := typeOf.String() if knownType { - log.V(3).Infof("unmarshalScalar() type=%v value=%s", str, res) return res, true } else { - log.V(3).Infof("unmarshalScalar() type=%v - UNABLE to unmarshal", str) return "", false } } @@ -238,7 +231,6 @@ func unmarshalVector(untyped interface{}) ([]string, bool) { typeOf := reflect.TypeOf(untyped) if typeOf == nil { // Unable to determine type of the value - log.V(3).Infof("unmarshalVector() typeOf==nil") return nil, false } @@ -253,12 +245,9 @@ func unmarshalVector(untyped interface{}) ([]string, bool) { knownType = true } - str := typeOf.String() if knownType { - log.V(3).Infof("unmarshalVector() type=%v value=%s", str, res) return res, true } else { - log.V(3).Infof("unmarshalVector type=%v - UNABLE to unmarshal", str) return nil, false } } @@ -535,8 +524,6 @@ func string2Section(section string) (SettingsSection, error) { return SectionHost, nil } - log.V(1).Infof("unknown section specified %v", section) - return SectionEmpty, fmt.Errorf("unknown section specified %v", section) } diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go index f20d51be2..daa877018 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go @@ -15,13 +15,11 @@ package v1 import ( - log "github.com/golang/glog" 
- // log "k8s.io/klog" - + "fmt" "github.com/imdario/mergo" ) -func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { +func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) (errs []error) { if from == nil { return } @@ -46,7 +44,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { // Override `to` template with `from` template //templates.PodTemplates[toIndex] = *fromTemplate.DeepCopy() if err := mergo.Merge(toTemplate, *fromTemplate, mergo.WithOverride); err != nil { - log.V(1).Infof("ERROR merge template(%s): %v", toTemplate.Name, err) + errs = append(errs, fmt.Errorf("ERROR merge template(%s): %v", toTemplate.Name, err)) } break } @@ -80,7 +78,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { // Override `to` template with `from` template //templates.PodTemplates[toIndex] = *fromTemplate.DeepCopy() if err := mergo.Merge(toTemplate, *fromTemplate, mergo.WithOverride); err != nil { - log.V(1).Infof("ERROR merge template(%s): %v", toTemplate.Name, err) + errs = append(errs, fmt.Errorf("ERROR merge template(%s): %v", toTemplate.Name, err)) } break } @@ -114,7 +112,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { // Override `to` template with `from` template //templates.VolumeClaimTemplates[toIndex] = *fromTemplate.DeepCopy() if err := mergo.Merge(toTemplate, *fromTemplate, mergo.WithOverride); err != nil { - log.V(1).Infof("ERROR merge template(%s): %v", toTemplate.Name, err) + errs = append(errs, fmt.Errorf("ERROR merge template(%s): %v", toTemplate.Name, err)) } break } @@ -148,7 +146,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { // Override `to` template with `from` template //templates.ServiceTemplates[toIndex] = *fromTemplate.DeepCopy() if err := mergo.Merge(toTemplate, *fromTemplate, mergo.WithOverride); err != nil { - log.V(1).Infof("ERROR merge template(%s): %v", 
toTemplate.Name, err) + errs = append(errs, fmt.Errorf("ERROR merge template(%s): %v", toTemplate.Name, err)) } break } @@ -161,4 +159,6 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) { } } } + + return } diff --git a/pkg/apis/metrics/clickhouse_fetcher.go b/pkg/apis/metrics/clickhouse_fetcher.go index 9114ee9b0..bc3ffa3bf 100644 --- a/pkg/apis/metrics/clickhouse_fetcher.go +++ b/pkg/apis/metrics/clickhouse_fetcher.go @@ -16,10 +16,11 @@ package metrics import ( sqlmodule "database/sql" + "time" "github.com/MakeNowJust/heredoc" + "github.com/altinity/clickhouse-operator/pkg/model/clickhouse" - "time" ) const ( diff --git a/pkg/apis/metrics/exporter.go b/pkg/apis/metrics/exporter.go index 38053935f..4a644406f 100644 --- a/pkg/apis/metrics/exporter.go +++ b/pkg/apis/metrics/exporter.go @@ -17,8 +17,6 @@ package metrics import ( "encoding/json" "fmt" - "github.com/altinity/clickhouse-operator/pkg/chop" - "k8s.io/apimachinery/pkg/apis/meta/v1" "net/http" "sync" @@ -26,7 +24,9 @@ import ( // log "k8s.io/klog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/altinity/clickhouse-operator/pkg/chop" chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" ) diff --git a/pkg/chop/chop.go b/pkg/chop/chop.go index 0c6e73594..7aa9e70c0 100644 --- a/pkg/chop/chop.go +++ b/pkg/chop/chop.go @@ -16,11 +16,10 @@ package chop import ( "flag" + + log "github.com/altinity/clickhouse-operator/pkg/announcer" "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" - - log "github.com/golang/glog" - // log "k8s.io/klog" ) type CHOp struct { @@ -50,31 +49,31 @@ func (c *CHOp) Config() *v1.OperatorConfig { func (c *CHOp) SetupLog() { updated := false if c.Config().Logtostderr != "" { - log.V(1).Infof("Log option cur value %s=%s\n", "logtostderr", 
flag.Lookup("logtostderr").Value) - log.V(1).Infof("Log option new value %s=%s\n", "logtostderr", c.Config().Logtostderr) + log.V(1).Info("Log option cur value %s=%s", "logtostderr", flag.Lookup("logtostderr").Value) + log.V(1).Info("Log option new value %s=%s", "logtostderr", c.Config().Logtostderr) updated = true _ = flag.Set("logtostderr", c.Config().Logtostderr) } if c.Config().Alsologtostderr != "" { - log.V(1).Infof("Log option cur value %s=%s\n", "alsologtostderr", flag.Lookup("alsologtostderr").Value) - log.V(1).Infof("Log option new value %s=%s\n", "alsologtostderr", c.Config().Alsologtostderr) + log.V(1).Info("Log option cur value %s=%s", "alsologtostderr", flag.Lookup("alsologtostderr").Value) + log.V(1).Info("Log option new value %s=%s", "alsologtostderr", c.Config().Alsologtostderr) updated = true _ = flag.Set("alsologtostderr", c.Config().Alsologtostderr) } if c.Config().Stderrthreshold != "" { - log.V(1).Infof("Log option cur value %s=%s\n", "stderrthreshold", flag.Lookup("stderrthreshold").Value) - log.V(1).Infof("Log option new value %s=%s\n", "stderrthreshold", c.Config().Stderrthreshold) + log.V(1).Info("Log option cur value %s=%s", "stderrthreshold", flag.Lookup("stderrthreshold").Value) + log.V(1).Info("Log option new value %s=%s", "stderrthreshold", c.Config().Stderrthreshold) updated = true _ = flag.Set("stderrthreshold", c.Config().Stderrthreshold) } if c.Config().V != "" { - log.V(1).Infof("Log option cur value %s=%s\n", "v", flag.Lookup("v").Value) - log.V(1).Infof("Log option new value %s=%s\n", "v", c.Config().V) + log.V(1).Info("Log option cur value %s=%s", "v", flag.Lookup("v").Value) + log.V(1).Info("Log option new value %s=%s", "v", c.Config().V) updated = true _ = flag.Set("v", c.Config().V) } if updated { - log.V(1).Infof("Additional log options applied\n") + log.V(1).Info("Additional log options applied") } } diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go index ec79accee..b21be48c2 100644 --- 
a/pkg/chop/config_manager.go +++ b/pkg/chop/config_manager.go @@ -15,19 +15,18 @@ package chop import ( - log "github.com/golang/glog" - // log "k8s.io/klog" - - "github.com/kubernetes-sigs/yaml" "io/ioutil" "os" "os/user" "path/filepath" "sort" + "github.com/kubernetes-sigs/yaml" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + log "github.com/altinity/clickhouse-operator/pkg/announcer" chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type ConfigManager struct { @@ -77,7 +76,7 @@ func (cm *ConfigManager) Init() error { return err } log.V(1).Info("File-based ClickHouseOperatorConfigurations") - cm.fileConfig.WriteToLog() + log.V(1).Info(cm.fileConfig.String(true)) // Get configs from all config Custom Resources watchedNamespace := cm.fileConfig.GetInformerNamespace() @@ -89,14 +88,14 @@ func (cm *ConfigManager) Init() error { // From now on we have one unified CHOP config log.V(1).Info("Unified (but not post-processed yet) CHOP config") - cm.config.WriteToLog() + log.V(1).Info(cm.config.String(true)) // Finalize config by post-processing cm.config.Postprocess() // OperatorConfig is ready log.V(1).Info("Final CHOP config") - cm.config.WriteToLog() + log.V(1).Info(cm.config.String(true)) return nil } @@ -116,7 +115,7 @@ func (cm *ConfigManager) getCRBasedConfigs(namespace string) { // Get list of ClickHouseOperatorConfiguration objects var err error if cm.chopConfigList, err = cm.chopClient.ClickhouseV1().ClickHouseOperatorConfigurations(namespace).List(metav1.ListOptions{}); err != nil { - log.V(1).Infof("Error read ClickHouseOperatorConfigurations %v", err) + log.V(1).Error("Error read ClickHouseOperatorConfigurations %v", err) return } @@ -152,8 +151,8 @@ func (cm *ConfigManager) getCRBasedConfigs(namespace string) { // logCRBasedConfigs writes all ClickHouseOperatorConfiguration objects into 
log func (cm *ConfigManager) logCRBasedConfigs() { for _, chOperatorConfiguration := range cm.crConfigs { - log.V(1).Infof("chop config %s/%s :", chOperatorConfiguration.ConfigFolderPath, chOperatorConfiguration.ConfigFilePath) - chOperatorConfiguration.WriteToLog() + log.V(1).Info("chop config %s/%s :", chOperatorConfiguration.ConfigFolderPath, chOperatorConfiguration.ConfigFilePath) + log.V(1).Info(chOperatorConfiguration.String(true)) } } @@ -305,9 +304,9 @@ func (cm *ConfigManager) logEnvVarParams() { sort.Strings(keys) // Walk over sorted names aka keys - log.V(1).Infof("Parameters num: %d\n", len(cm.runtimeParams)) + log.V(1).Info("Parameters num: %d", len(cm.runtimeParams)) for _, k := range keys { - log.V(1).Infof("%s=%s\n", k, cm.runtimeParams[k]) + log.V(1).Info("%s=%s", k, cm.runtimeParams[k]) } } diff --git a/pkg/chop/kube_machinery.go b/pkg/chop/kube_machinery.go index b6b4106bd..31a12efd0 100644 --- a/pkg/chop/kube_machinery.go +++ b/pkg/chop/kube_machinery.go @@ -20,9 +20,7 @@ import ( "os/user" "path/filepath" - log "github.com/golang/glog" - // log "k8s.io/klog" - + log "github.com/altinity/clickhouse-operator/pkg/announcer" chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" "github.com/altinity/clickhouse-operator/pkg/version" @@ -67,18 +65,18 @@ func getKubeConfig(kubeConfigFile, masterURL string) (*kuberest.Config, error) { func GetClientset(kubeConfigFile, masterURL string) (*kube.Clientset, *chopclientset.Clientset) { kubeConfig, err := getKubeConfig(kubeConfigFile, masterURL) if err != nil { - log.Fatalf("Unable to build kubeconf: %s", err.Error()) + log.Fatal("Unable to build kubeconf: %s", err.Error()) os.Exit(1) } kubeClientset, err := kube.NewForConfig(kubeConfig) if err != nil { - log.Fatalf("Unable to initialize kubernetes API clientset: %s", err.Error()) + log.Fatal("Unable to initialize kubernetes API clientset: %s", err.Error()) } chopClientset, err := chopclientset.NewForConfig(kubeConfig) if err 
!= nil { - log.Fatalf("Unable to initialize clickhouse-operator API clientset: %s", err.Error()) + log.Fatal("Unable to initialize clickhouse-operator API clientset: %s", err.Error()) } return kubeClientset, chopClientset @@ -89,7 +87,7 @@ func GetCHOp(chopClient *chopclientset.Clientset, initCHOpConfigFilePath string) // Create operator instance chop := NewCHOp(version.Version, chopClient, initCHOpConfigFilePath) if err := chop.Init(); err != nil { - log.Fatalf("Unable to init CHOP instance %v\n", err) + log.Fatal("Unable to init CHOP instance %v", err) os.Exit(1) } diff --git a/pkg/controller/chi/announcer.go b/pkg/controller/chi/announcer.go index 1c66750f4..0b21f19fa 100644 --- a/pkg/controller/chi/announcer.go +++ b/pkg/controller/chi/announcer.go @@ -17,132 +17,228 @@ package chi import ( "fmt" - log "github.com/golang/glog" - // log "k8s.io/klog" - + a "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + log "github.com/golang/glog" ) // Announcer handler all log/event/status messages going outside of controller/worker type Announcer struct { - c *Controller - chi *chop.ClickHouseInstallation - v log.Level - writeLog bool - writeEvent bool - eventAction string - eventReason string - writeStatusAction bool + a.Announcer + + ctrl *Controller + chi *chop.ClickHouseInstallation + + // writeEvent specifies whether to produce k8s event into chi, therefore requires chi to be specified + // See k8s event for details. 
+ // https://kubernetes.io/docs/reference/kubernetes-api/cluster-resources/event-v1/ + writeEvent bool + // eventAction specifies k8s event action + eventAction string + // eventReason specifies k8s event reason + eventReason string + + // writeStatusAction specifies whether to produce action into `ClickHouseInstallation.Status.Action` of chi, + // therefore requires chi to be specified + writeStatusAction bool + // writeStatusActions specifies whether to produce action into `ClickHouseInstallation.Status.Actions` of chi, + // therefore requires chi to be specified writeStatusActions bool - writeStatusError bool + // writeStatusError specifies whether to produce action into `ClickHouseInstallation.Status.Error` of chi, + // therefore requires chi to be specified + writeStatusError bool } -// NewAnnouncer creates new announcer -func NewAnnouncer(c *Controller) Announcer { +// NewAnnouncer creates new announcer +func NewAnnouncer() Announcer { return Announcer{ - c: c, - writeLog: true, + Announcer: a.New(), } } +// WithController specifies controller to be used in case `chi`-related announces need to be done +func (a Announcer) WithController(ctrl *Controller) Announcer { + b := a + b.ctrl = ctrl + return b +} + // V is inspired by log.V() func (a Announcer) V(level log.Level) Announcer { b := a - b.v = level - b.writeLog = true + b.Announcer = b.Announcer.V(level) return b } -// WithEvent is used in chained calls in order to produce event +// WithEvent is used in chained calls in order to produce event into `chi` func (a Announcer) WithEvent( chi *chop.ClickHouseInstallation, action string, reason string, ) Announcer { b := a - b.writeEvent = true - b.chi = chi - b.eventAction = action - b.eventReason = reason + if chi == nil { + b.writeEvent = false + b.chi = nil + b.eventAction = "" + b.eventReason = "" + } else { + b.writeEvent = true + b.chi = chi + b.eventAction = action + b.eventReason = reason + } return b } -// WithStatusAction is used in chained calls in order to
produce action in ClickHouseInstallation.Status.Action +// WithStatusAction is used in chained calls in order to produce action into `ClickHouseInstallation.Status.Action` func (a Announcer) WithStatusAction(chi *chop.ClickHouseInstallation) Announcer { b := a - b.writeStatusAction = true - b.writeStatusActions = true - b.chi = chi + if chi == nil { + b.chi = nil + b.writeStatusAction = false + b.writeStatusActions = false + } else { + b.chi = chi + b.writeStatusAction = true + b.writeStatusActions = true + } return b } // WithStatusActions is used in chained calls in order to produce action in ClickHouseInstallation.Status.Actions func (a Announcer) WithStatusActions(chi *chop.ClickHouseInstallation) Announcer { b := a - b.writeStatusActions = true - b.chi = chi + if chi == nil { + b.chi = nil + b.writeStatusActions = false + } else { + b.chi = chi + b.writeStatusActions = true + } return b } // WithStatusAction is used in chained calls in order to produce error in ClickHouseInstallation.Status.Error func (a Announcer) WithStatusError(chi *chop.ClickHouseInstallation) Announcer { b := a - b.writeStatusError = true - b.chi = chi + if chi == nil { + b.chi = nil + b.writeStatusError = false + } else { + b.chi = chi + b.writeStatusError = true + } return b } // Info is inspired by log.Infof() func (a Announcer) Info(format string, args ...interface{}) { - if a.writeLog { - if a.v > 0 { - log.V(a.v).Infof(format, args...) + // Produce classic log line + a.Announcer.Info(format, args...) + + // Produce k8s event + if a.writeEvent && a.chiCapable() { + if len(args) > 0 { + a.ctrl.EventInfo(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) } else { - log.Infof(format, args...) + a.ctrl.EventInfo(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format)) } } - if a.writeEvent { - a.c.eventInfo(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) - } + + // Produce chi status record a.writeCHIStatus(format, args...) 
} // Warning is inspired by log.Warningf() func (a Announcer) Warning(format string, args ...interface{}) { - if a.writeLog { - log.Warningf(format, args...) - } - if a.writeEvent { - a.c.eventWarning(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + // Produce classic log line + a.Announcer.Warning(format, args...) + + // Produce k8s event + if a.writeEvent && a.chiCapable() { + if len(args) > 0 { + a.ctrl.EventWarning(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + } else { + a.ctrl.EventWarning(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format)) + } } + + // Produce chi status record a.writeCHIStatus(format, args...) } // Error is inspired by log.Errorf() func (a Announcer) Error(format string, args ...interface{}) { - if a.writeLog { - log.Errorf(format, args...) + // Produce classic log line + a.Announcer.Error(format, args...) + + // Produce k8s event + if a.writeEvent && a.chiCapable() { + if len(args) > 0 { + a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + } else { + a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format)) + } } - if a.writeEvent { - a.c.eventError(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + + // Produce chi status record + a.writeCHIStatus(format, args...) +} + +// Fatal is inspired by log.Fatalf() +func (a Announcer) Fatal(format string, args ...interface{}) { + // Produce k8s event + if a.writeEvent && a.chiCapable() { + if len(args) > 0 { + a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...)) + } else { + a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format)) + } } + + // Produce chi status record a.writeCHIStatus(format, args...) + + // Write and exit + a.Announcer.Fatal(format, args...) 
+} + +// chiCapable checks whether announcer is capable to produce chi-based announcements +func (a Announcer) chiCapable() bool { + return (a.ctrl != nil) && (a.chi != nil) } // writeCHIStatus is internal function which writes ClickHouseInstallation.Status func (a Announcer) writeCHIStatus(format string, args ...interface{}) { + if !a.chiCapable() { + return + } + if a.writeStatusAction { - a.chi.Status.Action = fmt.Sprintf(format, args...) + if len(args) > 0 { + a.chi.Status.Action = fmt.Sprintf(format, args...) + } else { + a.chi.Status.Action = fmt.Sprint(format) + } } if a.writeStatusActions { - (&a.chi.Status).PushAction(fmt.Sprintf(format, args...)) + if len(args) > 0 { + (&a.chi.Status).PushAction(fmt.Sprintf(format, args...)) + } else { + (&a.chi.Status).PushAction(fmt.Sprint(format)) + } } if a.writeStatusError { - (&a.chi.Status).SetAndPushError(fmt.Sprintf(format, args...)) + if len(args) > 0 { + (&a.chi.Status).SetAndPushError(fmt.Sprintf(format, args...)) + } else { + (&a.chi.Status).SetAndPushError(fmt.Sprint(format)) + } } // Propagate status updates into object if a.writeStatusAction || a.writeStatusActions || a.writeStatusError { - _ = a.c.updateCHIObjectStatus(a.chi, true) + _ = a.ctrl.updateCHIObjectStatus(a.chi, true) } } diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 6deb32c78..c71f330ba 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -27,7 +27,7 @@ import ( chopmodels "github.com/altinity/clickhouse-operator/pkg/model" "github.com/altinity/clickhouse-operator/pkg/util" - log "github.com/golang/glog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" "gopkg.in/d4l3k/messagediff.v1" apps "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" @@ -58,7 +58,7 @@ func NewController( // Setup events eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(log.Infof) + eventBroadcaster.StartLogging(log.Info) eventBroadcaster.StartRecordingToSink( 
&typedcore.EventSinkImpl{ Interface: kubeClient.CoreV1().Events(""), @@ -122,7 +122,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chi.Namespace) { return } - log.V(2).Infof("chiInformer.AddFunc - %s/%s added", chi.Namespace, chi.Name) + log.V(2).Info("chiInformer.AddFunc - %s/%s added", chi.Namespace, chi.Name) c.enqueueObject(chi.Namespace, chi.Name, NewReconcileChi(reconcileAdd, nil, chi)) }, UpdateFunc: func(old, new interface{}) { @@ -139,7 +139,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chi.Namespace) { return } - log.V(2).Infof("chiInformer.DeleteFunc - CHI %s/%s deleted", chi.Namespace, chi.Name) + log.V(2).Info("chiInformer.DeleteFunc - CHI %s/%s deleted", chi.Namespace, chi.Name) c.enqueueObject(chi.Namespace, chi.Name, NewReconcileChi(reconcileDelete, chi, nil)) }, }) @@ -150,7 +150,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chit.Namespace) { return } - log.V(2).Infof("chitInformer.AddFunc - %s/%s added", chit.Namespace, chit.Name) + log.V(2).Info("chitInformer.AddFunc - %s/%s added", chit.Namespace, chit.Name) c.enqueueObject(chit.Namespace, chit.Name, NewReconcileChit(reconcileAdd, nil, chit)) }, UpdateFunc: func(old, new interface{}) { @@ -159,7 +159,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(newChit.Namespace) { return } - log.V(2).Infof("chitInformer.UpdateFunc - %s/%s", newChit.Namespace, newChit.Name) + log.V(2).Info("chitInformer.UpdateFunc - %s/%s", newChit.Namespace, newChit.Name) c.enqueueObject(newChit.Namespace, newChit.Name, NewReconcileChit(reconcileUpdate, oldChit, newChit)) }, DeleteFunc: func(obj interface{}) { @@ -167,7 +167,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chit.Namespace) { return } - log.V(2).Infof("chitInformer.DeleteFunc - %s/%s deleted", chit.Namespace, chit.Name) + log.V(2).Info("chitInformer.DeleteFunc - %s/%s deleted", 
chit.Namespace, chit.Name) c.enqueueObject(chit.Namespace, chit.Name, NewReconcileChit(reconcileDelete, chit, nil)) }, }) @@ -178,7 +178,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chopConfig.Namespace) { return } - log.V(2).Infof("chopInformer.AddFunc - %s/%s added", chopConfig.Namespace, chopConfig.Name) + log.V(2).Info("chopInformer.AddFunc - %s/%s added", chopConfig.Namespace, chopConfig.Name) c.enqueueObject(chopConfig.Namespace, chopConfig.Name, NewReconcileChopConfig(reconcileAdd, nil, chopConfig)) }, UpdateFunc: func(old, new interface{}) { @@ -187,7 +187,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(newChopConfig.Namespace) { return } - log.V(2).Infof("chopInformer.UpdateFunc - %s/%s", newChopConfig.Namespace, newChopConfig.Name) + log.V(2).Info("chopInformer.UpdateFunc - %s/%s", newChopConfig.Namespace, newChopConfig.Name) c.enqueueObject(newChopConfig.Namespace, newChopConfig.Name, NewReconcileChopConfig(reconcileUpdate, oldChopConfig, newChopConfig)) }, DeleteFunc: func(obj interface{}) { @@ -195,7 +195,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chopConfig.Namespace) { return } - log.V(2).Infof("chopInformer.DeleteFunc - %s/%s deleted", chopConfig.Namespace, chopConfig.Name) + log.V(2).Info("chopInformer.DeleteFunc - %s/%s deleted", chopConfig.Namespace, chopConfig.Name) c.enqueueObject(chopConfig.Namespace, chopConfig.Name, NewReconcileChopConfig(reconcileDelete, chopConfig, nil)) }, }) @@ -206,7 +206,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&service.ObjectMeta) { return } - log.V(2).Infof("serviceInformer AddFunc %s/%s", service.Namespace, service.Name) + log.V(2).Info("serviceInformer AddFunc %s/%s", service.Namespace, service.Name) }, UpdateFunc: func(old, new interface{}) { oldService := old.(*core.Service) @@ -219,7 +219,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&service.ObjectMeta) { 
return } - log.V(2).Infof("serviceInformer DeleteFunc %s/%s", service.Namespace, service.Name) + log.V(2).Info("serviceInformer DeleteFunc %s/%s", service.Namespace, service.Name) }, }) @@ -229,7 +229,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&endpoints.ObjectMeta) { return } - log.V(2).Infof("endpointsInformer AddFunc %s/%s", endpoints.Namespace, endpoints.Name) + log.V(2).Info("endpointsInformer AddFunc %s/%s", endpoints.Namespace, endpoints.Name) }, UpdateFunc: func(old, new interface{}) { oldEndpoints := old.(*core.Endpoints) @@ -240,14 +240,14 @@ func (c *Controller) addEventHandlers( diff, equal := messagediff.DeepDiff(oldEndpoints, newEndpoints) if equal { - log.V(2).Infof("onUpdateEndpoints(%s/%s): no changes found", oldEndpoints.Namespace, oldEndpoints.Name) + log.V(2).Info("onUpdateEndpoints(%s/%s): no changes found", oldEndpoints.Namespace, oldEndpoints.Name) // No need to react return } added := false for path := range diff.Added { - log.V(2).Infof("onUpdateEndpoints(%s/%s): added %v", oldEndpoints.Namespace, oldEndpoints.Name, path) + log.V(2).Info("onUpdateEndpoints(%s/%s): added %v", oldEndpoints.Namespace, oldEndpoints.Name, path) for _, pathnode := range *path { s := pathnode.String() if s == ".Addresses" { @@ -256,14 +256,14 @@ func (c *Controller) addEventHandlers( } } for path := range diff.Removed { - log.V(2).Infof("onUpdateEndpoints(%s/%s): removed %v", oldEndpoints.Namespace, oldEndpoints.Name, path) + log.V(2).Info("onUpdateEndpoints(%s/%s): removed %v", oldEndpoints.Namespace, oldEndpoints.Name, path) } for path := range diff.Modified { - log.V(2).Infof("onUpdateEndpoints(%s/%s): modified %v", oldEndpoints.Namespace, oldEndpoints.Name, path) + log.V(2).Info("onUpdateEndpoints(%s/%s): modified %v", oldEndpoints.Namespace, oldEndpoints.Name, path) } if added { - log.V(1).Infof("endpointsInformer UpdateFunc(%s/%s) IP ASSIGNED %v", newEndpoints.Namespace, newEndpoints.Name, newEndpoints.Subsets) + 
log.V(1).Info("endpointsInformer UpdateFunc(%s/%s) IP ASSIGNED %v", newEndpoints.Namespace, newEndpoints.Name, newEndpoints.Subsets) c.enqueueObject(newEndpoints.Namespace, newEndpoints.Name, NewDropDns(&newEndpoints.ObjectMeta)) } }, @@ -272,7 +272,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&endpoints.ObjectMeta) { return } - log.V(2).Infof("endpointsInformer DeleteFunc %s/%s", endpoints.Namespace, endpoints.Name) + log.V(2).Info("endpointsInformer DeleteFunc %s/%s", endpoints.Namespace, endpoints.Name) }, }) @@ -282,21 +282,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).Infof("configMapInformer AddFunc %s/%s", configMap.Namespace, configMap.Name) + log.V(2).Info("configMapInformer AddFunc %s/%s", configMap.Namespace, configMap.Name) }, UpdateFunc: func(old, new interface{}) { configMap := old.(*core.ConfigMap) if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).Infof("configMapInformer UpdateFunc %s/%s", configMap.Namespace, configMap.Name) + log.V(2).Info("configMapInformer UpdateFunc %s/%s", configMap.Namespace, configMap.Name) }, DeleteFunc: func(obj interface{}) { configMap := obj.(*core.ConfigMap) if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).Infof("configMapInformer DeleteFunc %s/%s", configMap.Namespace, configMap.Name) + log.V(2).Info("configMapInformer DeleteFunc %s/%s", configMap.Namespace, configMap.Name) }, }) @@ -306,7 +306,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - log.V(2).Infof("statefulSetInformer AddFunc %s/%s", statefulSet.Namespace, statefulSet.Name) + log.V(2).Info("statefulSetInformer AddFunc %s/%s", statefulSet.Namespace, statefulSet.Name) //controller.handleObject(obj) }, UpdateFunc: func(old, new interface{}) { @@ -314,14 +314,14 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - 
log.V(2).Infof("statefulSetInformer UpdateFunc %s/%s", statefulSet.Namespace, statefulSet.Name) + log.V(2).Info("statefulSetInformer UpdateFunc %s/%s", statefulSet.Namespace, statefulSet.Name) }, DeleteFunc: func(obj interface{}) { statefulSet := obj.(*apps.StatefulSet) if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - log.V(2).Infof("statefulSetInformer DeleteFunc %s/%s", statefulSet.Namespace, statefulSet.Name) + log.V(2).Info("statefulSetInformer DeleteFunc %s/%s", statefulSet.Namespace, statefulSet.Name) //controller.handleObject(obj) }, }) @@ -332,21 +332,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).Infof("podInformer AddFunc %s/%s", pod.Namespace, pod.Name) + log.V(2).Info("podInformer AddFunc %s/%s", pod.Namespace, pod.Name) }, UpdateFunc: func(old, new interface{}) { pod := old.(*core.Pod) if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).Infof("podInformer UpdateFunc %s/%s", pod.Namespace, pod.Name) + log.V(2).Info("podInformer UpdateFunc %s/%s", pod.Namespace, pod.Name) }, DeleteFunc: func(obj interface{}) { pod := obj.(*core.Pod) if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).Infof("podInformer DeleteFunc %s/%s", pod.Namespace, pod.Name) + log.V(2).Info("podInformer DeleteFunc %s/%s", pod.Namespace, pod.Name) }, }) } @@ -385,9 +385,9 @@ func (c *Controller) Run(ctx context.Context) { // Start threads // workersNum := len(c.queues) - log.V(1).Infof("ClickHouseInstallation controller: starting workers number: %d", workersNum) + log.V(1).Info("ClickHouseInstallation controller: starting workers number: %d", workersNum) for i := 0; i < workersNum; i++ { - log.V(1).Infof("ClickHouseInstallation controller: starting worker %d out of %d", i+1, workersNum) + log.V(1).Info("ClickHouseInstallation controller: starting worker %d out of %d", i+1, workersNum) worker := c.newWorker(c.queues[i]) go wait.Until(worker.run, runWorkerPeriod, ctx.Done()) } @@ -425,9 +425,9 @@ 
func (c *Controller) updateWatch(namespace, name string, hostnames []string) { // updateWatchAsync func (c *Controller) updateWatchAsync(namespace, name string, hostnames []string) { if err := metrics.InformMetricsExporterAboutWatchedCHI(namespace, name, hostnames); err != nil { - log.V(1).Infof("FAIL update watch (%s/%s): %q", namespace, name, err) + log.V(1).Info("FAIL update watch (%s/%s): %q", namespace, name, err) } else { - log.V(2).Infof("OK update watch (%s/%s)", namespace, name) + log.V(2).Info("OK update watch (%s/%s)", namespace, name) } } @@ -439,15 +439,15 @@ func (c *Controller) deleteWatch(namespace, name string) { // deleteWatchAsync func (c *Controller) deleteWatchAsync(namespace, name string) { if err := metrics.InformMetricsExporterToDeleteWatchedCHI(namespace, name); err != nil { - log.V(1).Infof("FAIL delete watch (%s/%s): %q", namespace, name, err) + log.V(1).Info("FAIL delete watch (%s/%s): %q", namespace, name, err) } else { - log.V(2).Infof("OK delete watch (%s/%s)", namespace, name) + log.V(2).Info("OK delete watch (%s/%s)", namespace, name) } } // addChit sync new CHIT - creates all its resources func (c *Controller) addChit(chit *chi.ClickHouseInstallationTemplate) error { - log.V(1).Infof("addChit(%s/%s)", chit.Namespace, chit.Name) + log.V(1).Info("addChit(%s/%s)", chit.Namespace, chit.Name) c.chop.Config().AddCHITemplate((*chi.ClickHouseInstallation)(chit)) return nil } @@ -455,19 +455,19 @@ func (c *Controller) addChit(chit *chi.ClickHouseInstallationTemplate) error { // updateChit sync CHIT which was already created earlier func (c *Controller) updateChit(old, new *chi.ClickHouseInstallationTemplate) error { if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion { - log.V(2).Infof("updateChit(%s/%s): ResourceVersion did not change: %s", old.Namespace, old.Name, old.ObjectMeta.ResourceVersion) + log.V(2).Info("updateChit(%s/%s): ResourceVersion did not change: %s", old.Namespace, old.Name, old.ObjectMeta.ResourceVersion) 
// No need to react return nil } - log.V(2).Infof("updateChit(%s/%s):", new.Namespace, new.Name) + log.V(2).Info("updateChit(%s/%s):", new.Namespace, new.Name) c.chop.Config().UpdateCHITemplate((*chi.ClickHouseInstallation)(new)) return nil } // deleteChit deletes CHIT func (c *Controller) deleteChit(chit *chi.ClickHouseInstallationTemplate) error { - log.V(2).Infof("deleteChit(%s/%s):", chit.Namespace, chit.Name) + log.V(2).Info("deleteChit(%s/%s):", chit.Namespace, chit.Name) c.chop.Config().DeleteCHITemplate((*chi.ClickHouseInstallation)(chit)) return nil } @@ -475,9 +475,9 @@ func (c *Controller) deleteChit(chit *chi.ClickHouseInstallationTemplate) error // addChopConfig func (c *Controller) addChopConfig(chopConfig *chi.ClickHouseOperatorConfiguration) error { if c.chop.ConfigManager.IsConfigListed(chopConfig) { - log.V(1).Infof("addChopConfig(%s/%s) already known config - do nothing", chopConfig.Namespace, chopConfig.Name) + log.V(1).Info("addChopConfig(%s/%s) already known config - do nothing", chopConfig.Namespace, chopConfig.Name) } else { - log.V(1).Infof("addChopConfig(%s/%s) new, previously unknown config, need to apply", chopConfig.Namespace, chopConfig.Name) + log.V(1).Info("addChopConfig(%s/%s) new, previously unknown config, need to apply", chopConfig.Namespace, chopConfig.Name) // TODO // NEED REFACTORING // os.Exit(0) @@ -489,12 +489,12 @@ func (c *Controller) addChopConfig(chopConfig *chi.ClickHouseOperatorConfigurati // updateChopConfig func (c *Controller) updateChopConfig(old, new *chi.ClickHouseOperatorConfiguration) error { if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion { - log.V(2).Infof("updateChopConfig(%s/%s): ResourceVersion did not change: %s", old.Namespace, old.Name, old.ObjectMeta.ResourceVersion) + log.V(2).Info("updateChopConfig(%s/%s): ResourceVersion did not change: %s", old.Namespace, old.Name, old.ObjectMeta.ResourceVersion) // No need to react return nil } - log.V(2).Infof("updateChopConfig(%s/%s):", 
new.Namespace, new.Name) + log.V(2).Info("updateChopConfig(%s/%s):", new.Namespace, new.Name) // TODO // NEED REFACTORING //os.Exit(0) @@ -504,7 +504,7 @@ func (c *Controller) updateChopConfig(old, new *chi.ClickHouseOperatorConfigurat // deleteChit deletes CHIT func (c *Controller) deleteChopConfig(chopConfig *chi.ClickHouseOperatorConfiguration) error { - log.V(2).Infof("deleteChopConfig(%s/%s):", chopConfig.Namespace, chopConfig.Name) + log.V(2).Info("deleteChopConfig(%s/%s):", chopConfig.Namespace, chopConfig.Name) // TODO // NEED REFACTORING //os.Exit(0) @@ -519,13 +519,13 @@ func (c *Controller) updateCHIObject(chi *chi.ClickHouseInstallation) error { if err != nil { // Error update - log.V(1).Infof("ERROR update CHI (%s/%s): %q", namespace, name, err) + log.V(1).Info("ERROR update CHI (%s/%s): %q", namespace, name, err) return err } if chi.ObjectMeta.ResourceVersion != new.ObjectMeta.ResourceVersion { // Updated - log.V(2).Infof("updateCHIObject(%s/%s): ResourceVersion bump %s=>%s", + log.V(2).Info("updateCHIObject(%s/%s): ResourceVersion bump %s=>%s", namespace, name, chi.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion, ) chi.ObjectMeta.ResourceVersion = new.ObjectMeta.ResourceVersion @@ -540,21 +540,21 @@ func (c *Controller) updateCHIObject(chi *chi.ClickHouseInstallation) error { // updateCHIObjectStatus updates ClickHouseInstallation object's Status func (c *Controller) updateCHIObjectStatus(chi *chi.ClickHouseInstallation, tolerateAbsence bool) error { namespace, name := util.NamespaceName(chi.ObjectMeta) - log.V(2).Infof("Update CHI status (%s/%s)", namespace, name) + log.V(2).Info("Update CHI status (%s/%s)", namespace, name) cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) if err != nil { if tolerateAbsence { return nil } - log.V(1).Infof("ERROR GetCHI (%s/%s): %q", namespace, name, err) + log.V(1).Info("ERROR GetCHI (%s/%s): %q", namespace, name, err) return err } if cur == nil { if 
tolerateAbsence { return nil } - log.V(1).Infof("ERROR GetCHI (%s/%s): NULL returned", namespace, name) + log.V(1).Info("ERROR GetCHI (%s/%s): NULL returned", namespace, name) return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", namespace, name) } @@ -566,7 +566,7 @@ func (c *Controller) updateCHIObjectStatus(chi *chi.ClickHouseInstallation, tole // installFinalizer func (c *Controller) installFinalizer(chi *chi.ClickHouseInstallation) error { namespace, name := util.NamespaceName(chi.ObjectMeta) - log.V(2).Infof("Update CHI status (%s/%s)", namespace, name) + log.V(2).Info("Update CHI status (%s/%s)", namespace, name) cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) if err != nil { @@ -588,7 +588,7 @@ func (c *Controller) installFinalizer(chi *chi.ClickHouseInstallation) error { // uninstallFinalizer func (c *Controller) uninstallFinalizer(chi *chi.ClickHouseInstallation) error { namespace, name := util.NamespaceName(chi.ObjectMeta) - log.V(2).Infof("Update CHI status (%s/%s)", namespace, name) + log.V(2).Info("Update CHI status (%s/%s)", namespace, name) cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) if err != nil { @@ -634,13 +634,13 @@ func (c *Controller) handleObject(obj interface{}) { return } - log.V(1).Infof("Processing object: %s", object.GetName()) + log.V(1).Info("Processing object: %s", object.GetName()) // Get owner - it is expected to be CHI chi, err := c.chiLister.ClickHouseInstallations(object.GetNamespace()).Get(ownerRef.Name) if err != nil { - log.V(1).Infof("ignoring orphaned object '%s' of ClickHouseInstallation '%s'", object.GetSelfLink(), ownerRef.Name) + log.V(1).Info("ignoring orphaned object '%s' of ClickHouseInstallation '%s'", object.GetSelfLink(), ownerRef.Name) return } @@ -650,11 +650,11 @@ func (c *Controller) handleObject(obj interface{}) { // waitForCacheSync is a logger-wrapper over cache.WaitForCacheSync() and it waits 
for caches to populate func waitForCacheSync(name string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool { - log.V(1).Infof("Syncing caches for %s controller", name) + log.V(1).Info("Syncing caches for %s controller", name) if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { utilruntime.HandleError(fmt.Errorf(messageUnableToSync, name)) return false } - log.V(1).Infof("Caches are synced for %s controller", name) + log.V(1).Info("Caches are synced for %s controller", name) return true } diff --git a/pkg/controller/chi/creator.go b/pkg/controller/chi/creator.go index fd2feb1af..e03a418de 100644 --- a/pkg/controller/chi/creator.go +++ b/pkg/controller/chi/creator.go @@ -18,18 +18,17 @@ package chi import ( "errors" "fmt" - log "github.com/golang/glog" - "k8s.io/api/core/v1" - // log "k8s.io/klog" + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" - apps "k8s.io/api/apps/v1" ) // createStatefulSet is an internal function, used in reconcileStatefulSet only func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.ChiHost) error { - log.V(1).Infof("Create StatefulSet %s/%s", statefulSet.Namespace, statefulSet.Name) + log.V(1).Info("Create StatefulSet %s/%s", statefulSet.Namespace, statefulSet.Name) if statefulSet, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil { // Error call Create() return err @@ -49,13 +48,13 @@ func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStat // Convenience shortcuts namespace := newStatefulSet.Namespace name := newStatefulSet.Name - log.V(2).Infof("updateStatefulSet(%s/%s)", namespace, name) + log.V(2).Info("updateStatefulSet(%s/%s)", namespace, name) // Apply newStatefulSet and wait for Generation to change updatedStatefulSet, err := 
c.kubeClient.AppsV1().StatefulSets(namespace).Update(newStatefulSet) if err != nil { // Update failed - log.V(1).Infof("updateStatefulSet(%s/%s) - git err: %v", namespace, name, err) + log.V(1).Info("updateStatefulSet(%s/%s) - git err: %v", namespace, name, err) return err } @@ -65,11 +64,11 @@ func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStat if updatedStatefulSet.Generation == oldStatefulSet.Generation { // Generation is not updated - no changes in .spec section were made - log.V(2).Infof("updateStatefulSet(%s/%s) - no generation change", namespace, name) + log.V(2).Info("updateStatefulSet(%s/%s) - no generation change", namespace, name) return nil } - log.V(1).Infof("updateStatefulSet(%s/%s) - generation change %d=>%d", namespace, name, oldStatefulSet.Generation, updatedStatefulSet.Generation) + log.V(1).Info("updateStatefulSet(%s/%s) - generation change %d=>%d", namespace, name, oldStatefulSet.Generation, updatedStatefulSet.Generation) if err := c.waitHostReady(host); err == nil { // Target generation reached, StatefulSet updated successfully @@ -87,13 +86,13 @@ func (c *Controller) updatePersistentVolume(pv *v1.PersistentVolume) error { // Convenience shortcuts namespace := pv.Namespace name := pv.Name - log.V(2).Infof("updatePersistentVolume(%s/%s)", namespace, name) + log.V(2).Info("updatePersistentVolume(%s/%s)", namespace, name) // Apply newStatefulSet and wait for Generation to change _, err := c.kubeClient.CoreV1().PersistentVolumes().Update(pv) if err != nil { // Update failed - log.V(1).Infof("updatePersistentVolume(%s/%s) - git err: %v", namespace, name, err) + log.V(1).Info("updatePersistentVolume(%s/%s) - git err: %v", namespace, name, err) return err } @@ -111,22 +110,22 @@ func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulS switch c.chop.Config().OnStatefulSetCreateFailureAction { case chop.OnStatefulSetCreateFailureActionAbort: // Report appropriate error, it will break reconcile loop - 
log.V(1).Infof("onStatefulSetCreateFailed(%s/%s) - abort", namespace, name) + log.V(1).Info("onStatefulSetCreateFailed(%s/%s) - abort", namespace, name) return errors.New(fmt.Sprintf("Create failed on %s/%s", namespace, name)) case chop.OnStatefulSetCreateFailureActionDelete: // Delete gracefully failed StatefulSet - log.V(1).Infof("onStatefulSetCreateFailed(%s/%s) - going to DELETE FAILED StatefulSet", namespace, name) + log.V(1).Info("onStatefulSetCreateFailed(%s/%s) - going to DELETE FAILED StatefulSet", namespace, name) _ = c.deleteHost(host) return c.shouldContinueOnCreateFailed() case chop.OnStatefulSetCreateFailureActionIgnore: // Ignore error, continue reconcile loop - log.V(1).Infof("onStatefulSetCreateFailed(%s/%s) - going to ignore error", namespace, name) + log.V(1).Info("onStatefulSetCreateFailed(%s/%s) - going to ignore error", namespace, name) return nil default: - log.V(1).Infof("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", c.chop.Config().OnStatefulSetCreateFailureAction) + log.V(1).Info("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", c.chop.Config().OnStatefulSetCreateFailureAction) return nil } @@ -144,12 +143,12 @@ func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.Statefu switch c.chop.Config().OnStatefulSetUpdateFailureAction { case chop.OnStatefulSetUpdateFailureActionAbort: // Report appropriate error, it will break reconcile loop - log.V(1).Infof("onStatefulSetUpdateFailed(%s/%s) - abort", namespace, name) + log.V(1).Info("onStatefulSetUpdateFailed(%s/%s) - abort", namespace, name) return errors.New(fmt.Sprintf("Update failed on %s/%s", namespace, name)) case chop.OnStatefulSetUpdateFailureActionRollback: // Need to revert current StatefulSet to oldStatefulSet - log.V(1).Infof("onStatefulSetUpdateFailed(%s/%s) - going to ROLLBACK FAILED StatefulSet", namespace, name) + log.V(1).Info("onStatefulSetUpdateFailed(%s/%s) - going to ROLLBACK FAILED StatefulSet", namespace, name) if 
statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name); err != nil { // Unable to get StatefulSet return err @@ -168,11 +167,11 @@ func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.Statefu case chop.OnStatefulSetUpdateFailureActionIgnore: // Ignore error, continue reconcile loop - log.V(1).Infof("onStatefulSetUpdateFailed(%s/%s) - going to ignore error", namespace, name) + log.V(1).Info("onStatefulSetUpdateFailed(%s/%s) - going to ignore error", namespace, name) return nil default: - log.V(1).Infof("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", c.chop.Config().OnStatefulSetUpdateFailureAction) + log.V(1).Info("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", c.chop.Config().OnStatefulSetUpdateFailureAction) return nil } diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go index 4bb0568e6..37eb0ceff 100644 --- a/pkg/controller/chi/deleter.go +++ b/pkg/controller/chi/deleter.go @@ -15,14 +15,13 @@ package chi import ( - "k8s.io/api/core/v1" "time" - log "github.com/golang/glog" - // log "k8s.io/klog" apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopmodel "github.com/altinity/clickhouse-operator/pkg/model" ) @@ -37,14 +36,14 @@ func (c *Controller) deleteHost(host *chop.ChiHost) error { // 5. 
Service // Need to delete all these item - log.V(1).Infof("Controller delete host started %s/%s", host.Address.ClusterName, host.Name) + log.V(1).Info("Controller delete host started %s/%s", host.Address.ClusterName, host.Name) _ = c.deleteStatefulSet(host) _ = c.deletePVC(host) _ = c.deleteConfigMap(host) _ = c.deleteServiceHost(host) - log.V(1).Infof("Controller delete host completed %s/%s", host.Address.ClusterName, host.Name) + log.V(1).Info("Controller delete host completed %s/%s", host.Address.ClusterName, host.Name) return nil } @@ -65,22 +64,22 @@ func (c *Controller) deleteConfigMapsCHI(chi *chop.ClickHouseInstallation) error // Delete ConfigMap err = c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).Delete(configMapCommon, newDeleteOptions()) if err == nil { - log.V(1).Infof("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommon) + log.V(1).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommon) } else if apierrors.IsNotFound(err) { - log.V(1).Infof("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommon) + log.V(1).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommon) err = nil } else { - log.V(1).Infof("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommon, err) + log.V(1).Info("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommon, err) } err = c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).Delete(configMapCommonUsersName, newDeleteOptions()) if err == nil { - log.V(1).Infof("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName) + log.V(1).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName) } else if apierrors.IsNotFound(err) { - log.V(1).Infof("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName) + log.V(1).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName) err = nil } else { - log.V(1).Infof("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommonUsersName, err) + 
log.V(1).Info("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommonUsersName, err) } return err @@ -89,15 +88,15 @@ func (c *Controller) deleteConfigMapsCHI(chi *chop.ClickHouseInstallation) error // statefulSetDeletePod delete a pod of a StatefulSet. This requests StatefulSet to relaunch deleted pod func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet) error { name := chopmodel.CreatePodName(statefulSet) - log.V(1).Infof("Delete Pod %s/%s", statefulSet.Namespace, name) + log.V(1).Info("Delete Pod %s/%s", statefulSet.Namespace, name) err := c.kubeClient.CoreV1().Pods(statefulSet.Namespace).Delete(name, newDeleteOptions()) if err == nil { - log.V(1).Infof("OK delete Pod %s/%s", statefulSet.Namespace, name) + log.V(1).Info("OK delete Pod %s/%s", statefulSet.Namespace, name) } else if apierrors.IsNotFound(err) { - log.V(1).Infof("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name) + log.V(1).Info("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name) err = nil } else { - log.V(1).Infof("FAIL delete ConfigMap %s/%s err:%v", statefulSet.Namespace, name, err) + log.V(1).Info("FAIL delete ConfigMap %s/%s err:%v", statefulSet.Namespace, name, err) } return err @@ -114,14 +113,14 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error { name := chopmodel.CreateStatefulSetName(host) namespace := host.Address.Namespace - log.V(1).Infof("deleteStatefulSet(%s/%s)", namespace, name) + log.V(1).Info("deleteStatefulSet(%s/%s)", namespace, name) statefulSet, err := c.getStatefulSet(host) if err != nil { if apierrors.IsNotFound(err) { - log.V(1).Infof("NEUTRAL not found StatefulSet %s/%s", namespace, name) + log.V(1).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) } else { - log.V(1).Infof("error get StatefulSet %s/%s err:%v", namespace, name, err) + log.V(1).Info("error get StatefulSet %s/%s err:%v", namespace, name, err) } return nil } @@ -136,13 +135,13 @@ func (c *Controller) deleteStatefulSet(host 
*chop.ChiHost) error { // And now delete empty StatefulSet if err := c.kubeClient.AppsV1().StatefulSets(namespace).Delete(name, newDeleteOptions()); err == nil { - log.V(1).Infof("OK delete StatefulSet %s/%s", namespace, name) + log.V(1).Info("OK delete StatefulSet %s/%s", namespace, name) c.syncStatefulSet(host) } else if apierrors.IsNotFound(err) { - log.V(1).Infof("NEUTRAL not found StatefulSet %s/%s", namespace, name) + log.V(1).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) err = nil } else { - log.V(1).Infof("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err) + log.V(1).Info("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err) return nil } @@ -155,10 +154,10 @@ func (c *Controller) syncStatefulSet(host *chop.ChiHost) { // TODO // There should be better way to sync cache if _, err := c.getStatefulSet(host); err == nil { - log.V(2).Infof("cache NOT yet synced") + log.V(2).Info("cache NOT yet synced") time.Sleep(15 * time.Second) } else { - log.V(1).Infof("cache synced") + log.V(1).Info("cache synced") return } } @@ -173,19 +172,19 @@ func (c *Controller) deletePVC(host *chop.ChiHost) error { c.walkActualPVCs(host, func(pvc *v1.PersistentVolumeClaim) { if !chopmodel.HostCanDeletePVC(host, pvc.Name) { - log.V(1).Infof("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name) + log.V(1).Info("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name) // Move to the next PVC return } // Actually delete PVC if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(pvc.Name, newDeleteOptions()); err == nil { - log.V(1).Infof("OK delete PVC %s/%s", namespace, pvc.Name) + log.V(1).Info("OK delete PVC %s/%s", namespace, pvc.Name) } else if apierrors.IsNotFound(err) { - log.V(1).Infof("NEUTRAL not found PVC %s/%s", namespace, pvc.Name) + log.V(1).Info("NEUTRAL not found PVC %s/%s", namespace, pvc.Name) err = nil } else { - log.Errorf("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err) 
+ log.Error("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err) } }) @@ -197,15 +196,15 @@ func (c *Controller) deleteConfigMap(host *chop.ChiHost) error { name := chopmodel.CreateConfigMapPodName(host) namespace := host.Address.Namespace - log.V(1).Infof("deleteConfigMap(%s/%s)", namespace, name) + log.V(1).Info("deleteConfigMap(%s/%s)", namespace, name) if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(name, newDeleteOptions()); err == nil { - log.V(1).Infof("OK delete ConfigMap %s/%s", namespace, name) + log.V(1).Info("OK delete ConfigMap %s/%s", namespace, name) } else if apierrors.IsNotFound(err) { - log.V(1).Infof("NEUTRAL not found ConfigMap %s/%s", namespace, name) + log.V(1).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name) err = nil } else { - log.V(1).Infof("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err) + log.V(1).Info("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err) } return nil @@ -215,7 +214,7 @@ func (c *Controller) deleteConfigMap(host *chop.ChiHost) error { func (c *Controller) deleteServiceHost(host *chop.ChiHost) error { serviceName := chopmodel.CreateStatefulSetServiceName(host) namespace := host.Address.Namespace - log.V(1).Infof("deleteServiceReplica(%s/%s)", namespace, serviceName) + log.V(1).Info("deleteServiceReplica(%s/%s)", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -223,7 +222,7 @@ func (c *Controller) deleteServiceHost(host *chop.ChiHost) error { func (c *Controller) deleteServiceShard(shard *chop.ChiShard) error { serviceName := chopmodel.CreateShardServiceName(shard) namespace := shard.Address.Namespace - log.V(1).Infof("deleteServiceShard(%s/%s)", namespace, serviceName) + log.V(1).Info("deleteServiceShard(%s/%s)", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -231,7 +230,7 @@ func (c *Controller) deleteServiceShard(shard *chop.ChiShard) error { func (c *Controller) deleteServiceCluster(cluster 
*chop.ChiCluster) error { serviceName := chopmodel.CreateClusterServiceName(cluster) namespace := cluster.Address.Namespace - log.V(1).Infof("deleteServiceCluster(%s/%s)", namespace, serviceName) + log.V(1).Info("deleteServiceCluster(%s/%s)", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -239,7 +238,7 @@ func (c *Controller) deleteServiceCluster(cluster *chop.ChiCluster) error { func (c *Controller) deleteServiceCHI(chi *chop.ClickHouseInstallation) error { serviceName := chopmodel.CreateCHIServiceName(chi) namespace := chi.Namespace - log.V(1).Infof("deleteServiceCHI(%s/%s)", namespace, serviceName) + log.V(1).Info("deleteServiceCHI(%s/%s)", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -258,9 +257,9 @@ func (c *Controller) deleteServiceIfExists(namespace, name string) error { // Delete service err = c.kubeClient.CoreV1().Services(namespace).Delete(name, newDeleteOptions()) if err == nil { - log.V(1).Infof("OK delete Service %s/%s", namespace, name) + log.V(1).Info("OK delete Service %s/%s", namespace, name) } else { - log.V(1).Infof("FAIL delete Service %s/%s err:%v", namespace, name, err) + log.V(1).Info("FAIL delete Service %s/%s err:%v", namespace, name, err) } return err diff --git a/pkg/controller/chi/event.go b/pkg/controller/chi/event.go index f89a2fb68..d189049eb 100644 --- a/pkg/controller/chi/event.go +++ b/pkg/controller/chi/event.go @@ -17,11 +17,10 @@ package chi import ( "time" - log "github.com/golang/glog" - // log "k8s.io/klog" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" ) @@ -60,7 +59,7 @@ const ( eventReasonDeleteFailed = "DeleteFailed" ) -func (c *Controller) eventInfo( +func (c *Controller) EventInfo( chi *chop.ClickHouseInstallation, action string, reason string, @@ -69,7 +68,7 @@ func (c 
*Controller) eventInfo( c.emitEvent(chi, eventTypeInfo, action, reason, message) } -func (c *Controller) eventWarning( +func (c *Controller) EventWarning( chi *chop.ClickHouseInstallation, action string, reason string, @@ -78,7 +77,7 @@ func (c *Controller) eventWarning( c.emitEvent(chi, eventTypeWarning, action, reason, message) } -func (c *Controller) eventError( +func (c *Controller) EventError( chi *chop.ClickHouseInstallation, action string, reason string, @@ -139,6 +138,6 @@ func (c *Controller) emitEvent( _, err := c.kubeClient.CoreV1().Events(namespace).Create(event) if err != nil { - log.V(1).Infof("Create Event failed: %v", err) + log.V(1).Error("Create Event failed: %v", err) } } diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go index 1100b1fb7..6fc23021a 100644 --- a/pkg/controller/chi/labeler.go +++ b/pkg/controller/chi/labeler.go @@ -15,13 +15,12 @@ package chi import ( - "github.com/altinity/clickhouse-operator/pkg/util" - log "github.com/golang/glog" - // log "k8s.io/klog" "k8s.io/apimachinery/pkg/apis/meta/v1" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/model" + "github.com/altinity/clickhouse-operator/pkg/util" ) func (c *Controller) labelMyObjectsTree() { @@ -56,21 +55,21 @@ func (c *Controller) labelMyObjectsTree() { namespace, ok2 := c.chop.ConfigManager.GetRuntimeParam(chiv1.OPERATOR_POD_NAMESPACE) if !ok1 || !ok2 { - log.V(1).Infof("ERROR fetch Pod name out of %s/%s", namespace, podName) + log.V(1).Info("ERROR fetch Pod name out of %s/%s", namespace, podName) return } // Pod namespaced name found, fetch the Pod pod, err := c.podLister.Pods(namespace).Get(podName) if err != nil { - log.V(1).Infof("ERROR get Pod %s/%s", namespace, podName) + log.V(1).Info("ERROR get Pod %s/%s", namespace, podName) return } // Put label on the Pod c.addLabels(&pod.ObjectMeta) if _, err := 
c.kubeClient.CoreV1().Pods(namespace).Update(pod); err != nil { - log.V(1).Infof("ERROR put label on Pod %s/%s %v", namespace, podName, err) + log.V(1).Info("ERROR put label on Pod %s/%s %v", namespace, podName, err) } // Find parent ReplicaSet @@ -86,21 +85,21 @@ func (c *Controller) labelMyObjectsTree() { if replicaSetName == "" { // ReplicaSet not found - log.V(1).Infof("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName) + log.V(1).Info("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName) return } // ReplicaSet namespaced name found, fetch the ReplicaSet replicaSet, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Get(replicaSetName, v1.GetOptions{}) if err != nil { - log.V(1).Infof("ERROR get ReplicaSet %s/%s %v", namespace, replicaSetName, err) + log.V(1).Info("ERROR get ReplicaSet %s/%s %v", namespace, replicaSetName, err) return } // Put label on the ReplicaSet c.addLabels(&replicaSet.ObjectMeta) if _, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Update(replicaSet); err != nil { - log.V(1).Infof("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err) + log.V(1).Info("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err) } // Find parent Deployment @@ -116,21 +115,21 @@ func (c *Controller) labelMyObjectsTree() { if deploymentName == "" { // Deployment not found - log.V(1).Infof("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName) + log.V(1).Info("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName) return } // Deployment namespaced name found, fetch the Deployment deployment, err := c.kubeClient.AppsV1().Deployments(namespace).Get(deploymentName, v1.GetOptions{}) if err != nil { - log.V(1).Infof("ERROR get Deployment %s/%s", namespace, deploymentName) + log.V(1).Info("ERROR get Deployment %s/%s", namespace, deploymentName) return } // Put label on the Deployment c.addLabels(&deployment.ObjectMeta) if _, err := 
c.kubeClient.AppsV1().Deployments(namespace).Update(deployment); err != nil { - log.V(1).Infof("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err) + log.V(1).Info("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err) } } diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index 659a1e148..5c95602a0 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -17,9 +17,7 @@ package chi import ( "k8s.io/api/core/v1" - log "github.com/golang/glog" - // log "k8s.io/klog" - + log "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopmodel "github.com/altinity/clickhouse-operator/pkg/model" ) @@ -49,7 +47,7 @@ func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Con name := chopmodel.CreatePodName(host) pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions()) if err != nil { - log.Errorf("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) + log.Error("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) return } @@ -64,7 +62,7 @@ func (c *Controller) walkContainerStatuses(host *chop.ChiHost, f func(status *v1 name := chopmodel.CreatePodName(host) pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions()) if err != nil { - log.Errorf("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) + log.Error("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) return } diff --git a/pkg/controller/chi/poller.go b/pkg/controller/chi/poller.go index 0b751f045..92c12ed68 100644 --- a/pkg/controller/chi/poller.go +++ b/pkg/controller/chi/poller.go @@ -20,12 +20,10 @@ import ( "fmt" "time" - log "github.com/golang/glog" - // log "k8s.io/klog" - apps "k8s.io/api/apps/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chop 
"github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/model" ) @@ -71,24 +69,24 @@ func (c *Controller) waitHostRunning(host *chop.ChiHost) error { for { if c.isHostRunning(host) { // All is good, job done, exit - log.V(1).Infof("waitHostRunning(%s/%s)-OK", namespace, name) + log.V(1).Info("waitHostRunning(%s/%s)-OK", namespace, name) return nil } // Object is found, function not positive if time.Since(start) >= (time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second) { // Start bothering with log messages after some time only - log.V(1).Infof("waitHostRunning(%s/%s)-WAIT", namespace, name) + log.V(1).Info("waitHostRunning(%s/%s)-WAIT", namespace, name) } if time.Since(start) >= (time.Duration(c.chop.Config().StatefulSetUpdateTimeout) * time.Second) { // Timeout reached, no good result available, time to quit - log.V(1).Infof("ERROR waitHostRunning(%s/%s) - TIMEOUT reached", namespace, name) + log.V(1).Info("ERROR waitHostRunning(%s/%s) - TIMEOUT reached", namespace, name) return errors.New(fmt.Sprintf("waitHostRunning(%s/%s) - wait timeout", namespace, name)) } // Wait some more time - log.V(2).Infof("waithostRunning(%s/%s)", namespace, name) + log.V(2).Info("waithostRunning(%s/%s)", namespace, name) select { case <-time.After(time.Duration(c.chop.Config().StatefulSetUpdatePollPeriod) * time.Second): } @@ -153,31 +151,31 @@ func (c *Controller) pollStatefulSet(entity interface{}, opts *StatefulSetPollOp // Object is found if f(statefulSet) { // All is good, job done, exit - log.V(1).Infof("pollStatefulSet(%s/%s)-OK :%s", namespace, name, model.StrStatefulSetStatus(&statefulSet.Status)) + log.V(1).Info("pollStatefulSet(%s/%s)-OK :%s", namespace, name, model.StrStatefulSetStatus(&statefulSet.Status)) return nil } // Object is found, but function is not positive if time.Since(start) >= opts.StartBotheringAfterTimeout { // Start bothering with log messages after some time 
only - log.V(1).Infof("pollStatefulSet(%s/%s)-WAIT:%s", namespace, name, model.StrStatefulSetStatus(&statefulSet.Status)) + log.V(1).Info("pollStatefulSet(%s/%s)-WAIT:%s", namespace, name, model.StrStatefulSetStatus(&statefulSet.Status)) } } else if apierrors.IsNotFound(err) { // Object is not found - it either failed to be created or just still not created if time.Since(start) >= opts.CreateTimeout { // No more wait for object to be created. Consider create as failed. if opts.CreateTimeout > 0 { - log.V(1).Infof("ERROR pollStatefulSet(%s/%s) Get() FAILED - StatefulSet still not found, abort", namespace, name) + log.V(1).Info("ERROR pollStatefulSet(%s/%s) Get() FAILED - StatefulSet still not found, abort", namespace, name) } else { - log.V(1).Infof("pollStatefulSet(%s/%s) Get() NEUTRAL StatefulSet not found and no wait required", namespace, name) + log.V(1).Info("pollStatefulSet(%s/%s) Get() NEUTRAL StatefulSet not found and no wait required", namespace, name) } return err } // Object with such name not found - may be is still being created - wait for it - log.V(1).Infof("pollStatefulSet(%s/%s)-WAIT: object not found. Not created yet?", namespace, name) + log.V(1).Info("pollStatefulSet(%s/%s)-WAIT: object not found. 
Not created yet?", namespace, name) } else { // Some kind of total error - log.Errorf("ERROR pollStatefulSet(%s/%s) Get() FAILED", namespace, name) + log.Error("ERROR pollStatefulSet(%s/%s) Get() FAILED", namespace, name) return err } @@ -185,12 +183,12 @@ func (c *Controller) pollStatefulSet(entity interface{}, opts *StatefulSetPollOp if time.Since(start) >= opts.Timeout { // Timeout reached, no good result available, time to quit - log.V(1).Infof("ERROR pollStatefulSet(%s/%s) - TIMEOUT reached", namespace, name) + log.V(1).Info("ERROR pollStatefulSet(%s/%s) - TIMEOUT reached", namespace, name) return errors.New(fmt.Sprintf("waitStatefulSet(%s/%s) - wait timeout", namespace, name)) } // Wait some more time - log.V(2).Infof("pollStatefulSet(%s/%s)", namespace, name) + log.V(2).Info("pollStatefulSet(%s/%s)", namespace, name) select { case <-time.After(opts.Interval): } @@ -212,24 +210,24 @@ func (c *Controller) pollHost(host *chop.ChiHost, opts *StatefulSetPollOptions, for { if f(host) { // All is good, job done, exit - log.V(1).Infof("pollHost(%s/%s)-OK", namespace, name) + log.V(1).Info("pollHost(%s/%s)-OK", namespace, name) return nil } // Object is found, but function is not positive if time.Since(start) >= opts.StartBotheringAfterTimeout { // Start bothering with log messages after some time only - log.V(1).Infof("pollHost(%s/%s)-WAIT", namespace, name) + log.V(1).Info("pollHost(%s/%s)-WAIT", namespace, name) } if time.Since(start) >= opts.Timeout { // Timeout reached, no good result available, time to quit - log.V(1).Infof("ERROR pollHost(%s/%s) - TIMEOUT reached", namespace, name) + log.V(1).Info("ERROR pollHost(%s/%s) - TIMEOUT reached", namespace, name) return errors.New(fmt.Sprintf("pollHost(%s/%s) - wait timeout", namespace, name)) } // Wait some more time - log.V(2).Infof("pollHost(%s/%s)", namespace, name) + log.V(2).Info("pollHost(%s/%s)", namespace, name) select { case <-time.After(opts.Interval): } diff --git a/pkg/controller/chi/volumes.go 
b/pkg/controller/chi/volumes.go index dfe17444a..aecc6097f 100644 --- a/pkg/controller/chi/volumes.go +++ b/pkg/controller/chi/volumes.go @@ -17,9 +17,7 @@ package chi import ( "k8s.io/api/core/v1" - log "github.com/golang/glog" - // log "k8s.io/klog" - + log "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopmodel "github.com/altinity/clickhouse-operator/pkg/model" ) @@ -29,7 +27,7 @@ func (c *Controller) walkPVCs(host *chop.ChiHost, f func(pvc *v1.PersistentVolum name := chopmodel.CreatePodName(host) pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions()) if err != nil { - log.Errorf("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) + log.Error("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) return } @@ -42,7 +40,7 @@ func (c *Controller) walkPVCs(host *chop.ChiHost, f func(pvc *v1.PersistentVolum pvcName := volume.PersistentVolumeClaim.ClaimName pvc, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(pvcName, newGetOptions()) if err != nil { - log.Errorf("FAIL get PVC %s/%s err:%v", namespace, pvcName, err) + log.Error("FAIL get PVC %s/%s err:%v", namespace, pvcName, err) continue } @@ -56,7 +54,7 @@ func (c *Controller) walkActualPVCs(host *chop.ChiHost, f func(pvc *v1.Persisten pvcList, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(newListOptions(labeler.GetSelectorHostScope(host))) if err != nil { - log.Errorf("FAIL get list of PVC for host %s/%s err:%v", namespace, host.Name, err) + log.Error("FAIL get list of PVC for host %s/%s err:%v", namespace, host.Name, err) return } @@ -72,7 +70,7 @@ func (c *Controller) walkPVs(host *chop.ChiHost, f func(pv *v1.PersistentVolume) c.walkPVCs(host, func(pvc *v1.PersistentVolumeClaim) { pv, err := c.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, newGetOptions()) if err != nil { - log.Errorf("FAIL get PV %s err:%v", 
pvc.Spec.VolumeName, err) + log.Error("FAIL get PV %s err:%v", pvc.Spec.VolumeName, err) return } f(pv) diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index 58182dd1e..f87ef7ea2 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -49,7 +49,7 @@ type worker struct { func (c *Controller) newWorker(queue workqueue.RateLimitingInterface) *worker { return &worker{ c: c, - a: NewAnnouncer(c), + a: NewAnnouncer().WithController(c), queue: queue, normalizer: chopmodel.NewNormalizer(c.chop), schemer: chopmodel.NewSchemer( diff --git a/pkg/model/ch_config_generator.go b/pkg/model/ch_config_generator.go index b5c7a429b..abcc7f31a 100644 --- a/pkg/model/ch_config_generator.go +++ b/pkg/model/ch_config_generator.go @@ -17,6 +17,7 @@ package model import ( "bytes" "fmt" + chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" xmlbuilder "github.com/altinity/clickhouse-operator/pkg/model/builder/xml" "github.com/altinity/clickhouse-operator/pkg/util" diff --git a/pkg/model/clickhouse/connection.go b/pkg/model/clickhouse/connection.go index 971bde728..f3809b374 100644 --- a/pkg/model/clickhouse/connection.go +++ b/pkg/model/clickhouse/connection.go @@ -20,8 +20,7 @@ import ( "fmt" "time" - log "github.com/golang/glog" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" _ "github.com/mailru/go-clickhouse" ) @@ -39,11 +38,10 @@ func NewConnection(params *CHConnectionParams) *CHConnection { } func (c *CHConnection) connect() { - - log.V(2).Infof("Establishing connection: %s", c.params.GetDSNWithHiddenCredentials()) + log.V(2).Info("Establishing connection: %s", c.params.GetDSNWithHiddenCredentials()) dbConnection, err := databasesql.Open("clickhouse", c.params.GetDSN()) if err != nil { - log.V(1).Infof("FAILED Open(%s) %v", c.params.GetDSNWithHiddenCredentials(), err) + log.V(1).Info("FAILED Open(%s) %v", c.params.GetDSNWithHiddenCredentials(), err) return } @@ -52,7 
+50,7 @@ func (c *CHConnection) connect() { defer cancel() if err := dbConnection.PingContext(ctx); err != nil { - log.V(1).Infof("FAILED Ping(%s) %v", c.params.GetDSNWithHiddenCredentials(), err) + log.V(1).Info("FAILED Ping(%s) %v", c.params.GetDSNWithHiddenCredentials(), err) _ = dbConnection.Close() return } @@ -62,7 +60,7 @@ func (c *CHConnection) connect() { func (c *CHConnection) ensureConnected() bool { if c.conn != nil { - log.V(2).Infof("Already connected: %s", c.params.GetDSNWithHiddenCredentials()) + log.V(2).Info("Already connected: %s", c.params.GetDSNWithHiddenCredentials()) return true } @@ -89,7 +87,7 @@ func (q *Query) Close() { err := q.Rows.Close() q.Rows = nil if err != nil { - log.V(1).Infof("UNABLE to close rows. err: %v", err) + log.V(1).Info("UNABLE to close rows. err: %v", err) } } @@ -122,7 +120,7 @@ func (c *CHConnection) Query(sql string) (*Query, error) { return nil, err } - log.V(2).Infof("clickhouse.QueryContext():'%s'", sql) + log.V(2).Info("clickhouse.QueryContext():'%s'", sql) return &Query{ ctx: ctx, @@ -149,11 +147,11 @@ func (c *CHConnection) Exec(sql string) error { _, err := c.conn.ExecContext(ctx, sql) if err != nil { - log.V(1).Infof("FAILED Exec(%s) %v for SQL: %s", c.params.GetDSNWithHiddenCredentials(), err, sql) + log.V(1).Info("FAILED Exec(%s) %v for SQL: %s", c.params.GetDSNWithHiddenCredentials(), err, sql) return err } - log.V(2).Infof("clickhouse.Exec():\n", sql) + log.V(2).Info("clickhouse.Exec():\n", sql) return nil } diff --git a/pkg/model/clickhouse/pool.go b/pkg/model/clickhouse/pool.go index 1d3fdd71f..56d5463ee 100644 --- a/pkg/model/clickhouse/pool.go +++ b/pkg/model/clickhouse/pool.go @@ -15,8 +15,7 @@ package clickhouse import ( - log "github.com/golang/glog" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" "sync" ) @@ -32,7 +31,7 @@ func GetPooledDBConnection(params *CHConnectionParams) *CHConnection { key := makePoolKey(params) if connection, existed := 
dbConnectionPool.Load(key); existed { - log.V(2).Infof("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } @@ -43,16 +42,16 @@ func GetPooledDBConnection(params *CHConnectionParams) *CHConnection { // Double check for race condition if connection, existed := dbConnectionPool.Load(key); existed { - log.V(2).Infof("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } - log.V(2).Infof("Add connection to the pool: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).Info("Add connection to the pool: %s", params.GetDSNWithHiddenCredentials()) dbConnectionPool.Store(key, NewConnection(params)) // Fetch from the pool if connection, existed := dbConnectionPool.Load(key); existed { - log.V(2).Infof("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } diff --git a/pkg/model/creator.go b/pkg/model/creator.go index ab06d85a8..5e6109a58 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -18,13 +18,12 @@ import ( "fmt" // "net/url" - log "github.com/golang/glog" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" "github.com/altinity/clickhouse-operator/pkg/util" @@ -53,7 +52,7 @@ func NewCreator( func (c *Creator) CreateServiceCHI() *corev1.Service { serviceName := CreateCHIServiceName(c.chi) - log.V(1).Infof("CreateServiceCHI(%s/%s)", c.chi.Namespace, 
serviceName) + log.V(1).Info("CreateServiceCHI(%s/%s)", c.chi.Namespace, serviceName) if template, ok := c.chi.GetCHIServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -100,7 +99,7 @@ func (c *Creator) CreateServiceCHI() *corev1.Service { func (c *Creator) CreateServiceCluster(cluster *chiv1.ChiCluster) *corev1.Service { serviceName := CreateClusterServiceName(cluster) - log.V(1).Infof("CreateServiceCluster(%s/%s)", cluster.Address.Namespace, serviceName) + log.V(1).Info("CreateServiceCluster(%s/%s)", cluster.Address.Namespace, serviceName) if template, ok := cluster.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -119,7 +118,7 @@ func (c *Creator) CreateServiceCluster(cluster *chiv1.ChiCluster) *corev1.Servic func (c *Creator) CreateServiceShard(shard *chiv1.ChiShard) *corev1.Service { serviceName := CreateShardServiceName(shard) - log.V(1).Infof("CreateServiceShard(%s/%s)", shard.Address.Namespace, serviceName) + log.V(1).Info("CreateServiceShard(%s/%s)", shard.Address.Namespace, serviceName) if template, ok := shard.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -139,7 +138,7 @@ func (c *Creator) CreateServiceHost(host *chiv1.ChiHost) *corev1.Service { serviceName := CreateStatefulSetServiceName(host) statefulSetName := CreateStatefulSetName(host) - log.V(1).Infof("CreateServiceHost(%s/%s) for Set %s", host.Address.Namespace, serviceName, statefulSetName) + log.V(1).Info("CreateServiceHost(%s/%s) for Set %s", host.Address.Namespace, serviceName, statefulSetName) if template, ok := host.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -194,7 +193,7 @@ func (c *Creator) verifyServiceTemplatePorts(template *chiv1.ChiServiceTemplate) servicePort := &template.Spec.Ports[i] if (servicePort.Port < 1) || (servicePort.Port > 65535) { msg := 
fmt.Sprintf("verifyServiceTemplatePorts(%s) INCORRECT PORT: %d ", template.Name, servicePort.Port) - log.V(1).Infof(msg) + log.V(1).Info(msg) return fmt.Errorf(msg) } } @@ -386,7 +385,7 @@ func (c *Creator) personalizeStatefulSetTemplate(statefulSet *apps.StatefulSet, // In case we have default LogVolumeClaimTemplate specified - need to append log container to Pod Template if host.Templates.LogVolumeClaimTemplate != "" { addContainer(&statefulSet.Spec.Template.Spec, newDefaultLogContainer()) - log.V(1).Infof("setupStatefulSetPodTemplate() add log container for statefulSet %s", statefulSetName) + log.V(1).Info("setupStatefulSetPodTemplate() add log container for statefulSet %s", statefulSetName) } } @@ -400,11 +399,11 @@ func (c *Creator) getPodTemplate(host *chiv1.ChiHost) *chiv1.ChiPodTemplate { // Host references known PodTemplate // Make local copy of this PodTemplate, in order not to spoil the original common-used template podTemplate = podTemplate.DeepCopy() - log.V(1).Infof("getPodTemplate() statefulSet %s use custom template %s", statefulSetName, podTemplate.Name) + log.V(1).Info("getPodTemplate() statefulSet %s use custom template %s", statefulSetName, podTemplate.Name) } else { // Host references UNKNOWN PodTemplate, will use default one podTemplate = c.newDefaultPodTemplate(statefulSetName) - log.V(1).Infof("getPodTemplate() statefulSet %s use default generated template", statefulSetName) + log.V(1).Info("getPodTemplate() statefulSet %s use default generated template", statefulSetName) } // Here we have local copy of Pod Template, to be used to create StatefulSet @@ -649,14 +648,14 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 3. Specified (by volumeClaimTemplateName) VolumeClaimTemplate has to be available as well if _, ok := c.chi.GetVolumeClaimTemplate(volumeClaimTemplateName); !ok { // Incorrect/unknown .templates.VolumeClaimTemplate specified - log.V(1).Infof("Can not find volumeClaimTemplate %s. 
Volume claim can not be mounted", volumeClaimTemplateName) + log.V(1).Info("Can not find volumeClaimTemplate %s. Volume claim can not be mounted", volumeClaimTemplateName) return nil } // 4. Specified container has to be available container := getContainerByName(statefulSet, containerName) if container == nil { - log.V(1).Infof("Can not find container %s. Volume claim can not be mounted", containerName) + log.V(1).Info("Can not find container %s. Volume claim can not be mounted", containerName) return nil } @@ -675,7 +674,7 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 1. Check whether this VolumeClaimTemplate is already listed in VolumeMount of this container if volumeMount.Name == existingVolumeMount.Name { // This .templates.VolumeClaimTemplate is already used in VolumeMount - log.V(1).Infof( + log.V(1).Info( "setupStatefulSetApplyVolumeClaim(%s) container %s volumeClaimTemplateName %s already used", statefulSet.Name, container.Name, @@ -687,7 +686,7 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 2. 
Check whether `mountPath` (say '/var/lib/clickhouse') is already mounted if volumeMount.MountPath == existingVolumeMount.MountPath { // `mountPath` (say /var/lib/clickhouse) is already mounted - log.V(1).Infof( + log.V(1).Info( "setupStatefulSetApplyVolumeClaim(%s) container %s mountPath %s already used", statefulSet.Name, container.Name, @@ -709,7 +708,7 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( ) } - log.V(1).Infof("setupStatefulSetApplyVolumeClaim(%s) container %s mounted %s on %s", + log.V(1).Info("setupStatefulSetApplyVolumeClaim(%s) container %s mounted %s on %s", statefulSet.Name, container.Name, volumeMount.Name, diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index 59da6cf34..d4996bf43 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -16,13 +16,15 @@ package model import ( "fmt" + + "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + kublabels "k8s.io/apimachinery/pkg/labels" + "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com" chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" "github.com/altinity/clickhouse-operator/pkg/util" - "k8s.io/api/core/v1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" - kublabels "k8s.io/apimachinery/pkg/labels" ) const ( diff --git a/pkg/model/namer.go b/pkg/model/namer.go index 2c9221ddb..5eec606c6 100644 --- a/pkg/model/namer.go +++ b/pkg/model/namer.go @@ -16,13 +16,14 @@ package model import ( "fmt" - "k8s.io/api/core/v1" "strconv" "strings" + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/util" - apps "k8s.io/api/apps/v1" ) const ( diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index 4bb46ef56..2f32e28af 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -20,16 +20,14 @@ import ( "fmt" "strings" - log 
"github.com/golang/glog" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" + chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/chop" + "github.com/altinity/clickhouse-operator/pkg/util" "gopkg.in/d4l3k/messagediff.v1" "k8s.io/api/core/v1" v12 "k8s.io/apimachinery/pkg/apis/meta/v1" - - chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" - "github.com/altinity/clickhouse-operator/pkg/chop" - "github.com/altinity/clickhouse-operator/pkg/util" ) // Normalizer @@ -71,12 +69,16 @@ func (n *Normalizer) CreateTemplatedCHI(chi *chiv1.ClickHouseInstallation, withD var useTemplates []chiv1.ChiUseTemplate - for _, template := range n.chop.Config().FindAutoTemplates() { - useTemplates = append(useTemplates, chiv1.ChiUseTemplate{ - Name: template.Name, - Namespace: template.Namespace, - UseType: useTypeMerge, - }) + if autoTemplates := n.chop.Config().FindAutoTemplates(); len(autoTemplates) > 0 { + log.V(1).Info("Adding %d auto-templates", len(autoTemplates)) + for _, template := range autoTemplates { + log.V(1).Info("Add %s/%s auto-template", template.Name, template.Namespace) + useTemplates = append(useTemplates, chiv1.ChiUseTemplate{ + Name: template.Name, + Namespace: template.Namespace, + UseType: useTypeMerge, + }) + } } if len(chi.Spec.UseTemplates) > 0 { @@ -91,10 +93,10 @@ func (n *Normalizer) CreateTemplatedCHI(chi *chiv1.ClickHouseInstallation, withD for i := range useTemplates { useTemplate := &useTemplates[i] if template := n.chop.Config().FindTemplate(useTemplate, chi.Namespace); template == nil { - log.V(1).Infof("UNABLE to find template %s/%s referenced in useTemplates. Skip it.", useTemplate.Namespace, useTemplate.Name) + log.V(1).Info("UNABLE to find template %s/%s referenced in useTemplates. 
Skip it.", useTemplate.Namespace, useTemplate.Name) } else { (&n.chi.Spec).MergeFrom(&template.Spec, chiv1.MergeTypeOverrideByNonEmptyValues) - log.V(2).Infof("Merge template %s/%s referenced in useTemplates", useTemplate.Namespace, useTemplate.Name) + log.V(2).Info("Merge template %s/%s referenced in useTemplates", useTemplate.Namespace, useTemplate.Name) } } @@ -182,7 +184,7 @@ func (n *Normalizer) getHostTemplate(host *chiv1.ChiHost) *chiv1.ChiHostTemplate hostTemplate, ok := host.GetHostTemplate() if ok { // Host references known HostTemplate - log.V(2).Infof("getHostTemplate() statefulSet %s use custom host template %s", statefulSetName, hostTemplate.Name) + log.V(2).Info("getHostTemplate() statefulSet %s use custom host template %s", statefulSetName, hostTemplate.Name) return hostTemplate } @@ -203,7 +205,7 @@ func (n *Normalizer) getHostTemplate(host *chiv1.ChiHost) *chiv1.ChiHostTemplate hostTemplate = newDefaultHostTemplate(statefulSetName) } - log.V(3).Infof("getHostTemplate() statefulSet %s use default host template", statefulSetName) + log.V(3).Info("getHostTemplate() statefulSet %s use default host template", statefulSetName) return hostTemplate } diff --git a/pkg/model/schemer.go b/pkg/model/schemer.go index 98fa84d34..1d4441e8c 100644 --- a/pkg/model/schemer.go +++ b/pkg/model/schemer.go @@ -19,9 +19,8 @@ import ( "strings" "github.com/MakeNowJust/heredoc" - log "github.com/golang/glog" - // log "k8s.io/klog" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/model/clickhouse" "github.com/altinity/clickhouse-operator/pkg/util" @@ -71,18 +70,18 @@ func (s *Schemer) getObjectListFromClickHouse(endpoints []string, sql string) ([ // Fetch data from any of specified services var query *clickhouse.Query = nil for _, endpoint := range endpoints { - log.V(1).Infof("Run query on: %s of %v", endpoint, endpoints) + 
log.V(1).Info("Run query on: %s of %v", endpoint, endpoints) query, err = s.getCHConnection(endpoint).Query(sql) if err == nil { // One of specified services returned result, no need to iterate more break } else { - log.V(1).Infof("Run query on: %s of %v FAILED skip to next. err: %v", endpoint, endpoints, err) + log.V(1).Info("Run query on: %s of %v FAILED skip to next. err: %v", endpoint, endpoints, err) } } if err != nil { - log.V(1).Infof("Run query FAILED on all %v", endpoints) + log.V(1).Info("Run query FAILED on all %v", endpoints) return nil, nil, err } @@ -95,7 +94,7 @@ func (s *Schemer) getObjectListFromClickHouse(endpoints []string, sql string) ([ names = append(names, name) statements = append(statements, statement) } else { - log.V(1).Infof("UNABLE to scan row err: %v", err) + log.V(1).Info("UNABLE to scan row err: %v", err) } } @@ -123,7 +122,7 @@ func (s *Schemer) getCreateDistributedObjects(host *chop.ChiHost) ([]string, []s // remove new host from the list. See https://stackoverflow.com/questions/37334119/how-to-delete-an-element-from-a-slice-in-golang hosts[hostIndex] = hosts[nHosts-1] hosts = hosts[:nHosts-1] - log.V(1).Infof("Extracting distributed table definitions from hosts: %v", hosts) + log.V(1).Info("Extracting distributed table definitions from hosts: %v", hosts) cluster_tables := fmt.Sprintf("remote('%s', system, tables)", strings.Join(hosts, ",")) @@ -172,28 +171,28 @@ func (s *Schemer) getCreateDistributedObjects(host *chop.ChiHost) ([]string, []s cluster_tables, )) - log.V(1).Infof("fetch dbs list") - log.V(1).Infof("dbs sql\n%v", sqlDBs) + log.V(1).Info("fetch dbs list") + log.V(1).Info("dbs sql\n%v", sqlDBs) names1, sqlStatements1, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfCHI(host.GetCHI()), sqlDBs) - log.V(1).Infof("names1:") + log.V(1).Info("names1:") for _, v := range names1 { - log.V(1).Infof("names1: %s", v) + log.V(1).Info("names1: %s", v) } - log.V(1).Infof("sql1:") + log.V(1).Info("sql1:") for _, v := range 
sqlStatements1 { - log.V(1).Infof("sql1: %s", v) + log.V(1).Info("sql1: %s", v) } - log.V(1).Infof("fetch table list") - log.V(1).Infof("tbl sql\n%v", sqlTables) + log.V(1).Info("fetch table list") + log.V(1).Info("tbl sql\n%v", sqlTables) names2, sqlStatements2, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfCHI(host.GetCHI()), sqlTables) - log.V(1).Infof("names2:") + log.V(1).Info("names2:") for _, v := range names2 { - log.V(1).Infof("names2: %s", v) + log.V(1).Info("names2: %s", v) } - log.V(1).Infof("sql2:") + log.V(1).Info("sql2:") for _, v := range sqlStatements2 { - log.V(1).Infof("sql2: %s", v) + log.V(1).Info("sql2: %s", v) } return append(names1, names2...), append(sqlStatements1, sqlStatements2...), nil @@ -226,7 +225,7 @@ func (s *Schemer) getCreateReplicaObjects(host *chop.ChiHost) ([]string, []strin // remove new replica from the list. See https://stackoverflow.com/questions/37334119/how-to-delete-an-element-from-a-slice-in-golang replicas[replicaIndex] = replicas[nReplicas-1] replicas = replicas[:nReplicas-1] - log.V(1).Infof("Extracting replicated table definitions from %v", replicas) + log.V(1).Info("Extracting replicated table definitions from %v", replicas) system_tables := fmt.Sprintf("remote('%s', system, tables)", strings.Join(replicas, ",")) @@ -274,28 +273,28 @@ func (s *Schemer) hostGetDropTables(host *chop.ChiHost) ([]string, []string, err // HostDeleteTables func (s *Schemer) HostDeleteTables(host *chop.ChiHost) error { tableNames, dropTableSQLs, _ := s.hostGetDropTables(host) - log.V(1).Infof("Drop tables: %v as %v", tableNames, dropTableSQLs) + log.V(1).Info("Drop tables: %v as %v", tableNames, dropTableSQLs) return s.hostApplySQLs(host, dropTableSQLs, false) } // HostCreateTables func (s *Schemer) HostCreateTables(host *chop.ChiHost) error { - log.V(1).Infof("Migrating schema objects to host %s", host.Address.HostName) + log.V(1).Info("Migrating schema objects to host %s", host.Address.HostName) var err1, err2 error if names, 
createSQLs, err := s.getCreateReplicaObjects(host); err == nil { if len(createSQLs) > 0 { - log.V(1).Infof("Creating replica objects at %s: %v", host.Address.HostName, names) - log.V(1).Infof("\n%v", createSQLs) + log.V(1).Info("Creating replica objects at %s: %v", host.Address.HostName, names) + log.V(1).Info("\n%v", createSQLs) err1 = s.hostApplySQLs(host, createSQLs, true) } } if names, createSQLs, err := s.getCreateDistributedObjects(host); err == nil { if len(createSQLs) > 0 { - log.V(1).Infof("Creating distributed objects at %s: %v", host.Address.HostName, names) - log.V(1).Infof("\n%v", createSQLs) + log.V(1).Info("Creating distributed objects at %s: %v", host.Address.HostName, names) + log.V(1).Info("\n%v", createSQLs) err2 = s.hostApplySQLs(host, createSQLs, true) } } @@ -362,7 +361,7 @@ func (s *Schemer) applySQLs(hosts []string, sqls []string, retry bool) error { for _, host := range hosts { conn := s.getCHConnection(host) if conn == nil { - log.V(1).Infof("Unable to get conn to host %s", host) + log.V(1).Info("Unable to get conn to host %s", host) continue } err := util.Retry(maxTries, "Applying sqls", func() error { diff --git a/pkg/util/fs.go b/pkg/util/fs.go index 89a4e7026..7e0843089 100644 --- a/pkg/util/fs.go +++ b/pkg/util/fs.go @@ -15,9 +15,6 @@ package util import ( - log "github.com/golang/glog" - // log "k8s.io/klog" - "io/ioutil" "os" "path/filepath" @@ -55,7 +52,6 @@ func ReadFilesIntoMap(path string, isOurFile func(string) bool) map[string]strin file := matches[i] if isOurFile(file) { // Pick our files only - log.V(2).Infof("Reading file %s\n", file) if content, err := ioutil.ReadFile(file); (err == nil) && (len(content) > 0) { // File content read successfully and file has some content if files == nil { diff --git a/pkg/util/retry.go b/pkg/util/retry.go index 143b3114c..eb46964fd 100644 --- a/pkg/util/retry.go +++ b/pkg/util/retry.go @@ -15,10 +15,9 @@ package util import ( - log "github.com/golang/glog" - // log "k8s.io/klog" - "time" + 
+ log "github.com/altinity/clickhouse-operator/pkg/announcer" ) // Retry @@ -30,7 +29,7 @@ func Retry(tries int, desc string, f func() error) error { // All ok, no need to retry more if try > 1 { // Done, but after some retries, this is not 'clean' - log.V(1).Infof("DONE attempt %d of %d: %s", try, tries, desc) + log.V(1).Info("DONE attempt %d of %d: %s", try, tries, desc) } return nil } @@ -38,16 +37,16 @@ func Retry(tries int, desc string, f func() error) error { if try < tries { // Try failed, need to sleep and retry seconds := try * 5 - log.V(1).Infof("FAILED attempt %d of %d, sleep %d sec and retry: %s", try, tries, seconds, desc) + log.V(1).Info("FAILED attempt %d of %d, sleep %d sec and retry: %s", try, tries, seconds, desc) select { case <-time.After(time.Duration(seconds) * time.Second): } } else if tries == 1 { // On single try do not put so much emotion. It just failed and user is not intended to retry - log.V(1).Infof("FAILED single try. No retries will be made for %s", desc) + log.V(1).Info("FAILED single try. No retries will be made for %s", desc) } else { // On last try no need to wait more - log.V(1).Infof("FAILED AND ABORT. All %d attempts: %s", tries, desc) + log.V(1).Info("FAILED AND ABORT. 
All %d attempts: %s", tries, desc) } } diff --git a/tests/test.py b/tests/test.py index b8a1e5792..60d2b62f8 100644 --- a/tests/test.py +++ b/tests/test.py @@ -49,7 +49,7 @@ test_operator.test_006, test_operator.test_007, test_operator.test_008, - (test_operator.test_009, {"version_from": "0.12.0"}), + (test_operator.test_009, {"version_from": "0.13.0"}), test_operator.test_010, test_operator.test_011, test_operator.test_011_1, From baf1d84ed8d9fa10e4010bd03d2b72204a3d7e2c Mon Sep 17 00:00:00 2001 From: Benjamin Brombach Date: Fri, 22 Jan 2021 18:41:35 +0100 Subject: [PATCH 11/78] #634 Checking if also password_double_sha1_hex is present to not create an additional password_sha256_hex --- pkg/model/normalizer.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index 2f32e28af..c57435370 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -1282,8 +1282,9 @@ func (n *Normalizer) normalizeConfigurationUsers(users *chiv1.Settings) { } _, okPasswordSHA256 := (*users)[username+"/password_sha256_hex"] - // if SHA256 is not set, initialize it from the password - if pass != "" && !okPasswordSHA256 { + _, okPasswordDoubleSHA1 := (*users)[username+"/password_double_sha1_hex"] + // if SHA256 or DoubleSHA1 are not set, initialize SHA256 from the password + if pass != "" && !okPasswordSHA256 && !okPasswordDoubleSHA1 { pass_sha256 := sha256.Sum256([]byte(pass)) (*users)[username+"/password_sha256_hex"] = chiv1.NewScalarSetting(hex.EncodeToString(pass_sha256[:])) okPasswordSHA256 = true From 6f6303548dc43b2110b9f8ea15f8d7c948fa8287 Mon Sep 17 00:00:00 2001 From: alz Date: Mon, 25 Jan 2021 13:51:55 +0300 Subject: [PATCH 12/78] Improved test_025 --- tests/test_operator.py | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/tests/test_operator.py b/tests/test_operator.py index 39a23d966..f932ec944 100644 --- a/tests/test_operator.py +++ 
b/tests/test_operator.py @@ -1332,23 +1332,33 @@ def test_025(): ".status.containerStatuses[0].ready", "true", backoff = 1 ) - tables_notready_cnt = 0 - data_notready_cnt = 0 + start_time = time.time() + lb_error_time = start_time + distr_lb_error_time = start_time + latent_replica_time = start_time for i in range(1, 100): - cnt_local = clickhouse.query_with_error(chi, "select count() from test_local", "chi-test-025-rescaling-default-0-1.test.svc.cluster.local") - cnt_distr = clickhouse.query_with_error(chi, "select count() from test_distr", "chi-test-025-rescaling-default-0-1.test.svc.cluster.local") - if "Exception" in cnt_local: - tables_notready_cnt = tables_notready_cnt + 1 - print("Exception. Waiting 1 second.") - else: - print(f"local: {cnt_local}, distr: {cnt_distr}") + cnt_local = clickhouse.query_with_error(chi, "select count() from test_local", "chi-test-025-rescaling-default-0-1.test.svc.cluster.local") + cnt_lb = clickhouse.query_with_error(chi, "select count() from test_local") + cnt_distr_lb = clickhouse.query_with_error(chi, "select count() from test_distr") + if "Exception" in cnt_lb or cnt_lb == 0: + lb_error_time = time.time() + if "Exception" in cnt_distr_lb or cnt_distr_lb == 0: + distr_lb_error_time = time.time() + print(f"local via loadbalancer: {cnt_lb}, distributed via loadbalancer: {cnt_distr_lb}") + if "Exception" not in cnt_local: + print(f"local: {cnt_local}, distr: {cnt_distr_lb}") if cnt_local == numbers: break - data_notready_cnt = data_notready_cnt + 1 - print("Replicated table did not catch up. 
Waiting 1 second.") + latent_replica_time = time.time() + print("Replicated table did not catch up") + print("Waiting 1 second.") time.sleep(1) - data_notready_cnt += tables_notready_cnt - print(f"Tables not ready: {tables_notready_cnt}s, data not ready: {data_notready_cnt}s") + print(f"Tables not ready: {round(distr_lb_error_time - start_time)}s, data not ready: {round(latent_replica_time - distr_lb_error_time)}s") + + with Then("Query to the distributed table via load balancer should never fail"): + assert round(distr_lb_error_time - start_time) == 0 + with And("Query to the local table via load balancer should never fail"): + assert round(lb_error_time - start_time) == 0 kubectl.delete_chi(chi) \ No newline at end of file From fd31f7d1e5a670c8ba914129c9d3e4bdb6e8b70e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 25 Jan 2021 17:00:42 +0300 Subject: [PATCH 13/78] dev: initial steps to log address --- cmd/operator/app/clickhouse_operator.go | 21 ++-- pkg/announcer/announcer.go | 158 ++++++++++++++++++++---- pkg/model/schemer.go | 4 +- pkg/util/retry.go | 12 +- pkg/util/runtime.go | 47 +++++++ 5 files changed, 199 insertions(+), 43 deletions(-) create mode 100644 pkg/util/runtime.go diff --git a/cmd/operator/app/clickhouse_operator.go b/cmd/operator/app/clickhouse_operator.go index 272124abf..09201c0d5 100644 --- a/cmd/operator/app/clickhouse_operator.go +++ b/cmd/operator/app/clickhouse_operator.go @@ -24,16 +24,13 @@ import ( "syscall" "time" + kubeinformers "k8s.io/client-go/informers" + + log "github.com/altinity/clickhouse-operator/pkg/announcer" "github.com/altinity/clickhouse-operator/pkg/chop" + chopinformers "github.com/altinity/clickhouse-operator/pkg/client/informers/externalversions" "github.com/altinity/clickhouse-operator/pkg/controller/chi" "github.com/altinity/clickhouse-operator/pkg/version" - - chopinformers "github.com/altinity/clickhouse-operator/pkg/client/informers/externalversions" - - log "github.com/golang/glog" - // log 
"k8s.io/klog" - - kubeinformers "k8s.io/client-go/informers" ) // Prometheus exporter defaults @@ -87,12 +84,15 @@ func Run() { os.Exit(0) } + log.S().Info("operator") + defer log.E().Info("operator") + if debugRequest { kubeInformerFactoryResyncPeriod = defaultInformerFactoryResyncDebugPeriod chopInformerFactoryResyncPeriod = defaultInformerFactoryResyncDebugPeriod } - log.Infof("Starting clickhouse-operator. Version:%s GitSHA:%s BuiltAt:%s\n", version.Version, version.GitSHA, version.BuiltAt) + log.A().Info("Starting clickhouse-operator. Version:%s GitSHA:%s BuiltAt:%s", version.Version, version.GitSHA, version.BuiltAt) // Initialize k8s API clients kubeClient, chopClient := chop.GetClientset(kubeConfigFile, masterURL) @@ -100,10 +100,9 @@ func Run() { // Create operator instance chop := chop.GetCHOp(chopClient, chopConfigFile) chop.SetupLog() + log.V(1).A().Info("Log options parsed") log.Info(chop.Config().String(true)) - log.V(1).Infof("Log options parsed\n") - // Create Informers kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions( kubeClient, @@ -145,7 +144,7 @@ func Run() { // // Start Controller // - log.V(1).Info("Starting CHI controller\n") + log.V(1).A().Info("Starting CHI controller") wg := &sync.WaitGroup{} wg.Add(1) go func() { diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index fcfa966c3..f0576fb47 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -15,7 +15,11 @@ package announcer import ( + "strconv" + log "github.com/golang/glog" + + "github.com/altinity/clickhouse-operator/pkg/util" ) // Announcer handler all log/event/status messages going outside of controller/worker @@ -24,6 +28,16 @@ type Announcer struct { // writeLog specifies whether to write log file writeLog bool + + // file specifies file where logger is called from + file string + // line specifies line where logger is called from + line int + // function specifies function where logger is called from + function 
string + + // prefix specifies prefix used by logger + prefix string } // announcer which would be used in top-level functions, can be called as default @@ -34,6 +48,8 @@ func init() { announcer = New() } +const skip = "announcer.go" + // New creates new announcer func New() Announcer { return Announcer{ @@ -54,22 +70,109 @@ func V(level log.Level) Announcer { return announcer.V(level) } +func (a Announcer) F() Announcer { + b := a + b.writeLog = true + _, _, b.function = util.Caller(skip) + return b +} + +func F() Announcer { + return announcer.F() +} + +func (a Announcer) L() Announcer { + b := a + b.writeLog = true + _, b.line, _ = util.Caller(skip) + return b +} + +func L() Announcer { + return announcer.L() +} + +func (a Announcer) FL() Announcer { + b := a + b.writeLog = true + b.file, _, _ = util.Caller(skip) + return b +} + +func FL() Announcer { + return announcer.FL() +} + +func (a Announcer) A() Announcer { + b := a + b.writeLog = true + b.file, b.line, b.function = util.Caller(skip) + return b +} + +func A() Announcer { + return announcer.A() +} + +func (a Announcer) S() Announcer { + b := a + b.writeLog = true + b.prefix = "start" + b.file, b.line, b.function = util.Caller(skip) + return b +} + +func S() Announcer { + return announcer.S() +} + +func (a Announcer) E() Announcer { + b := a + b.writeLog = true + b.prefix = "end" + b.file, b.line, b.function = util.Caller(skip) + return b +} + +func E() Announcer { + return announcer.E() +} + +func (a Announcer) prependFormat(format string) string { + if a.prefix != "" { + format = a.prefix + ":" + format + } + if a.function != "" { + format = a.function + ":" + format + } + if a.line != 0 { + format = strconv.Itoa(a.line) + ":" + format + } + if a.file != "" { + format = a.file + ":" + format + } + return format +} + // Info is inspired by log.Infof() func (a Announcer) Info(format string, args ...interface{}) { // Produce classic log line - if a.writeLog { - if a.v > 0 { - if len(args) > 0 { - 
log.V(a.v).Infof(format, args...) - } else { - log.V(a.v).Info(format) - } + if !a.writeLog { + return + } + + format = a.prependFormat(format) + if a.v > 0 { + if len(args) > 0 { + log.V(a.v).Infof(format, args...) } else { - if len(args) > 0 { - log.Infof(format, args...) - } else { - log.Info(format) - } + log.V(a.v).Info(format) + } + } else { + if len(args) > 0 { + log.Infof(format, args...) + } else { + log.Info(format) } } } @@ -82,12 +185,15 @@ func Info(format string, args ...interface{}) { // Warning is inspired by log.Warningf() func (a Announcer) Warning(format string, args ...interface{}) { // Produce classic log line - if a.writeLog { - if len(args) > 0 { - log.Warningf(format, args...) - } else { - log.Warning(format) - } + if !a.writeLog { + return + } + + format = a.prependFormat(format) + if len(args) > 0 { + log.Warningf(format, args...) + } else { + log.Warning(format) } } @@ -99,12 +205,15 @@ func Warning(format string, args ...interface{}) { // Error is inspired by log.Errorf() func (a Announcer) Error(format string, args ...interface{}) { // Produce classic log line - if a.writeLog { - if len(args) > 0 { - log.Errorf(format, args...) - } else { - log.Error(format) - } + if !a.writeLog { + return + } + + format = a.prependFormat(format) + if len(args) > 0 { + log.Errorf(format, args...) + } else { + log.Error(format) } } @@ -115,6 +224,7 @@ func Error(format string, args ...interface{}) { // Fatal is inspired by log.Fatalf() func (a Announcer) Fatal(format string, args ...interface{}) { + format = a.prependFormat(format) // Write and exit if len(args) > 0 { log.Fatalf(format, args...) 
diff --git a/pkg/model/schemer.go b/pkg/model/schemer.go index 1d4441e8c..8460a7bd6 100644 --- a/pkg/model/schemer.go +++ b/pkg/model/schemer.go @@ -388,7 +388,9 @@ func (s *Schemer) applySQLs(hosts []string, sqls []string, retry bool) error { return errors[0] } return nil - }) + }, + log.V(1).Info, + ) if err != nil { errors = append(errors, err) diff --git a/pkg/util/retry.go b/pkg/util/retry.go index eb46964fd..a4db22570 100644 --- a/pkg/util/retry.go +++ b/pkg/util/retry.go @@ -16,12 +16,10 @@ package util import ( "time" - - log "github.com/altinity/clickhouse-operator/pkg/announcer" ) // Retry -func Retry(tries int, desc string, f func() error) error { +func Retry(tries int, desc string, f func() error, log func(format string, args ...interface{})) error { var err error for try := 1; try <= tries; try++ { err = f() @@ -29,7 +27,7 @@ func Retry(tries int, desc string, f func() error) error { // All ok, no need to retry more if try > 1 { // Done, but after some retries, this is not 'clean' - log.V(1).Info("DONE attempt %d of %d: %s", try, tries, desc) + log("DONE attempt %d of %d: %s", try, tries, desc) } return nil } @@ -37,16 +35,16 @@ func Retry(tries int, desc string, f func() error) error { if try < tries { // Try failed, need to sleep and retry seconds := try * 5 - log.V(1).Info("FAILED attempt %d of %d, sleep %d sec and retry: %s", try, tries, seconds, desc) + log("FAILED attempt %d of %d, sleep %d sec and retry: %s", try, tries, seconds, desc) select { case <-time.After(time.Duration(seconds) * time.Second): } } else if tries == 1 { // On single try do not put so much emotion. It just failed and user is not intended to retry - log.V(1).Info("FAILED single try. No retries will be made for %s", desc) + log("FAILED single try. No retries will be made for %s", desc) } else { // On last try no need to wait more - log.V(1).Info("FAILED AND ABORT. All %d attempts: %s", tries, desc) + log("FAILED AND ABORT. 
All %d attempts: %s", tries, desc) } } diff --git a/pkg/util/runtime.go b/pkg/util/runtime.go new file mode 100644 index 000000000..d34bb0b98 --- /dev/null +++ b/pkg/util/runtime.go @@ -0,0 +1,47 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "path" + "runtime" + "strings" +) + +func Caller(skip string) (string, int, string) { + pc := make([]uintptr, 7) + n := runtime.Callers(2, pc) + frames := runtime.CallersFrames(pc[:n]) + for { + frame, more := frames.Next() + // frame.File = /tmp/sandbox469341579/prog.go + // frame.Line = 28 + // frame.Function = main.Announcer.Info + + // file = prog.go + file := path.Base(frame.File) + // function = Info + function := path.Base(strings.Replace(frame.Function, ".", "/", -1)) + + if file != skip { + return file, frame.Line, function + } + + if !more { + break + } + } + return "", 0, "" +} From 6ab31bde15ec191106bf9d9784f3de87946edafb Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 26 Jan 2021 14:45:06 +0300 Subject: [PATCH 14/78] dev: introduce announcer aux functions --- pkg/announcer/announcer.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index f0576fb47..4b7446d9a 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -139,6 +139,9 @@ func E() Announcer { } func (a Announcer) prependFormat(format string) 
string { + // Format is expected to be 'file:line:function:prefix:_old_format_' + // Prepend each component in reverse order + if a.prefix != "" { format = a.prefix + ":" + format } From 5fb9c4270b295439df192e9505c5858033b76fca Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 26 Jan 2021 19:10:45 +0300 Subject: [PATCH 15/78] dev: progress with host reconcile attribute --- pkg/apis/clickhouse.altinity.com/v1/types.go | 40 ++++--- pkg/controller/chi/worker.go | 113 +++++++++++-------- pkg/model/creator.go | 7 +- 3 files changed, 89 insertions(+), 71 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index bdb76de1a..46f7b3a90 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -178,15 +178,22 @@ type ChiHostConfig struct { FilesFingerprint string `json:"filesfingerprint"` } +type StatefulSetStatus string + +const ( + StatefulSetStatusModified StatefulSetStatus = "modified" + StatefulSetStatusNew StatefulSetStatus = "new" + StatefulSetStatusSame StatefulSetStatus = "same" + StatefulSetStatusUnknown StatefulSetStatus = "unknown" +) + // ChiHostReconcileAttributes defines host reconcile status type ChiHostReconcileAttributes struct { + status StatefulSetStatus add bool remove bool modify bool unclear bool - - migrate bool - reconciled bool } func NewChiHostReconcileAttributes() *ChiHostReconcileAttributes { @@ -207,6 +214,15 @@ func (s *ChiHostReconcileAttributes) Any(to ChiHostReconcileAttributes) bool { return (s.add && to.add) || (s.remove && to.remove) || (s.modify && to.modify) || (s.unclear && to.unclear) } +func (s *ChiHostReconcileAttributes) SetStatus(status StatefulSetStatus) *ChiHostReconcileAttributes { + s.status = status + return s +} + +func (s *ChiHostReconcileAttributes) GetStatus() StatefulSetStatus { + return s.status +} + func (s *ChiHostReconcileAttributes) SetAdd() *ChiHostReconcileAttributes { s.add = true 
return s @@ -232,16 +248,6 @@ func (s *ChiHostReconcileAttributes) SetUnclear() *ChiHostReconcileAttributes { return s } -func (s *ChiHostReconcileAttributes) SetMigrate() *ChiHostReconcileAttributes { - s.migrate = true - return s -} - -func (s *ChiHostReconcileAttributes) SetReconciled() *ChiHostReconcileAttributes { - s.reconciled = true - return s -} - func (s *ChiHostReconcileAttributes) IsAdd() bool { return s.add } @@ -258,14 +264,6 @@ func (s *ChiHostReconcileAttributes) IsUnclear() bool { return s.unclear } -func (s *ChiHostReconcileAttributes) IsMigrate() bool { - return s.migrate -} - -func (s *ChiHostReconcileAttributes) IsReconciled() bool { - return s.reconciled -} - // CHITemplates defines templates section of .spec type ChiTemplates struct { // Templates diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index f87ef7ea2..592af07ca 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -291,13 +291,6 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { }, ) - new.WalkHosts(func(host *chop.ChiHost) error { - if update { - host.ReconcileAttributes.SetMigrate() - } - return nil - }) - new.WalkHosts(func(host *chop.ChiHost) error { if host.ReconcileAttributes.IsAdd() { // Already added @@ -318,7 +311,7 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { } else if host.ReconcileAttributes.IsUnclear() { w.a.Info("UNCLEAR host: %s", host.Address.ShortString()) } else { - w.a.Info("UNTOUCH host: %s", host.Address.ShortString()) + w.a.Info("UNTOUCHED host: %s", host.Address.ShortString()) } return nil }) @@ -501,9 +494,9 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { configMap := w.creator.CreateConfigMapHost(host) statefulSet := w.creator.CreateStatefulSet(host) service := w.creator.CreateServiceHost(host) - status := w.getStatefulSetStatus(host.StatefulSet) + (&host.ReconcileAttributes).SetStatus(w.getStatefulSetStatus(statefulSet)) - if err := 
w.excludeHost(host, status); err != nil { + if err := w.excludeHost(host); err != nil { return err } @@ -527,7 +520,7 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { host.ReconcileAttributes.UnsetAdd() - if host.ReconcileAttributes.IsMigrate() { + if w.migrateTables(host) { w.a.V(1). WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted). WithStatusAction(host.CHI). @@ -540,13 +533,11 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { Info("As CHI is just created, not need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) } - if err := w.includeHost(host, status); err != nil { + if err := w.includeHost(host); err != nil { // If host is not ready - fallback return err } - host.ReconcileAttributes.SetReconciled() - w.a.V(1). WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileCompleted). WithStatusAction(host.CHI). @@ -555,8 +546,18 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { return nil } -func (w *worker) excludeHost(host *chop.ChiHost, status StatefulSetStatus) error { - if w.waitExcludeHost(host, status) { +func (w *worker) migrateTables(host *chop.ChiHost) bool { + if host.GetCHI().IsStopped() { + return false + } + if host.ReconcileAttributes.GetStatus() == chop.StatefulSetStatusSame { + return false + } + return true +} + +func (w *worker) excludeHost(host *chop.ChiHost) error { + if w.waitExcludeHost(host) { w.a.V(1). 
Info("Exclude from cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) w.excludeHostFromService(host) @@ -588,8 +589,9 @@ func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) { } // determines whether reconciler should wait for host to be excluded from cluster -func (w *worker) waitExcludeHost(host *chop.ChiHost, status StatefulSetStatus) bool { - if (status == statefulSetStatusNew) || (status == statefulSetStatusSame) { +func (w *worker) waitExcludeHost(host *chop.ChiHost) bool { + status := host.ReconcileAttributes.GetStatus() + if (status == chop.StatefulSetStatusNew) || (status == chop.StatefulSetStatusSame) { // No need to wait for new and non-modified StatefulSets return false } @@ -612,8 +614,9 @@ func (w *worker) waitExcludeHost(host *chop.ChiHost, status StatefulSetStatus) b } // determines whether reconciler should wait for host to be included into cluster -func (w *worker) waitIncludeHost(host *chop.ChiHost, status StatefulSetStatus) bool { - if (status == statefulSetStatusNew) || (status == statefulSetStatusSame) { +func (w *worker) waitIncludeHost(host *chop.ChiHost) bool { + status := host.ReconcileAttributes.GetStatus() + if (status == chop.StatefulSetStatusNew) || (status == chop.StatefulSetStatusSame) { return false } @@ -635,16 +638,16 @@ func (w *worker) waitIncludeHost(host *chop.ChiHost, status StatefulSetStatus) b } // Include host back to ClickHouse clusters -func (w *worker) includeHost(host *chop.ChiHost, status StatefulSetStatus) error { +func (w *worker) includeHost(host *chop.ChiHost) error { w.a.V(1). 
Info("Include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) - w.includeHostIntoClickHouseCluster(host, status) + w.includeHostIntoClickHouseCluster(host) w.includeHostIntoService(host) return nil } -func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost, status StatefulSetStatus) { +func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost) { options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). SetRemoteServersGeneratorOptions(chopmodel.NewRemoteServersGeneratorOptions(). ExcludeReconcileAttributes( @@ -652,7 +655,7 @@ func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost, status Sta ), ) _ = w.reconcileCHIConfigMaps(host.CHI, options, true) - if w.waitIncludeHost(host, status) { + if w.waitIncludeHost(host) { _ = w.waitHostInCluster(host) } } @@ -1099,16 +1102,7 @@ func (w *worker) reconcileService(chi *chop.ClickHouseInstallation, service *cor return err } -type StatefulSetStatus string - -const ( - statefulSetStatusModified StatefulSetStatus = "modified" - statefulSetStatusNew StatefulSetStatus = "new" - statefulSetStatusSame StatefulSetStatus = "same" - statefulSetStatusUnknown StatefulSetStatus = "unknown" -) - -func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) StatefulSetStatus { +func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) chop.StatefulSetStatus { w.a.V(2).Info("getStatefulSetStatus() - start") defer w.a.V(2).Info("getStatefulSetStatus() - end") @@ -1116,26 +1110,48 @@ func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) StatefulSet curStatefulSet, err := w.c.getStatefulSet(&statefulSet.ObjectMeta, false) if curStatefulSet != nil { - if _cur, ok := curStatefulSet.Labels[chopmodel.LabelStatefulSetVersion]; ok { - if _new, _ok := statefulSet.Labels[chopmodel.LabelStatefulSetVersion]; _ok { - if _cur == _new { - w.a.Info("INFO StatefulSet ARE EQUAL no reconcile is actually 
needed") - return statefulSetStatusSame - } + // Try to perform label-based comparison + curLabel, curHasLabel := curStatefulSet.Labels[chopmodel.LabelStatefulSetVersion] + newLabel, newHasLabel := statefulSet.Labels[chopmodel.LabelStatefulSetVersion] + if curHasLabel && newHasLabel { + if curLabel == newLabel { + w.a.Info("INFO StatefulSet ARE EQUAL based on labels no reconcile is actually needed") + return chop.StatefulSetStatusSame + } else { + /* + if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { + w.a.Info("INFO StatefulSet ARE EQUAL based on diff no reconcile is actually needed") + // return chop.StatefulSetStatusSame + } else { + w.a.Info("INFO StatefulSet ARE DIFFERENT based on diff reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) + // return chop.StatefulSetStatusModified + } + + w.a.Info("INFO StatefulSet ARE DIFFERENT based on labels reconcile needed") + return chop.StatefulSetStatusModified + + */ } } - if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { - w.a.Info("INFO StatefulSet ARE DIFFERENT reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) - return statefulSetStatusModified - } + /* + // No labels to compare, use spec diff + if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { + w.a.Info("INFO StatefulSet ARE EQUAL based on diff no reconcile is actually needed") + return chop.StatefulSetStatusSame + } else { + w.a.Info("INFO StatefulSet ARE DIFFERENT based on diff reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) + return chop.StatefulSetStatusModified + } + */ } + // No cur StatefulSet available + if apierrors.IsNotFound(err) { - // StatefulSet not found - even during Update process - try to create it - return statefulSetStatusNew + return chop.StatefulSetStatusNew } - return statefulSetStatusUnknown + return chop.StatefulSetStatusUnknown } // 
reconcileStatefulSet reconciles apps.StatefulSet @@ -1143,8 +1159,7 @@ func (w *worker) reconcileStatefulSet(newStatefulSet *apps.StatefulSet, host *ch w.a.V(2).Info("reconcileStatefulSet() - start") defer w.a.V(2).Info("reconcileStatefulSet() - end") - status := w.getStatefulSetStatus(host.StatefulSet) - if status == statefulSetStatusSame { + if host.ReconcileAttributes.GetStatus() == chop.StatefulSetStatusSame { defer w.a.V(2).Info("reconcileStatefulSet() - no need to reconcile the same StaetfulSet") return nil } diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 5e6109a58..2ec855390 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -313,7 +313,12 @@ func (c *Creator) CreateStatefulSet(host *chiv1.ChiHost) *apps.StatefulSet { c.setupStatefulSetPodTemplate(statefulSet, host) c.setupStatefulSetVolumeClaimTemplates(statefulSet, host) - statefulSet.Labels = util.MergeStringMapsOverwrite(statefulSet.Labels, map[string]string{LabelStatefulSetVersion: util.Fingerprint(statefulSet)}) + statefulSet.Labels = util.MergeStringMapsOverwrite( + statefulSet.Labels, + map[string]string{ + LabelStatefulSetVersion: util.Fingerprint(statefulSet), + }, + ) host.StatefulSet = statefulSet return statefulSet From 606f0eac392feb5dfb2efbe112d2399af72d2bf8 Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 28 Jan 2021 10:03:03 +0300 Subject: [PATCH 16/78] Minor cleanup --- pkg/controller/chi/pods.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index 5c95602a0..dd59e5a94 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -25,8 +25,10 @@ import ( func (c *Controller) appendLabelReady(host *chop.ChiHost) error { pod, err := c.getPod(host) if err != nil { + log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) return err } + chopmodel.AppendLabelReady(&pod.ObjectMeta) _, err = 
c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod) return err @@ -35,19 +37,19 @@ func (c *Controller) appendLabelReady(host *chop.ChiHost) error { func (c *Controller) deleteLabelReady(host *chop.ChiHost) error { pod, err := c.getPod(host) if err != nil { - return err + log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) + return } + chopmodel.DeleteLabelReady(&pod.ObjectMeta) _, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod) return err } func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Container)) { - namespace := host.Address.Namespace - name := chopmodel.CreatePodName(host) - pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions()) + pod, err := c.getPod(host) if err != nil { - log.Error("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) + log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) return } @@ -58,11 +60,9 @@ func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Con } func (c *Controller) walkContainerStatuses(host *chop.ChiHost, f func(status *v1.ContainerStatus)) { - namespace := host.Address.Namespace - name := chopmodel.CreatePodName(host) - pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions()) + pod, err := c.getPod(host) if err != nil { - log.Error("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) + log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) return } From e40e5abc96a2f5e87723f88c5abd6eb159abc04e Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 28 Jan 2021 10:21:42 +0300 Subject: [PATCH 17/78] Fix build --- pkg/controller/chi/pods.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index dd59e5a94..79e0affd2 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -38,7 +38,7 @@ func (c *Controller) 
deleteLabelReady(host *chop.ChiHost) error { pod, err := c.getPod(host) if err != nil { log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) - return + return err } chopmodel.DeleteLabelReady(&pod.ObjectMeta) From c54d65a78785c6847345ef047aaaa81e623c0f60 Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 28 Jan 2021 14:16:04 +0300 Subject: [PATCH 18/78] Cleanup reconcile wait logic --- pkg/controller/chi/worker.go | 99 ++++++++++++++++++++---------------- 1 file changed, 56 insertions(+), 43 deletions(-) diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index 592af07ca..13b148859 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -418,8 +418,10 @@ func (w *worker) reconcileCHIAuxObjectsPreliminary(chi *chop.ClickHouseInstallat } } - // 2. CHI ConfigMaps without update - create only - return w.reconcileCHIConfigMaps(chi, nil, false) + // 2. CHI common ConfigMap without update - create only + w.reconcileCHIConfigMapCommon(chi, nil, false) + // 3. 
CHI users ConfigMap + w.reconcileCHIConfigMapUsers(chi, nil, true) } // reconcileCHIAuxObjectsFinal reconciles CHI global objects @@ -428,25 +430,25 @@ func (w *worker) reconcileCHIAuxObjectsFinal(chi *chop.ClickHouseInstallation) e defer w.a.V(2).Info("reconcileCHIAuxObjectsFinal() - end") // CHI ConfigMaps with update - return w.reconcileCHIConfigMaps(chi, nil, true) + return w.reconcileCHIConfigMapCommon(chi, nil, true) } -// reconcileCHIConfigMaps reconciles all CHI's ConfigMaps -func (w *worker) reconcileCHIConfigMaps(chi *chop.ClickHouseInstallation, options *chopmodel.ClickHouseConfigFilesGeneratorOptions, update bool) error { - // ConfigMap common for all resources in CHI - // contains several sections, mapped as separated chopConfig files, - // such as remote servers, zookeeper setup, etc +// reconcileCHIConfigMapCommon reconciles all CHI's common ConfigMap +func (w *worker) reconcileCHIConfigMapCommon(chi *chop.ClickHouseInstallation, options *chopmodel.ClickHouseConfigFilesGeneratorOptions, update bool) error { configMapCommon := w.creator.CreateConfigMapCHICommon(options) if err := w.reconcileConfigMap(chi, configMapCommon, update); err != nil { return err } + return nil +} +// reconcileCHIConfigMapUser reconciles all CHI's users ConfigMap +func (w *worker) reconcileCHIConfigMapCommon(chi *chop.ClickHouseInstallation, options *chopmodel.ClickHouseConfigFilesGeneratorOptions, update bool) error { // ConfigMap common for all users resources in CHI configMapUsers := w.creator.CreateConfigMapCHICommonUsers() if err := w.reconcileConfigMap(chi, configMapUsers, update); err != nil { return err } - return nil } @@ -556,22 +558,39 @@ func (w *worker) migrateTables(host *chop.ChiHost) bool { return true } +// Exclude host from ClickHouse clusters if required func (w *worker) excludeHost(host *chop.ChiHost) error { - if w.waitExcludeHost(host) { + if w.ifExcludeHost(host) { w.a.V(1). 
Info("Exclude from cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + w.excludeHostFromService(host) w.excludeHostFromClickHouseCluster(host) } + return nil +} + +// Always include host back to ClickHouse clusters +func (w *worker) includeHost(host *chop.ChiHost) error { + w.a.V(1). + Info("Include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + + w.includeHostIntoClickHouseCluster(host) + w.includeHostIntoService(host) return nil } + func (w *worker) excludeHostFromService(host *chop.ChiHost) { w.c.deleteLabelReady(host) } -// excludeHostFromClickHouseCluster excludes host from all ClickHouse clusters +func (w *worker) includeHostIntoService(host *chop.ChiHost) { + w.c.appendLabelReady(host) +} + +// excludeHostFromClickHouseCluster excludes host from ClickHouse configuration func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) { // Specify in options to exclude host from ClickHouse config file options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). @@ -584,23 +603,43 @@ func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) { ) // Remove host from cluster config and wait for ClickHouse to pick-up the change - _ = w.reconcileCHIConfigMaps(host.CHI, options, true) - _ = w.waitHostNotInCluster(host) + if w.waitExcludeHost(host) { + _ = w.reconcileCHIConfigMapCommon(host.CHI, options, true) + _ = w.waitHostNotInCluster(host) + } } -// determines whether reconciler should wait for host to be excluded from cluster -func (w *worker) waitExcludeHost(host *chop.ChiHost) bool { +// includeHostIntoClickHouseCluster includes host to ClickHouse configuration +func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost) { + options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). + SetRemoteServersGeneratorOptions(chopmodel.NewRemoteServersGeneratorOptions(). 
+ ExcludeReconcileAttributes( + chop.NewChiHostReconcileAttributes().SetAdd(), + ), + ) + // Add host to the cluster config (always) and wait for ClickHouse to pick-up the change + _ = w.reconcileCHIConfigMapCommon(host.CHI, options, true) + if w.waitIncludeHost(host) { + _ = w.waitHostInCluster(host) + } +} + +// determines whether host to be excluded from cluster +func (w *worker) ifExcludeHost(host *chop.ChiHost) bool { status := host.ReconcileAttributes.GetStatus() if (status == chop.StatefulSetStatusNew) || (status == chop.StatefulSetStatusSame) { - // No need to wait for new and non-modified StatefulSets + // No need to exclude for new and non-modified StatefulSets return false } if host.GetShard().HostsCount() == 1 { - // In case shard where current host is located has only one host (means no replication), no need to wait + // In case shard where current host is located has only one host (means no replication), no need to exclude return false } +} +// determines whether reconciler should wait for host to be excluded from cluster +func (w *worker) waitExcludeHost(host *chop.ChiHost) bool { // Check CHI settings switch { case host.CHI.IsReconcilingPolicyWait(): @@ -637,32 +676,6 @@ func (w *worker) waitIncludeHost(host *chop.ChiHost) bool { return w.c.chop.Config().ReconcileWaitInclude } -// Include host back to ClickHouse clusters -func (w *worker) includeHost(host *chop.ChiHost) error { - w.a.V(1). - Info("Include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) - w.includeHostIntoClickHouseCluster(host) - w.includeHostIntoService(host) - - return nil -} - -func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost) { - options := chopmodel.NewClickHouseConfigFilesGeneratorOptions(). - SetRemoteServersGeneratorOptions(chopmodel.NewRemoteServersGeneratorOptions(). 
- ExcludeReconcileAttributes( - chop.NewChiHostReconcileAttributes().SetAdd(), - ), - ) - _ = w.reconcileCHIConfigMaps(host.CHI, options, true) - if w.waitIncludeHost(host) { - _ = w.waitHostInCluster(host) - } -} - -func (w *worker) includeHostIntoService(host *chop.ChiHost) { - w.c.appendLabelReady(host) -} // waitHostInCluster waits until host is a member of at least one ClickHouse cluster func (w *worker) waitHostInCluster(host *chop.ChiHost) error { From f4f93db2ae003ed25033172b837961bb0a9a0bd1 Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 28 Jan 2021 14:20:53 +0300 Subject: [PATCH 19/78] Fix build --- pkg/controller/chi/worker.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index 13b148859..c4dc9914a 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -422,6 +422,8 @@ func (w *worker) reconcileCHIAuxObjectsPreliminary(chi *chop.ClickHouseInstallat w.reconcileCHIConfigMapCommon(chi, nil, false) // 3. 
CHI users ConfigMap w.reconcileCHIConfigMapUsers(chi, nil, true) + + return nil } // reconcileCHIAuxObjectsFinal reconciles CHI global objects @@ -442,8 +444,8 @@ func (w *worker) reconcileCHIConfigMapCommon(chi *chop.ClickHouseInstallation, o return nil } -// reconcileCHIConfigMapUser reconciles all CHI's users ConfigMap -func (w *worker) reconcileCHIConfigMapCommon(chi *chop.ClickHouseInstallation, options *chopmodel.ClickHouseConfigFilesGeneratorOptions, update bool) error { +// reconcileCHIConfigMapUsers reconciles all CHI's users ConfigMap +func (w *worker) reconcileCHIConfigMapUsers(chi *chop.ClickHouseInstallation, options *chopmodel.ClickHouseConfigFilesGeneratorOptions, update bool) error { // ConfigMap common for all users resources in CHI configMapUsers := w.creator.CreateConfigMapCHICommonUsers() if err := w.reconcileConfigMap(chi, configMapUsers, update); err != nil { @@ -636,6 +638,8 @@ func (w *worker) ifExcludeHost(host *chop.ChiHost) bool { // In case shard where current host is located has only one host (means no replication), no need to exclude return false } + + return true } // determines whether reconciler should wait for host to be excluded from cluster From 53b018a01f5f95c7316cae0de51514c733a8230f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 28 Jan 2021 14:29:32 +0300 Subject: [PATCH 20/78] dev: reiterate over statefulset version --- go.mod | 1 + pkg/controller/chi/pods.go | 4 ++-- pkg/controller/chi/worker.go | 40 +++++++++++++----------------------- pkg/model/creator.go | 21 +++++++++++++++++-- pkg/util/dump.go | 21 +++++++++++++++++++ pkg/util/fingerprint.go | 2 +- pkg/util/hash.go | 11 +++++++++- 7 files changed, 68 insertions(+), 32 deletions(-) create mode 100644 pkg/util/dump.go diff --git a/go.mod b/go.mod index 8654fb84d..52bcc1e75 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/mailru/go-clickhouse v1.3.0 github.com/prometheus/client_golang v1.6.0 github.com/r3labs/diff 
v0.0.0-20191120142937-b4ed99a31f5a + github.com/sanity-io/litter v1.3.0 github.com/spf13/pflag v1.0.5 // indirect gopkg.in/d4l3k/messagediff.v1 v1.2.1 gopkg.in/yaml.v2 v2.2.8 diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index 79e0affd2..c2808c9e1 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -28,7 +28,7 @@ func (c *Controller) appendLabelReady(host *chop.ChiHost) error { log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) return err } - + chopmodel.AppendLabelReady(&pod.ObjectMeta) _, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod) return err @@ -40,7 +40,7 @@ func (c *Controller) deleteLabelReady(host *chop.ChiHost) error { log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) return err } - + chopmodel.DeleteLabelReady(&pod.ObjectMeta) _, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod) return err diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index 592af07ca..ff823cf4e 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -530,7 +530,7 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { } } else { w.a.V(1). 
- Info("As CHI is just created, not need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) + Info("No need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) } if err := w.includeHost(host); err != nil { @@ -1111,38 +1111,26 @@ func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) chop.Statef if curStatefulSet != nil { // Try to perform label-based comparison - curLabel, curHasLabel := curStatefulSet.Labels[chopmodel.LabelStatefulSetVersion] - newLabel, newHasLabel := statefulSet.Labels[chopmodel.LabelStatefulSetVersion] + curLabel, curHasLabel := w.creator.GetStatefulSetVersion(curStatefulSet) + newLabel, newHasLabel := w.creator.GetStatefulSetVersion(statefulSet) if curHasLabel && newHasLabel { if curLabel == newLabel { w.a.Info("INFO StatefulSet ARE EQUAL based on labels no reconcile is actually needed") return chop.StatefulSetStatusSame } else { - /* - if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { - w.a.Info("INFO StatefulSet ARE EQUAL based on diff no reconcile is actually needed") - // return chop.StatefulSetStatusSame - } else { - w.a.Info("INFO StatefulSet ARE DIFFERENT based on diff reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) - // return chop.StatefulSetStatusModified - } - - w.a.Info("INFO StatefulSet ARE DIFFERENT based on labels reconcile needed") - return chop.StatefulSetStatusModified - - */ - } - } - /* - // No labels to compare, use spec diff - if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { - w.a.Info("INFO StatefulSet ARE EQUAL based on diff no reconcile is actually needed") - return chop.StatefulSetStatusSame - } else { - w.a.Info("INFO StatefulSet ARE DIFFERENT based on diff reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) + 
//if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { + // w.a.Info("INFO StatefulSet ARE EQUAL based on diff no reconcile is actually needed") + // // return chop.StatefulSetStatusSame + //} else { + // w.a.Info("INFO StatefulSet ARE DIFFERENT based on diff reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) + // // return chop.StatefulSetStatusModified + //} + w.a.Info("INFO StatefulSet ARE DIFFERENT based on labels reconcile needed") return chop.StatefulSetStatusModified } - */ + } + // No labels to compare, we can not say for sure what exactly is going on + return chop.StatefulSetStatusUnknown } // No cur StatefulSet available diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 2ec855390..40fa784cc 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -312,16 +312,33 @@ func (c *Creator) CreateStatefulSet(host *chiv1.ChiHost) *apps.StatefulSet { c.setupStatefulSetPodTemplate(statefulSet, host) c.setupStatefulSetVolumeClaimTemplates(statefulSet, host) + c.setupStatefulSetVersion(statefulSet) + host.StatefulSet = statefulSet + + return statefulSet +} + +// setupStatefulSetVersion +// TODO property of the labeler? +func (c *Creator) setupStatefulSetVersion(statefulSet *apps.StatefulSet) { statefulSet.Labels = util.MergeStringMapsOverwrite( statefulSet.Labels, map[string]string{ LabelStatefulSetVersion: util.Fingerprint(statefulSet), }, ) - host.StatefulSet = statefulSet + log.V(3).Info("StatefulSet(%s/%s)\n%s", statefulSet.Namespace, statefulSet.Name, util.Dump(statefulSet)) +} - return statefulSet +// GetStatefulSetVersion +// TODO property of the labeler? 
+func (c *Creator) GetStatefulSetVersion(statefulSet *apps.StatefulSet) (string, bool) { + if statefulSet == nil { + return "", false + } + label, ok := statefulSet.Labels[LabelStatefulSetVersion] + return label, ok } // PreparePersistentVolume diff --git a/pkg/util/dump.go b/pkg/util/dump.go new file mode 100644 index 000000000..e1e33595e --- /dev/null +++ b/pkg/util/dump.go @@ -0,0 +1,21 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import "github.com/sanity-io/litter" + +func Dump(obj interface{}) string { + return litter.Sdump(obj) +} diff --git a/pkg/util/fingerprint.go b/pkg/util/fingerprint.go index 495539e94..46e51ecfd 100644 --- a/pkg/util/fingerprint.go +++ b/pkg/util/fingerprint.go @@ -15,5 +15,5 @@ package util func Fingerprint(obj interface{}) string { - return HashIntoString(serialize(obj)) + return HashIntoString(serializeRepeatable(obj)) } diff --git a/pkg/util/hash.go b/pkg/util/hash.go index 209e2a0fa..a5e94763a 100644 --- a/pkg/util/hash.go +++ b/pkg/util/hash.go @@ -21,9 +21,12 @@ import ( "encoding/hex" "fmt" "hash/fnv" + + "github.com/sanity-io/litter" + // "github.com/davecgh/go-spew/spew" ) -func serialize(obj interface{}) []byte { +func serializeUnrepeatable(obj interface{}) []byte { b := bytes.Buffer{} encoder := gob.NewEncoder(&b) err := encoder.Encode(obj) @@ -34,6 +37,12 @@ func serialize(obj interface{}) []byte { return b.Bytes() } +func serializeRepeatable(obj interface{}) []byte { + //s := spew.NewDefaultConfig() + //s.SortKeys = true + return []byte(litter.Sdump(obj)) +} + func HashIntoString(b []byte) string { hasher := sha1.New() hasher.Write(b) From 9b5db6ae9ec4bfd2dc4ca198a72bb396454ceba4 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 28 Jan 2021 14:29:44 +0300 Subject: [PATCH 21/78] dev: dependencies --- go.sum | 5 + vendor/github.com/sanity-io/litter/.gitignore | 1 + .../github.com/sanity-io/litter/.travis.yml | 3 + .../github.com/sanity-io/litter/CHANGELOG.md | 7 + vendor/github.com/sanity-io/litter/LICENSE | 21 + vendor/github.com/sanity-io/litter/README.md | 198 +++++++ vendor/github.com/sanity-io/litter/dump.go | 504 ++++++++++++++++++ vendor/github.com/sanity-io/litter/go.mod | 9 + vendor/github.com/sanity-io/litter/go.sum | 6 + .../github.com/sanity-io/litter/pointers.go | 126 +++++ vendor/github.com/sanity-io/litter/print.go | 44 ++ vendor/github.com/sanity-io/litter/util.go | 28 + vendor/modules.txt | 2 + 13 files 
changed, 954 insertions(+) create mode 100644 vendor/github.com/sanity-io/litter/.gitignore create mode 100644 vendor/github.com/sanity-io/litter/.travis.yml create mode 100644 vendor/github.com/sanity-io/litter/CHANGELOG.md create mode 100644 vendor/github.com/sanity-io/litter/LICENSE create mode 100644 vendor/github.com/sanity-io/litter/README.md create mode 100644 vendor/github.com/sanity-io/litter/dump.go create mode 100644 vendor/github.com/sanity-io/litter/go.mod create mode 100644 vendor/github.com/sanity-io/litter/go.sum create mode 100644 vendor/github.com/sanity-io/litter/pointers.go create mode 100644 vendor/github.com/sanity-io/litter/print.go create mode 100644 vendor/github.com/sanity-io/litter/util.go diff --git a/go.sum b/go.sum index 514b85aa9..9a3aaf285 100644 --- a/go.sum +++ b/go.sum @@ -19,6 +19,7 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -112,6 +113,7 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -132,6 +134,8 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4 github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a h1:2v4Ipjxa3sh+xn6GvtgrMub2ci4ZLQMvTaYIba2lfdc= github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a/go.mod h1:ozniNEFS3j1qCwHKdvraMn1WJOsUxHd7lYfukEIS4cs= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/sanity-io/litter v1.3.0 h1:5ZO+weUsqdSWMUng5JnpkW/Oz8iTXiIdeumhQr1sSjs= +github.com/sanity-io/litter v1.3.0/go.mod h1:5Z71SvaYy5kcGtyglXOC9rrUi3c1E8CamFWjQsazTh0= github.com/satori/go.uuid v1.1.0 h1:B9KXyj+GzIpJbV7gmr873NsY6zpbxNy24CBtGrk7jHo= github.com/satori/go.uuid v1.1.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -142,6 +146,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/sanity-io/litter/.gitignore 
b/vendor/github.com/sanity-io/litter/.gitignore new file mode 100644 index 000000000..e43b0f988 --- /dev/null +++ b/vendor/github.com/sanity-io/litter/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/vendor/github.com/sanity-io/litter/.travis.yml b/vendor/github.com/sanity-io/litter/.travis.yml new file mode 100644 index 000000000..a939af480 --- /dev/null +++ b/vendor/github.com/sanity-io/litter/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: +- 1.14.x diff --git a/vendor/github.com/sanity-io/litter/CHANGELOG.md b/vendor/github.com/sanity-io/litter/CHANGELOG.md new file mode 100644 index 000000000..0be671fbc --- /dev/null +++ b/vendor/github.com/sanity-io/litter/CHANGELOG.md @@ -0,0 +1,7 @@ +# 1.1.0 (2017-11-1) + +A slight breaking change. The dump-method of the `Dumper` interface has changed from `Dump` to `LitterDump` to mitigate potential collisions. + +# 1.0.0 (2017-10-29) + +Tagged 1.0.0. diff --git a/vendor/github.com/sanity-io/litter/LICENSE b/vendor/github.com/sanity-io/litter/LICENSE new file mode 100644 index 000000000..6a27c654f --- /dev/null +++ b/vendor/github.com/sanity-io/litter/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016-2017 Sanity.io. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sanity-io/litter/README.md b/vendor/github.com/sanity-io/litter/README.md new file mode 100644 index 000000000..b2acb08c4 --- /dev/null +++ b/vendor/github.com/sanity-io/litter/README.md @@ -0,0 +1,198 @@ +[![!Build Status](https://travis-ci.org/sanity-io/litter.svg?branch=master)](https://travis-ci.org/sanity-io/litter) + +# Litter + +**Litter is a pretty printer library for Go data structures to aid in debugging and testing.** + +--- + +Litter is provided by + + +
+ Sanity: The Headless CMS Construction Kit +
+ +--- + +Litter named for the fact that it outputs *literals*, which you *litter* your output with. As a side benefit, all Litter output is syntactically correct Go. You can use Litter to emit data during debug, and it's also really nice for "snapshot data" in unit tests, since it produces consistent, sorted output. Litter was inspired by [Spew](https://github.com/davecgh/go-spew), but focuses on terseness and readability. + +### Basic example + +This: + +```go +type Person struct { + Name string + Age int + Parent *Person +} + +litter.Dump(Person{ + Name: "Bob", + Age: 20, + Parent: &Person{ + Name: "Jane", + Age: 50, + }, +}) +``` + +will output: + +``` +Person{ + Name: "Bob", + Age: 20, + Parent: &Person{ + Name: "Jane", + Age: 50, + }, +} +``` + +### Use in tests + +Litter is a great alternative to JSON or YAML for providing "snapshots" or example data. For example: + +```go +func TestSearch(t *testing.T) { + result := DoSearch() + + actual := litterOpts.Sdump(result) + expected, err := ioutil.ReadFile("testdata.txt") + if err != nil { + // First run, write test data since it doesn't exist + if !os.IsNotExist(err) { + t.Error(err) + } + ioutil.Write("testdata.txt", actual, 0644) + actual = expected + } + if expected != actual { + t.Errorf("Expected %s, got %s", expected, actual) + } +} +``` + +The first run will use Litter to write the data to `testdata.txt`. On subsequent runs, the test will compare the data. Since Litter always provides a consistent view of a value, you can compare the strings directly. + +### Circular references + +Litter detects circular references or aliasing, and will replace additional references to the same object with aliases. 
For example: + +```go +type Circular struct { + Self *Circular +} + +selfref := Circular{} +selfref.Self = &selfref + +litter.Dump(selfref) +``` + +will output: + +``` +Circular { // p0 + Self: p0, +} +``` + +## Installation + +```bash +$ go get -u github.com/sanity-io/litter +``` + +## Quick start + +Add this import line to the file you're working in: + +```go +import "github.com/sanity-io/litter" +``` + +To dump a variable with full newlines, indentation, type, and aliasing information, use `Dump` or `Sdump`: + +```go +litter.Dump(myVar1) +str := litter.Sdump(myVar1) +``` + +### `litter.Dump(value, ...)` + +Dumps the data structure to STDOUT. + +### `litter.Sdump(value, ...)` + +Returns the dump as a string + +## Configuration + +You can configure litter globally by modifying the default `litter.Config` + +```go +// Strip all package names from types +litter.Config.StripPackageNames = true + +// Hide private struct fields from dumped structs +litter.Config.HidePrivateFields = true + +// Hide fields matched with given regexp if it is not nil. It is set up to hide fields generate with protoc-gen-go +litter.Config.FieldExclusions = regexp.MustCompile(`^(XXX_.*)$`) + +// Sets a "home" package. The package name will be stripped from all its types +litter.Config.HomePackage = "mypackage" + +// Sets separator used when multiple arguments are passed to Dump() or Sdump(). +litter.Config.Separator = "\n" + +// Use compact output: strip newlines and other unnecessary whitespace +litter.Config.Compact = true +``` + +### `litter.Options` + +Allows you to configure a local configuration of litter to allow for proper compartmentalization of state at the expense of some comfort: + +``` go + sq := litter.Options { + HidePrivateFields: true, + HomePackage: "thispack", + Separator: " ", + } + + sq.Dump("dumped", "with", "local", "settings") +``` + +## Custom dumpers + +Implement the interface Dumper on your types to take control of how your type is dumped. 
+ +``` go +type Dumper interface { + LitterDump(w io.Writer) +} +``` + +Just write your custom dump to the provided stream, using multiple lines divided by `"\n"` if you need. Litter +might indent your output according to context, and optionally decorate your first line with a pointer comment +where appropriate. + +A couple of examples from the test suite: + +``` go +type CustomMultiLineDumper struct {} + +func (cmld *CustomMultiLineDumper) LitterDump(w io.Writer) { + w.Write([]byte("{\n multi\n line\n}")) +} + +type CustomSingleLineDumper int + +func (csld CustomSingleLineDumper) LitterDump(w io.Writer) { + w.Write([]byte("")) +} +```` diff --git a/vendor/github.com/sanity-io/litter/dump.go b/vendor/github.com/sanity-io/litter/dump.go new file mode 100644 index 000000000..54d0fafe6 --- /dev/null +++ b/vendor/github.com/sanity-io/litter/dump.go @@ -0,0 +1,504 @@ +package litter + +import ( + "bytes" + "fmt" + "io" + "os" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" +) + +var ( + packageNameStripperRegexp = regexp.MustCompile(`\b[a-zA-Z_]+[a-zA-Z_0-9]+\.`) + compactTypeRegexp = regexp.MustCompile(`\s*([,;{}()])\s*`) +) + +// Dumper is the interface for implementing custom dumper for your types. +type Dumper interface { + LitterDump(w io.Writer) +} + +// Options represents configuration options for litter +type Options struct { + Compact bool + StripPackageNames bool + HidePrivateFields bool + HideZeroValues bool + FieldExclusions *regexp.Regexp + FieldFilter func(reflect.StructField, reflect.Value) bool + HomePackage string + Separator string + StrictGo bool + + // DisablePointerReplacement, if true, disables the replacing of pointer data with variable names + // when it's safe. This is useful for diffing two structures, where pointer variables would cause + // false changes. However, circular graphs are still detected and elided to avoid infinite output. 
+ DisablePointerReplacement bool +} + +// Config is the default config used when calling Dump +var Config = Options{ + StripPackageNames: false, + HidePrivateFields: true, + FieldExclusions: regexp.MustCompile(`^(XXX_.*)$`), // XXX_ is a prefix of fields generated by protoc-gen-go + Separator: " ", +} + +type dumpState struct { + w io.Writer + depth int + config *Options + pointers ptrmap + visitedPointers ptrmap + parentPointers ptrmap + currentPointerName string + homePackageRegexp *regexp.Regexp +} + +func (s *dumpState) write(b []byte) { + if _, err := s.w.Write(b); err != nil { + panic(err) + } +} + +func (s *dumpState) writeString(str string) { + s.write([]byte(str)) +} + +func (s *dumpState) indent() { + if !s.config.Compact { + s.write(bytes.Repeat([]byte(" "), s.depth)) + } +} + +func (s *dumpState) newlineWithPointerNameComment() { + if name := s.currentPointerName; name != "" { + if s.config.Compact { + s.write([]byte(fmt.Sprintf("/*%s*/", name))) + } else { + s.write([]byte(fmt.Sprintf(" // %s\n", name))) + } + s.currentPointerName = "" + return + } + if !s.config.Compact { + s.write([]byte("\n")) + } +} + +func (s *dumpState) dumpType(v reflect.Value) { + typeName := v.Type().String() + if s.config.StripPackageNames { + typeName = packageNameStripperRegexp.ReplaceAllLiteralString(typeName, "") + } else if s.homePackageRegexp != nil { + typeName = s.homePackageRegexp.ReplaceAllLiteralString(typeName, "") + } + if s.config.Compact { + typeName = compactTypeRegexp.ReplaceAllString(typeName, "$1") + } + s.write([]byte(typeName)) +} + +func (s *dumpState) dumpSlice(v reflect.Value) { + s.dumpType(v) + numEntries := v.Len() + if numEntries == 0 { + s.write([]byte("{}")) + if s.config.Compact { + s.write([]byte(";")) + } + s.newlineWithPointerNameComment() + return + } + s.write([]byte("{")) + s.newlineWithPointerNameComment() + s.depth++ + for i := 0; i < numEntries; i++ { + s.indent() + s.dumpVal(v.Index(i)) + if !s.config.Compact || i < numEntries-1 { + 
s.write([]byte(",")) + } + s.newlineWithPointerNameComment() + } + s.depth-- + s.indent() + s.write([]byte("}")) +} + +func (s *dumpState) dumpStruct(v reflect.Value) { + dumpPreamble := func() { + s.dumpType(v) + s.write([]byte("{")) + s.newlineWithPointerNameComment() + s.depth++ + } + preambleDumped := false + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + vtf := vt.Field(i) + if s.config.HidePrivateFields && vtf.PkgPath != "" || s.config.FieldExclusions != nil && s.config.FieldExclusions.MatchString(vtf.Name) { + continue + } + if s.config.FieldFilter != nil && !s.config.FieldFilter(vtf, v.Field(i)) { + continue + } + if s.config.HideZeroValues && isZeroValue(v.Field(i)) { + continue + } + if !preambleDumped { + dumpPreamble() + preambleDumped = true + } + s.indent() + s.write([]byte(vtf.Name)) + if s.config.Compact { + s.write([]byte(":")) + } else { + s.write([]byte(": ")) + } + s.dumpVal(v.Field(i)) + if !s.config.Compact || i < numFields-1 { + s.write([]byte(",")) + } + s.newlineWithPointerNameComment() + } + if preambleDumped { + s.depth-- + s.indent() + s.write([]byte("}")) + } else { + // There were no fields dumped + s.dumpType(v) + s.write([]byte("{}")) + } +} + +func (s *dumpState) dumpMap(v reflect.Value) { + s.dumpType(v) + s.write([]byte("{")) + s.newlineWithPointerNameComment() + s.depth++ + keys := v.MapKeys() + sort.Sort(mapKeySorter{ + keys: keys, + options: s.config, + }) + numKeys := len(keys) + for i, key := range keys { + s.indent() + s.dumpVal(key) + if s.config.Compact { + s.write([]byte(":")) + } else { + s.write([]byte(": ")) + } + s.dumpVal(v.MapIndex(key)) + if !s.config.Compact || i < numKeys-1 { + s.write([]byte(",")) + } + s.newlineWithPointerNameComment() + } + s.depth-- + s.indent() + s.write([]byte("}")) +} + +func (s *dumpState) dumpFunc(v reflect.Value) { + parts := strings.Split(runtime.FuncForPC(v.Pointer()).Name(), "/") + name := parts[len(parts)-1] + + // Anonymous function + if 
strings.Count(name, ".") > 1 { + s.dumpType(v) + } else { + if s.config.StripPackageNames { + name = packageNameStripperRegexp.ReplaceAllLiteralString(name, "") + } else if s.homePackageRegexp != nil { + name = s.homePackageRegexp.ReplaceAllLiteralString(name, "") + } + if s.config.Compact { + name = compactTypeRegexp.ReplaceAllString(name, "$1") + } + s.write([]byte(name)) + } +} + +func (s *dumpState) dumpCustom(v reflect.Value) { + // Run the custom dumper buffering the output + buf := new(bytes.Buffer) + dumpFunc := v.MethodByName("LitterDump") + dumpFunc.Call([]reflect.Value{reflect.ValueOf(buf)}) + + // Dump the type + s.dumpType(v) + + if s.config.Compact { + s.write(buf.Bytes()) + return + } + + // Now output the dump taking care to apply the current indentation-level + // and pointer name comments. + var err error + firstLine := true + for err == nil { + var lineBytes []byte + lineBytes, err = buf.ReadBytes('\n') + line := strings.TrimRight(string(lineBytes), " \n") + + if err != nil && err != io.EOF { + break + } + // Do not indent first line + if firstLine { + firstLine = false + } else { + s.indent() + } + s.write([]byte(line)) + + // At EOF we're done + if err == io.EOF { + return + } + s.newlineWithPointerNameComment() + } + panic(err) +} + +func (s *dumpState) dump(value interface{}) { + if value == nil { + printNil(s.w) + return + } + v := reflect.ValueOf(value) + s.dumpVal(v) +} + +func (s *dumpState) descendIntoPossiblePointer(value reflect.Value, f func()) { + canonicalize := true + if isPointerValue(value) { + ptr := value.Pointer() + + // If elision disabled, and this is not a circular reference, don't canonicalize + if s.config.DisablePointerReplacement && s.parentPointers.add(ptr) { + canonicalize = false + } + + // Add to stack of pointers we're recursively descending into + s.parentPointers.add(ptr) + defer s.parentPointers.remove(ptr) + } + + if !canonicalize { + pointerName, _ := s.pointerNameFor(value) + s.currentPointerName = 
pointerName + f() + return + } + + pointerName, firstVisit := s.pointerNameFor(value) + if pointerName == "" { + f() + return + } + if firstVisit { + s.currentPointerName = pointerName + f() + return + } + s.write([]byte(pointerName)) +} + +func (s *dumpState) dumpVal(value reflect.Value) { + if value.Kind() == reflect.Ptr && value.IsNil() { + s.write([]byte("nil")) + return + } + + v := deInterface(value) + kind := v.Kind() + + // Handle custom dumpers + dumperType := reflect.TypeOf((*Dumper)(nil)).Elem() + if v.Type().Implements(dumperType) { + s.descendIntoPossiblePointer(v, func() { + s.dumpCustom(v) + }) + return + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + s.write([]byte("")) + + case reflect.Bool: + printBool(s.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(s.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(s.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(s.w, v.Float(), 32) + + case reflect.Float64: + printFloat(s.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(s.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(s.w, v.Complex(), 64) + + case reflect.String: + s.write([]byte(strconv.Quote(v.String()))) + + case reflect.Slice: + if v.IsNil() { + printNil(s.w) + break + } + fallthrough + + case reflect.Array: + s.descendIntoPossiblePointer(v, func() { + s.dumpSlice(v) + }) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. 
+ if v.IsNil() { + printNil(s.w) + } + + case reflect.Ptr: + s.descendIntoPossiblePointer(v, func() { + if s.config.StrictGo { + s.writeString(fmt.Sprintf("(func(v %s) *%s { return &v })(", v.Elem().Type(), v.Elem().Type())) + s.dumpVal(v.Elem()) + s.writeString(")") + } else { + s.writeString("&") + s.dumpVal(v.Elem()) + } + }) + + case reflect.Map: + s.descendIntoPossiblePointer(v, func() { + s.dumpMap(v) + }) + + case reflect.Struct: + s.dumpStruct(v) + + case reflect.Func: + s.dumpFunc(v) + + default: + if v.CanInterface() { + s.writeString(fmt.Sprintf("%v", v.Interface())) + } else { + s.writeString(fmt.Sprintf("%v", v.String())) + } + } +} + +// registers that the value has been visited and checks to see if it is one of the +// pointers we will see multiple times. If it is, it returns a temporary name for this +// pointer. It also returns a boolean value indicating whether this is the first time +// this name is returned so the caller can decide whether the contents of the pointer +// has been dumped before or not. +func (s *dumpState) pointerNameFor(v reflect.Value) (string, bool) { + if isPointerValue(v) { + ptr := v.Pointer() + if info, ok := s.pointers[ptr]; ok { + firstVisit := s.visitedPointers.add(ptr) + return fmt.Sprintf("p%d", info.order), firstVisit + } + } + return "", false +} + +// prepares a new state object for dumping the provided value +func newDumpState(value interface{}, options *Options, writer io.Writer) *dumpState { + result := &dumpState{ + config: options, + pointers: mapReusedPointers(reflect.ValueOf(value)), + w: writer, + } + + if options.HomePackage != "" { + result.homePackageRegexp = regexp.MustCompile(fmt.Sprintf("\\b%s\\.", options.HomePackage)) + } + + return result +} + +// Dump a value to stdout +func Dump(value ...interface{}) { + (&Config).Dump(value...) +} + +// Sdump dumps a value to a string +func Sdump(value ...interface{}) string { + return (&Config).Sdump(value...) 
+} + +// Dump a value to stdout according to the options +func (o Options) Dump(values ...interface{}) { + for i, value := range values { + state := newDumpState(value, &o, os.Stdout) + if i > 0 { + state.write([]byte(o.Separator)) + } + state.dump(value) + } + _, _ = os.Stdout.Write([]byte("\n")) +} + +// Sdump dumps a value to a string according to the options +func (o Options) Sdump(values ...interface{}) string { + buf := new(bytes.Buffer) + for i, value := range values { + if i > 0 { + _, _ = buf.Write([]byte(o.Separator)) + } + state := newDumpState(value, &o, buf) + state.dump(value) + } + return buf.String() +} + +type mapKeySorter struct { + keys []reflect.Value + options *Options +} + +func (s mapKeySorter) Len() int { + return len(s.keys) +} + +func (s mapKeySorter) Swap(i, j int) { + s.keys[i], s.keys[j] = s.keys[j], s.keys[i] +} + +func (s mapKeySorter) Less(i, j int) bool { + ibuf := new(bytes.Buffer) + jbuf := new(bytes.Buffer) + newDumpState(s.keys[i], s.options, ibuf).dumpVal(s.keys[i]) + newDumpState(s.keys[j], s.options, jbuf).dumpVal(s.keys[j]) + return ibuf.String() < jbuf.String() +} diff --git a/vendor/github.com/sanity-io/litter/go.mod b/vendor/github.com/sanity-io/litter/go.mod new file mode 100644 index 000000000..c1c20c939 --- /dev/null +++ b/vendor/github.com/sanity-io/litter/go.mod @@ -0,0 +1,9 @@ +module github.com/sanity-io/litter + +go 1.14 + +require ( + github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b // indirect + github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0 // indirect + github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312 +) diff --git a/vendor/github.com/sanity-io/litter/go.sum b/vendor/github.com/sanity-io/litter/go.sum new file mode 100644 index 000000000..800ae0053 --- /dev/null +++ b/vendor/github.com/sanity-io/litter/go.sum @@ -0,0 +1,6 @@ +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b h1:XxMZvQZtTXpWMNWK82vdjCLCe7uGMFXdTsJH0v3Hkvw= +github.com/davecgh/go-spew 
v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0 h1:GD+A8+e+wFkqje55/2fOVnZPkoDIu1VooBWfNrnY8Uo= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312 h1:UsFdQ3ZmlzS0BqZYGxvYaXvFGUbCmPGy8DM7qWJJiIQ= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/sanity-io/litter/pointers.go b/vendor/github.com/sanity-io/litter/pointers.go new file mode 100644 index 000000000..cbb417933 --- /dev/null +++ b/vendor/github.com/sanity-io/litter/pointers.go @@ -0,0 +1,126 @@ +package litter + +import ( + "reflect" + "sort" +) + +// mapReusedPointers takes a structure, and recursively maps all pointers mentioned in the tree, +// detecting circular references, and providing a list of all pointers that was referenced at +// least twice by the provided structure. +func mapReusedPointers(v reflect.Value) ptrmap { + pm := &pointerVisitor{} + pm.consider(v) + return pm.reused +} + +// A map of pointers. +type ( + ptrinfo struct { + order int + } + ptrmap map[uintptr]ptrinfo +) + +// Returns true if contains a pointer. +func (pm *ptrmap) contains(p uintptr) bool { + if *pm != nil { + _, ok := (*pm)[p] + return ok + } + return false +} + +// Removes a pointer. +func (pm *ptrmap) remove(p uintptr) { + if *pm != nil { + delete(*pm, p) + } +} + +// Adds a pointer. +func (pm *ptrmap) add(p uintptr) bool { + if pm.contains(p) { + return false + } + pm.put(p) + return true +} + +// Adds a pointer (slow path). 
+func (pm *ptrmap) put(p uintptr) { + if *pm == nil { + *pm = make(map[uintptr]ptrinfo, 31) + } + (*pm)[p] = ptrinfo{order: len(*pm)} +} + +type pointerVisitor struct { + pointers ptrmap + reused ptrmap +} + +// Recursively consider v and each of its children, updating the map according to the +// semantics of MapReusedPointers +func (pv *pointerVisitor) consider(v reflect.Value) { + if v.Kind() == reflect.Invalid { + return + } + if isPointerValue(v) && v.Pointer() != 0 { // pointer is 0 for unexported fields + if pv.tryAddPointer(v.Pointer()) { + // No use descending inside this value, since it have been seen before and all its descendants + // have been considered + return + } + } + + // Now descend into any children of this value + switch v.Kind() { + case reflect.Slice, reflect.Array: + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + pv.consider(v.Index(i)) + } + + case reflect.Interface: + pv.consider(v.Elem()) + + case reflect.Ptr: + pv.consider(v.Elem()) + + case reflect.Map: + keys := v.MapKeys() + sort.Sort(mapKeySorter{ + keys: keys, + options: &Config, + }) + for _, key := range keys { + pv.consider(v.MapIndex(key)) + } + + case reflect.Struct: + numFields := v.NumField() + for i := 0; i < numFields; i++ { + pv.consider(v.Field(i)) + } + } +} + +// addPointer to the pointerMap, update reusedPointers. Returns true if pointer was reused +func (pv *pointerVisitor) tryAddPointer(p uintptr) bool { + // Is this allready known to be reused? + if pv.reused.contains(p) { + return true + } + + // Have we seen it once before? 
+ if pv.pointers.contains(p) { + // Add it to the register of pointers we have seen more than once + pv.reused.add(p) + return true + } + + // This pointer was new to us + pv.pointers.add(p) + return false +} diff --git a/vendor/github.com/sanity-io/litter/print.go b/vendor/github.com/sanity-io/litter/print.go new file mode 100644 index 000000000..700646dd3 --- /dev/null +++ b/vendor/github.com/sanity-io/litter/print.go @@ -0,0 +1,44 @@ +package litter + +import ( + "io" + "strconv" +) + +func printBool(w io.Writer, value bool) { + if value { + w.Write([]byte("true")) + return + } + w.Write([]byte("false")) +} + +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +func printComplex(w io.Writer, c complex128, floatPrecision int) { + w.Write([]byte("complex")) + printInt(w, int64(floatPrecision*2), 10) + r := real(c) + w.Write([]byte("(")) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write([]byte("+")) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write([]byte("i)")) +} + +func printNil(w io.Writer) { + w.Write([]byte("nil")) +} diff --git a/vendor/github.com/sanity-io/litter/util.go b/vendor/github.com/sanity-io/litter/util.go new file mode 100644 index 000000000..58be4751e --- /dev/null +++ b/vendor/github.com/sanity-io/litter/util.go @@ -0,0 +1,28 @@ +package litter + +import ( + "reflect" +) + +// deInterface returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func deInterface(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +func isPointerValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return true + } + return false +} + +func isZeroValue(v reflect.Value) bool { + return (isPointerValue(v) && v.IsNil()) || + (v.IsValid() && v.CanInterface() && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 774eb3824..e8eadfc4e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -70,6 +70,8 @@ github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a github.com/r3labs/diff +# github.com/sanity-io/litter v1.3.0 +github.com/sanity-io/litter # github.com/satori/go.uuid v1.1.0 github.com/satori/go.uuid # github.com/spf13/pflag v1.0.5 From ee083eb2d999507a4ad6cb76e5a26c7a598c2f44 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 28 Jan 2021 14:32:54 +0300 Subject: [PATCH 22/78] dev: minor function naming --- pkg/controller/chi/worker.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index a0354d375..e46e98725 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -421,9 +421,9 @@ func (w *worker) reconcileCHIAuxObjectsPreliminary(chi *chop.ClickHouseInstallat // 2. CHI common ConfigMap without update - create only w.reconcileCHIConfigMapCommon(chi, nil, false) // 3. 
CHI users ConfigMap - w.reconcileCHIConfigMapUsers(chi, nil, true) + w.reconcileCHIConfigMapUsers(chi, nil, true) - return nil + return nil } // reconcileCHIAuxObjectsFinal reconciles CHI global objects @@ -562,7 +562,7 @@ func (w *worker) migrateTables(host *chop.ChiHost) bool { // Exclude host from ClickHouse clusters if required func (w *worker) excludeHost(host *chop.ChiHost) error { - if w.ifExcludeHost(host) { + if w.shouldExcludeHost(host) { w.a.V(1). Info("Exclude from cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) @@ -583,7 +583,6 @@ func (w *worker) includeHost(host *chop.ChiHost) error { return nil } - func (w *worker) excludeHostFromService(host *chop.ChiHost) { w.c.deleteLabelReady(host) } @@ -619,15 +618,15 @@ func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost) { chop.NewChiHostReconcileAttributes().SetAdd(), ), ) - // Add host to the cluster config (always) and wait for ClickHouse to pick-up the change + // Add host to the cluster config (always) and wait for ClickHouse to pick-up the change _ = w.reconcileCHIConfigMapCommon(host.CHI, options, true) if w.waitIncludeHost(host) { _ = w.waitHostInCluster(host) } } -// determines whether host to be excluded from cluster -func (w *worker) ifExcludeHost(host *chop.ChiHost) bool { +// shouldExcludeHost determines whether host to be excluded from cluster +func (w *worker) shouldExcludeHost(host *chop.ChiHost) bool { status := host.ReconcileAttributes.GetStatus() if (status == chop.StatefulSetStatusNew) || (status == chop.StatefulSetStatusSame) { // No need to exclude for new and non-modified StatefulSets @@ -639,7 +638,7 @@ func (w *worker) ifExcludeHost(host *chop.ChiHost) bool { return false } - return true + return true } // determines whether reconciler should wait for host to be excluded from cluster @@ -680,7 +679,6 @@ func (w *worker) waitIncludeHost(host *chop.ChiHost) bool { return 
w.c.chop.Config().ReconcileWaitInclude } - // waitHostInCluster waits until host is a member of at least one ClickHouse cluster func (w *worker) waitHostInCluster(host *chop.ChiHost) error { return w.c.pollHost(host, nil, w.schemer.IsHostInCluster) From d91a514043aed458176398b66f61243f5e7b10f6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 28 Jan 2021 15:20:16 +0300 Subject: [PATCH 23/78] dev: general announcer metadata --- pkg/announcer/announcer.go | 50 +++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index 4b7446d9a..1e228c04d 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -15,6 +15,7 @@ package announcer import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "strconv" log "github.com/golang/glog" @@ -38,9 +39,11 @@ type Announcer struct { // prefix specifies prefix used by logger prefix string + // meta specifies meta-information of the object, if required + meta string } -// announcer which would be used in top-level functions, can be called as default +// announcer which would be used in top-level functions, can be called as a 'default announcer' var announcer Announcer // init creates default announcer @@ -48,6 +51,7 @@ func init() { announcer = New() } +// skip specifies file name which to be skipped from address const skip = "announcer.go" // New creates new announcer @@ -70,6 +74,7 @@ func V(level log.Level) Announcer { return announcer.V(level) } +// F adds function name func (a Announcer) F() Announcer { b := a b.writeLog = true @@ -77,10 +82,12 @@ func (a Announcer) F() Announcer { return b } +// F adds function name func F() Announcer { return announcer.F() } +// L adds line number func (a Announcer) L() Announcer { b := a b.writeLog = true @@ -88,10 +95,12 @@ func (a Announcer) L() Announcer { return b } +// L adds line number func L() Announcer { return announcer.L() } +// FL adds filename func (a 
Announcer) FL() Announcer { b := a b.writeLog = true @@ -99,10 +108,12 @@ func (a Announcer) FL() Announcer { return b } +// FL adds filename func FL() Announcer { return announcer.FL() } +// A adds full code address as 'file:line:function' func (a Announcer) A() Announcer { b := a b.writeLog = true @@ -110,10 +121,12 @@ func (a Announcer) A() Announcer { return b } +// A adds full code address as 'file:line:function' func A() Announcer { return announcer.A() } +// S adds 'start of the function' tag func (a Announcer) S() Announcer { b := a b.writeLog = true @@ -122,10 +135,12 @@ func (a Announcer) S() Announcer { return b } +// S adds 'start of the function' tag func S() Announcer { return announcer.S() } +// E adds 'end of the function' tag func (a Announcer) E() Announcer { b := a b.writeLog = true @@ -134,14 +149,43 @@ func (a Announcer) E() Announcer { return b } +// E adds 'end of the function' tag func E() Announcer { return announcer.E() } +// M adds object meta as 'namespace/name' +func (a Announcer) M(m *v1.ObjectMeta) Announcer { + if m == nil { + return a + } + b := a + b.writeLog = true + b.meta = m.Namespace + "/" + m.Name + return b +} + +// M adds object meta as 'namespace/name' +func M(m *v1.ObjectMeta) Announcer { + return announcer.M(m) +} + +// P triggers log to print line +func (a Announcer) P() { + Info("") +} + +// P triggers log to print line +func P() { + announcer.P() +} + func (a Announcer) prependFormat(format string) string { - // Format is expected to be 'file:line:function:prefix:_old_format_' + // Format is expected to be 'file:line:function:prefix:meta:_old_format_' // Prepend each component in reverse order - + if a.meta != "" { + format = a.meta + ":" + format + } if a.prefix != "" { format = a.prefix + ":" + format } From 5043eecdc271a913d39e405a6b2036661dd61bc4 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 28 Jan 2021 15:37:46 +0300 Subject: [PATCH 24/78] dev: worker announcer --- pkg/announcer/announcer.go | 44 
++++----- pkg/controller/chi/announcer.go | 161 +++++++++++++++++++++----------- 2 files changed, 130 insertions(+), 75 deletions(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index 1e228c04d..aa91fbc11 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -172,7 +172,7 @@ func M(m *v1.ObjectMeta) Announcer { // P triggers log to print line func (a Announcer) P() { - Info("") + a.Info("") } // P triggers log to print line @@ -180,27 +180,6 @@ func P() { announcer.P() } -func (a Announcer) prependFormat(format string) string { - // Format is expected to be 'file:line:function:prefix:meta:_old_format_' - // Prepend each component in reverse order - if a.meta != "" { - format = a.meta + ":" + format - } - if a.prefix != "" { - format = a.prefix + ":" + format - } - if a.function != "" { - format = a.function + ":" + format - } - if a.line != 0 { - format = strconv.Itoa(a.line) + ":" + format - } - if a.file != "" { - format = a.file + ":" + format - } - return format -} - // Info is inspired by log.Infof() func (a Announcer) Info(format string, args ...interface{}) { // Produce classic log line @@ -284,3 +263,24 @@ func (a Announcer) Fatal(format string, args ...interface{}) { func Fatal(format string, args ...interface{}) { announcer.Fatal(format, args...) 
} + +func (a Announcer) prependFormat(format string) string { + // Format is expected to be 'file:line:function:prefix:meta:_old_format_' + // Prepend each component in reverse order + if a.meta != "" { + format = a.meta + ":" + format + } + if a.prefix != "" { + format = a.prefix + ":" + format + } + if a.function != "" { + format = a.function + ":" + format + } + if a.line != 0 { + format = strconv.Itoa(a.line) + ":" + format + } + if a.file != "" { + format = a.file + ":" + format + } + return format +} diff --git a/pkg/controller/chi/announcer.go b/pkg/controller/chi/announcer.go index 0b21f19fa..c8cb6153c 100644 --- a/pkg/controller/chi/announcer.go +++ b/pkg/controller/chi/announcer.go @@ -16,10 +16,11 @@ package chi import ( "fmt" + log "github.com/golang/glog" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" a "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" - log "github.com/golang/glog" ) // Announcer handler all log/event/status messages going outside of controller/worker @@ -56,82 +57,67 @@ func NewAnnouncer() Announcer { } } -// WithController specifies controller to be used in case `chi`-related announces need to be done -func (a Announcer) WithController(ctrl *Controller) Announcer { +// V is inspired by log.V() +func (a Announcer) V(level log.Level) Announcer { b := a - b.ctrl = ctrl + b.Announcer = b.Announcer.V(level) return b } -// V is inspired by log.V() -func (a Announcer) V(level log.Level) Announcer { +// F adds function name +func (a Announcer) F() Announcer { b := a - b.Announcer = b.Announcer.V(level) + b.Announcer = b.Announcer.F() return b } -// WithEvent is used in chained calls in order to produce event into `chi` -func (a Announcer) WithEvent( - chi *chop.ClickHouseInstallation, - action string, - reason string, -) Announcer { +// L adds line number +func (a Announcer) L() Announcer { b := a - if chi == nil { - b.writeEvent = false - b.chi = nil - 
b.eventAction = "" - b.eventReason = "" - } else { - b.writeEvent = true - b.chi = chi - b.eventAction = action - b.eventReason = reason - } + b.Announcer = b.Announcer.L() return b } -// WithStatusAction is used in chained calls in order to produce action into `ClickHouseInstallation.Status.Action` -func (a Announcer) WithStatusAction(chi *chop.ClickHouseInstallation) Announcer { +// FL adds filename +func (a Announcer) FL() Announcer { b := a - if chi == nil { - b.chi = nil - b.writeStatusAction = false - b.writeStatusActions = false - } else { - b.chi = chi - b.writeStatusAction = true - b.writeStatusActions = true - } + b.Announcer = b.Announcer.FL() return b } -// WithStatusActions is used in chained calls in order to produce action in ClickHouseInstallation.Status.Actions -func (a Announcer) WithStatusActions(chi *chop.ClickHouseInstallation) Announcer { +// A adds full code address as 'file:line:function' +func (a Announcer) A() Announcer { b := a - if chi == nil { - b.chi = nil - b.writeStatusActions = false - } else { - b.chi = chi - b.writeStatusActions = true - } + b.Announcer = b.Announcer.A() return b } -// WithStatusAction is used in chained calls in order to produce error in ClickHouseInstallation.Status.Error -func (a Announcer) WithStatusError(chi *chop.ClickHouseInstallation) Announcer { +// S adds 'start of the function' tag +func (a Announcer) S() Announcer { b := a - if chi == nil { - b.chi = nil - b.writeStatusError = false - } else { - b.chi = chi - b.writeStatusError = true - } + b.Announcer = b.Announcer.S() return b } +// E adds 'end of the function' tag +func (a Announcer) E() Announcer { + b := a + b.Announcer = b.Announcer.E() + return b +} + +// M adds object meta as 'namespace/name' +func (a Announcer) M(m *v1.ObjectMeta) Announcer { + b := a + b.Announcer = b.Announcer.M(m) + return b +} + +// P triggers log to print line +func (a Announcer) P() { + a.Info("") +} + // Info is inspired by log.Infof() func (a Announcer) Info(format 
string, args ...interface{}) { // Produce classic log line @@ -204,6 +190,75 @@ func (a Announcer) Fatal(format string, args ...interface{}) { a.Announcer.Fatal(format, args...) } +// WithController specifies controller to be used in case `chi`-related announces need to be done +func (a Announcer) WithController(ctrl *Controller) Announcer { + b := a + b.ctrl = ctrl + return b +} + +// WithEvent is used in chained calls in order to produce event into `chi` +func (a Announcer) WithEvent( + chi *chop.ClickHouseInstallation, + action string, + reason string, +) Announcer { + b := a + if chi == nil { + b.writeEvent = false + b.chi = nil + b.eventAction = "" + b.eventReason = "" + } else { + b.writeEvent = true + b.chi = chi + b.eventAction = action + b.eventReason = reason + } + return b +} + +// WithStatusAction is used in chained calls in order to produce action into `ClickHouseInstallation.Status.Action` +func (a Announcer) WithStatusAction(chi *chop.ClickHouseInstallation) Announcer { + b := a + if chi == nil { + b.chi = nil + b.writeStatusAction = false + b.writeStatusActions = false + } else { + b.chi = chi + b.writeStatusAction = true + b.writeStatusActions = true + } + return b +} + +// WithStatusActions is used in chained calls in order to produce action in ClickHouseInstallation.Status.Actions +func (a Announcer) WithStatusActions(chi *chop.ClickHouseInstallation) Announcer { + b := a + if chi == nil { + b.chi = nil + b.writeStatusActions = false + } else { + b.chi = chi + b.writeStatusActions = true + } + return b +} + +// WithStatusAction is used in chained calls in order to produce error in ClickHouseInstallation.Status.Error +func (a Announcer) WithStatusError(chi *chop.ClickHouseInstallation) Announcer { + b := a + if chi == nil { + b.chi = nil + b.writeStatusError = false + } else { + b.chi = chi + b.writeStatusError = true + } + return b +} + // chiCapable checks whether announcer is capable to produce chi-based announcements func (a Announcer) 
chiCapable() bool { return (a.ctrl != nil) && (a.chi != nil) From a267a8e2634f654ccf6a8a0ce74752c26a3b761b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 28 Jan 2021 17:08:59 +0300 Subject: [PATCH 25/78] dev: sts version --- pkg/model/creator.go | 2 +- pkg/util/dump.go | 7 +++++-- pkg/util/hash.go | 7 +++++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 40fa784cc..b30a7c4f0 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -328,7 +328,7 @@ func (c *Creator) setupStatefulSetVersion(statefulSet *apps.StatefulSet) { LabelStatefulSetVersion: util.Fingerprint(statefulSet), }, ) - log.V(3).Info("StatefulSet(%s/%s)\n%s", statefulSet.Namespace, statefulSet.Name, util.Dump(statefulSet)) + log.V(2).Info("StatefulSet(%s/%s)\n%s", statefulSet.Namespace, statefulSet.Name, util.Dump(statefulSet)) } // GetStatefulSetVersion diff --git a/pkg/util/dump.go b/pkg/util/dump.go index e1e33595e..a63f63eed 100644 --- a/pkg/util/dump.go +++ b/pkg/util/dump.go @@ -14,8 +14,11 @@ package util -import "github.com/sanity-io/litter" +import dumper "github.com/sanity-io/litter" func Dump(obj interface{}) string { - return litter.Sdump(obj) + d := dumper.Options{ + Separator: " ", + } + return d.Sdump(obj) } diff --git a/pkg/util/hash.go b/pkg/util/hash.go index a5e94763a..977d333db 100644 --- a/pkg/util/hash.go +++ b/pkg/util/hash.go @@ -22,7 +22,7 @@ import ( "fmt" "hash/fnv" - "github.com/sanity-io/litter" + dumper "github.com/sanity-io/litter" // "github.com/davecgh/go-spew/spew" ) @@ -40,7 +40,10 @@ func serializeUnrepeatable(obj interface{}) []byte { func serializeRepeatable(obj interface{}) []byte { //s := spew.NewDefaultConfig() //s.SortKeys = true - return []byte(litter.Sdump(obj)) + d := dumper.Options{ + Separator: " ", + } + return []byte(d.Sdump(obj)) } func HashIntoString(b []byte) string { From aaec18fcdfbb61f481dda9dbf9a579809f7ce283 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko 
Date: Thu, 28 Jan 2021 19:34:03 +0300 Subject: [PATCH 26/78] docs: bring up-to-date replication table create statement --- docs/replication_setup.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/replication_setup.md b/docs/replication_setup.md index 1f2c64c09..be7e84019 100644 --- a/docs/replication_setup.md +++ b/docs/replication_setup.md @@ -73,7 +73,9 @@ CREATE TABLE events_local on cluster '{cluster}' ( event_type Int32, article_id Int32, title String -) engine=ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}', event_date, (event_type, article_id), 8192); +) engine=ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}') +PARTITION BY toYYYYMM(event_date) +ORDER BY (event_type, article_id); ``` ```sql From 8078af65e33826719d3047edca1b11b2503e1dc6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 29 Jan 2021 12:29:10 +0300 Subject: [PATCH 27/78] dev: announcer minor --- pkg/announcer/announcer.go | 4 ++-- pkg/controller/chi/announcer.go | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index aa91fbc11..58cb82960 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -15,10 +15,10 @@ package announcer import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "strconv" log "github.com/golang/glog" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/altinity/clickhouse-operator/pkg/util" ) @@ -265,7 +265,7 @@ func Fatal(format string, args ...interface{}) { } func (a Announcer) prependFormat(format string) string { - // Format is expected to be 'file:line:function:prefix:meta:_old_format_' + // Result format is expected to be 'file:line:function:prefix:meta:_start_format_' // Prepend each component in reverse order if a.meta != "" { format = a.meta + ":" + format diff --git a/pkg/controller/chi/announcer.go 
b/pkg/controller/chi/announcer.go index c8cb6153c..763f05d9b 100644 --- a/pkg/controller/chi/announcer.go +++ b/pkg/controller/chi/announcer.go @@ -16,6 +16,7 @@ package chi import ( "fmt" + log "github.com/golang/glog" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" From 15bd6f0b59f3fc315e9913f5ab795142a26c5704 Mon Sep 17 00:00:00 2001 From: alz Date: Tue, 2 Feb 2021 09:50:17 +0300 Subject: [PATCH 28/78] Use Ordinary database engine by default --- config/users.d/03-database-ordinary.xml | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 config/users.d/03-database-ordinary.xml diff --git a/config/users.d/03-database-ordinary.xml b/config/users.d/03-database-ordinary.xml new file mode 100644 index 000000000..2a76b2212 --- /dev/null +++ b/config/users.d/03-database-ordinary.xml @@ -0,0 +1,8 @@ + + + + + Ordinary + + + \ No newline at end of file From 3176a30ada8cb20d62eb6674ac9d2df0285206ee Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 14:59:22 +0300 Subject: [PATCH 29/78] dev: clarify logger --- pkg/announcer/announcer.go | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index 58cb82960..097919b58 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -268,19 +268,39 @@ func (a Announcer) prependFormat(format string) string { // Result format is expected to be 'file:line:function:prefix:meta:_start_format_' // Prepend each component in reverse order if a.meta != "" { - format = a.meta + ":" + format + if format == "" { + format = a.meta + } else { + format = a.meta + ":" + format + } } if a.prefix != "" { - format = a.prefix + ":" + format + if format == "" { + format = a.prefix + } else { + format = a.prefix + ":" + format + } } if a.function != "" { - format = a.function + ":" + format + if format == "" { + format = a.function + "()" + } else { + format = a.function + "()" + ":" + format + } } if a.line != 0 
{ - format = strconv.Itoa(a.line) + ":" + format + if format == "" { + format = strconv.Itoa(a.line) + } else { + format = strconv.Itoa(a.line) + ":" + format + } } if a.file != "" { - format = a.file + ":" + format + if format == "" { + format = a.file + } else { + format = a.file + ":" + format + } } return format } From aa47a5ece0a302ef1507ad3c3d7531fdd6ffecd2 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 15:01:28 +0300 Subject: [PATCH 30/78] env: rebuild manifests --- deploy/dev/clickhouse-operator-install-dev.yaml | 9 +++++++++ .../operator/clickhouse-operator-install-deployment.yaml | 9 +++++++++ .../clickhouse-operator-install-template-deployment.yaml | 9 +++++++++ .../operator/clickhouse-operator-install-template.yaml | 9 +++++++++ deploy/operator/clickhouse-operator-install.yaml | 9 +++++++++ 5 files changed, 45 insertions(+) diff --git a/deploy/dev/clickhouse-operator-install-dev.yaml b/deploy/dev/clickhouse-operator-install-dev.yaml index 11ff2b733..a5e49b26a 100644 --- a/deploy/dev/clickhouse-operator-install-dev.yaml +++ b/deploy/dev/clickhouse-operator-install-dev.yaml @@ -1826,6 +1826,15 @@ data: + 03-database-ordinary.xml: | + + + + + Ordinary + + + --- # Possible Template Parameters: # diff --git a/deploy/operator/clickhouse-operator-install-deployment.yaml b/deploy/operator/clickhouse-operator-install-deployment.yaml index bd5cd9a9d..95e93620e 100644 --- a/deploy/operator/clickhouse-operator-install-deployment.yaml +++ b/deploy/operator/clickhouse-operator-install-deployment.yaml @@ -349,6 +349,15 @@ data: + 03-database-ordinary.xml: | + + + + + Ordinary + + + --- # Possible Template Parameters: # diff --git a/deploy/operator/clickhouse-operator-install-template-deployment.yaml b/deploy/operator/clickhouse-operator-install-template-deployment.yaml index e4dbb897a..44ec9067b 100644 --- a/deploy/operator/clickhouse-operator-install-template-deployment.yaml +++ 
b/deploy/operator/clickhouse-operator-install-template-deployment.yaml @@ -354,6 +354,15 @@ data: + 03-database-ordinary.xml: | + + + + + Ordinary + + + --- # Possible Template Parameters: # diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index 1c666d872..4db52974f 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -1826,6 +1826,15 @@ data: + 03-database-ordinary.xml: | + + + + + Ordinary + + + --- # Possible Template Parameters: # diff --git a/deploy/operator/clickhouse-operator-install.yaml b/deploy/operator/clickhouse-operator-install.yaml index cf765609d..5b8ad9e10 100644 --- a/deploy/operator/clickhouse-operator-install.yaml +++ b/deploy/operator/clickhouse-operator-install.yaml @@ -1826,6 +1826,15 @@ data: + 03-database-ordinary.xml: | + + + + + Ordinary + + + --- # Possible Template Parameters: # From 868b73cda81e1965e102e700537565b46ec9a443 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 15:12:54 +0300 Subject: [PATCH 31/78] dev: simplify logging --- cmd/operator/app/clickhouse_operator.go | 4 ++-- pkg/chop/chop.go | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/operator/app/clickhouse_operator.go b/cmd/operator/app/clickhouse_operator.go index 09201c0d5..0a083382b 100644 --- a/cmd/operator/app/clickhouse_operator.go +++ b/cmd/operator/app/clickhouse_operator.go @@ -84,8 +84,8 @@ func Run() { os.Exit(0) } - log.S().Info("operator") - defer log.E().Info("operator") + log.S().P() + defer log.E().P() if debugRequest { kubeInformerFactoryResyncPeriod = defaultInformerFactoryResyncDebugPeriod diff --git a/pkg/chop/chop.go b/pkg/chop/chop.go index 7aa9e70c0..ccf8197d7 100644 --- a/pkg/chop/chop.go +++ b/pkg/chop/chop.go @@ -49,26 +49,22 @@ func (c *CHOp) Config() *v1.OperatorConfig { func (c *CHOp) SetupLog() { updated := false if 
c.Config().Logtostderr != "" { - log.V(1).Info("Log option cur value %s=%s", "logtostderr", flag.Lookup("logtostderr").Value) - log.V(1).Info("Log option new value %s=%s", "logtostderr", c.Config().Logtostderr) + c.logUpdate("logtostderr", c.Config().Logtostderr) updated = true _ = flag.Set("logtostderr", c.Config().Logtostderr) } if c.Config().Alsologtostderr != "" { - log.V(1).Info("Log option cur value %s=%s", "alsologtostderr", flag.Lookup("alsologtostderr").Value) - log.V(1).Info("Log option new value %s=%s", "alsologtostderr", c.Config().Alsologtostderr) + c.logUpdate("alsologtostderr", c.Config().Alsologtostderr) updated = true _ = flag.Set("alsologtostderr", c.Config().Alsologtostderr) } if c.Config().Stderrthreshold != "" { - log.V(1).Info("Log option cur value %s=%s", "stderrthreshold", flag.Lookup("stderrthreshold").Value) - log.V(1).Info("Log option new value %s=%s", "stderrthreshold", c.Config().Stderrthreshold) + c.logUpdate("stderrthreshold", c.Config().Stderrthreshold) updated = true _ = flag.Set("stderrthreshold", c.Config().Stderrthreshold) } if c.Config().V != "" { - log.V(1).Info("Log option cur value %s=%s", "v", flag.Lookup("v").Value) - log.V(1).Info("Log option new value %s=%s", "v", c.Config().V) + c.logUpdate("v", c.Config().V) updated = true _ = flag.Set("v", c.Config().V) } @@ -77,3 +73,7 @@ func (c *CHOp) SetupLog() { log.V(1).Info("Additional log options applied") } } + +func (c *CHOp) logUpdate(name, value string) { + log.V(1).Info("Log option '%s' change value from '%s' to '%s'", name, flag.Lookup(name).Value, value) +} From e27bde6438d9aeeceace577e045edf51cbea8f8f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 16:01:17 +0300 Subject: [PATCH 32/78] dev: make announcer to understand ObjectMeta fields --- pkg/announcer/announcer.go | 17 +++++++++++------ pkg/controller/chi/announcer.go | 6 ++---- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/pkg/announcer/announcer.go 
b/pkg/announcer/announcer.go index 097919b58..11b8e02f4 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -15,12 +15,11 @@ package announcer import ( + "reflect" "strconv" - log "github.com/golang/glog" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/altinity/clickhouse-operator/pkg/util" + log "github.com/golang/glog" ) // Announcer handler all log/event/status messages going outside of controller/worker @@ -155,18 +154,24 @@ func E() Announcer { } // M adds object meta as 'namespace/name' -func (a Announcer) M(m *v1.ObjectMeta) Announcer { +func (a Announcer) M(m interface{}) Announcer { if m == nil { return a } + meta := reflect.ValueOf(m) + namespace := meta.Elem().FieldByName("Namespace") + name := meta.Elem().FieldByName("Name") + if !namespace.IsValid() || !name.IsValid() { + return a + } b := a b.writeLog = true - b.meta = m.Namespace + "/" + m.Name + b.meta = namespace.String() + "/" + name.String() return b } // M adds object meta as 'namespace/name' -func M(m *v1.ObjectMeta) Announcer { +func M(m interface{}) Announcer { return announcer.M(m) } diff --git a/pkg/controller/chi/announcer.go b/pkg/controller/chi/announcer.go index 763f05d9b..6c14fb28f 100644 --- a/pkg/controller/chi/announcer.go +++ b/pkg/controller/chi/announcer.go @@ -17,11 +17,9 @@ package chi import ( "fmt" - log "github.com/golang/glog" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - a "github.com/altinity/clickhouse-operator/pkg/announcer" chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + log "github.com/golang/glog" ) // Announcer handler all log/event/status messages going outside of controller/worker @@ -108,7 +106,7 @@ func (a Announcer) E() Announcer { } // M adds object meta as 'namespace/name' -func (a Announcer) M(m *v1.ObjectMeta) Announcer { +func (a Announcer) M(m interface{}) Announcer { b := a b.Announcer = b.Announcer.M(m) return b From 843bd163542d753101edb8e36f6531ea0ba2de5e Mon Sep 17 00:00:00 2001 
From: Vladislav Klimenko Date: Wed, 3 Feb 2021 16:01:56 +0300 Subject: [PATCH 33/78] dev: controller logger --- pkg/chop/config_manager.go | 2 +- pkg/chop/kube_machinery.go | 8 +-- pkg/controller/chi/controller.go | 109 +++++++++++++++---------------- 3 files changed, 58 insertions(+), 61 deletions(-) diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go index b21be48c2..1f6e7ebbf 100644 --- a/pkg/chop/config_manager.go +++ b/pkg/chop/config_manager.go @@ -115,7 +115,7 @@ func (cm *ConfigManager) getCRBasedConfigs(namespace string) { // Get list of ClickHouseOperatorConfiguration objects var err error if cm.chopConfigList, err = cm.chopClient.ClickhouseV1().ClickHouseOperatorConfigurations(namespace).List(metav1.ListOptions{}); err != nil { - log.V(1).Error("Error read ClickHouseOperatorConfigurations %v", err) + log.V(1).A().Error("Error read ClickHouseOperatorConfigurations %v", err) return } diff --git a/pkg/chop/kube_machinery.go b/pkg/chop/kube_machinery.go index 31a12efd0..3ad3f0fca 100644 --- a/pkg/chop/kube_machinery.go +++ b/pkg/chop/kube_machinery.go @@ -65,18 +65,18 @@ func getKubeConfig(kubeConfigFile, masterURL string) (*kuberest.Config, error) { func GetClientset(kubeConfigFile, masterURL string) (*kube.Clientset, *chopclientset.Clientset) { kubeConfig, err := getKubeConfig(kubeConfigFile, masterURL) if err != nil { - log.Fatal("Unable to build kubeconf: %s", err.Error()) + log.A().Fatal("Unable to build kubeconf: %s", err.Error()) os.Exit(1) } kubeClientset, err := kube.NewForConfig(kubeConfig) if err != nil { - log.Fatal("Unable to initialize kubernetes API clientset: %s", err.Error()) + log.A().Fatal("Unable to initialize kubernetes API clientset: %s", err.Error()) } chopClientset, err := chopclientset.NewForConfig(kubeConfig) if err != nil { - log.Fatal("Unable to initialize clickhouse-operator API clientset: %s", err.Error()) + log.A().Fatal("Unable to initialize clickhouse-operator API clientset: %s", err.Error()) } return 
kubeClientset, chopClientset @@ -87,7 +87,7 @@ func GetCHOp(chopClient *chopclientset.Clientset, initCHOpConfigFilePath string) // Create operator instance chop := NewCHOp(version.Version, chopClient, initCHOpConfigFilePath) if err := chop.Init(); err != nil { - log.Fatal("Unable to init CHOP instance %v", err) + log.A().Fatal("Unable to init CHOP instance %v", err) os.Exit(1) } diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index c71f330ba..2292b406b 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -122,7 +122,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chi.Namespace) { return } - log.V(2).Info("chiInformer.AddFunc - %s/%s added", chi.Namespace, chi.Name) + log.V(2).M(chi).Info("chiInformer.AddFunc") c.enqueueObject(chi.Namespace, chi.Name, NewReconcileChi(reconcileAdd, nil, chi)) }, UpdateFunc: func(old, new interface{}) { @@ -131,7 +131,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(newChi.Namespace) { return } - log.V(2).Info("chiInformer.UpdateFunc") + log.V(2).M(newChi).Info("chiInformer.UpdateFunc") c.enqueueObject(newChi.Namespace, newChi.Name, NewReconcileChi(reconcileUpdate, oldChi, newChi)) }, DeleteFunc: func(obj interface{}) { @@ -139,7 +139,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chi.Namespace) { return } - log.V(2).Info("chiInformer.DeleteFunc - CHI %s/%s deleted", chi.Namespace, chi.Name) + log.V(2).M(chi).Info("chiInformer.DeleteFunc") c.enqueueObject(chi.Namespace, chi.Name, NewReconcileChi(reconcileDelete, chi, nil)) }, }) @@ -150,7 +150,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chit.Namespace) { return } - log.V(2).Info("chitInformer.AddFunc - %s/%s added", chit.Namespace, chit.Name) + log.V(2).M(chit).Info("chitInformer.AddFunc") c.enqueueObject(chit.Namespace, chit.Name, NewReconcileChit(reconcileAdd, nil, chit)) }, 
UpdateFunc: func(old, new interface{}) { @@ -159,7 +159,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(newChit.Namespace) { return } - log.V(2).Info("chitInformer.UpdateFunc - %s/%s", newChit.Namespace, newChit.Name) + log.V(2).M(newChit).Info("chitInformer.UpdateFunc") c.enqueueObject(newChit.Namespace, newChit.Name, NewReconcileChit(reconcileUpdate, oldChit, newChit)) }, DeleteFunc: func(obj interface{}) { @@ -167,7 +167,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chit.Namespace) { return } - log.V(2).Info("chitInformer.DeleteFunc - %s/%s deleted", chit.Namespace, chit.Name) + log.V(2).M(chit).Info("chitInformer.DeleteFunc") c.enqueueObject(chit.Namespace, chit.Name, NewReconcileChit(reconcileDelete, chit, nil)) }, }) @@ -178,7 +178,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chopConfig.Namespace) { return } - log.V(2).Info("chopInformer.AddFunc - %s/%s added", chopConfig.Namespace, chopConfig.Name) + log.V(2).M(chopConfig).Info("chopInformer.AddFunc") c.enqueueObject(chopConfig.Namespace, chopConfig.Name, NewReconcileChopConfig(reconcileAdd, nil, chopConfig)) }, UpdateFunc: func(old, new interface{}) { @@ -187,7 +187,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(newChopConfig.Namespace) { return } - log.V(2).Info("chopInformer.UpdateFunc - %s/%s", newChopConfig.Namespace, newChopConfig.Name) + log.V(2).M(newChopConfig).Info("chopInformer.UpdateFunc") c.enqueueObject(newChopConfig.Namespace, newChopConfig.Name, NewReconcileChopConfig(reconcileUpdate, oldChopConfig, newChopConfig)) }, DeleteFunc: func(obj interface{}) { @@ -195,7 +195,7 @@ func (c *Controller) addEventHandlers( if !c.chop.Config().IsWatchedNamespace(chopConfig.Namespace) { return } - log.V(2).Info("chopInformer.DeleteFunc - %s/%s deleted", chopConfig.Namespace, chopConfig.Name) + log.V(2).M(chopConfig).Info("chopInformer.DeleteFunc") 
c.enqueueObject(chopConfig.Namespace, chopConfig.Name, NewReconcileChopConfig(reconcileDelete, chopConfig, nil)) }, }) @@ -206,20 +206,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&service.ObjectMeta) { return } - log.V(2).Info("serviceInformer AddFunc %s/%s", service.Namespace, service.Name) + log.V(2).M(service).Info("serviceInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { oldService := old.(*core.Service) if !c.isTrackedObject(&oldService.ObjectMeta) { return } + log.V(2).M(oldService).Info("serviceInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { service := obj.(*core.Service) if !c.isTrackedObject(&service.ObjectMeta) { return } - log.V(2).Info("serviceInformer DeleteFunc %s/%s", service.Namespace, service.Name) + log.V(2).M(service).Info("serviceInformer.DeleteFunc") }, }) @@ -229,7 +230,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&endpoints.ObjectMeta) { return } - log.V(2).Info("endpointsInformer AddFunc %s/%s", endpoints.Namespace, endpoints.Name) + log.V(2).M(endpoints).Info("endpointsInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { oldEndpoints := old.(*core.Endpoints) @@ -240,14 +241,14 @@ func (c *Controller) addEventHandlers( diff, equal := messagediff.DeepDiff(oldEndpoints, newEndpoints) if equal { - log.V(2).Info("onUpdateEndpoints(%s/%s): no changes found", oldEndpoints.Namespace, oldEndpoints.Name) + log.V(2).M(oldEndpoints).Info("endpointsInformer.UpdateFunc: no changes found") // No need to react return } added := false for path := range diff.Added { - log.V(2).Info("onUpdateEndpoints(%s/%s): added %v", oldEndpoints.Namespace, oldEndpoints.Name, path) + log.V(2).M(oldEndpoints).Info("endpointsInformer.UpdateFunc: added %v", path) for _, pathnode := range *path { s := pathnode.String() if s == ".Addresses" { @@ -256,14 +257,14 @@ func (c *Controller) addEventHandlers( } } for path := range diff.Removed { - log.V(2).Info("onUpdateEndpoints(%s/%s): removed %v", 
oldEndpoints.Namespace, oldEndpoints.Name, path) + log.V(2).M(oldEndpoints).Info("endpointsInformer.UpdateFunc: removed %v", path) } for path := range diff.Modified { - log.V(2).Info("onUpdateEndpoints(%s/%s): modified %v", oldEndpoints.Namespace, oldEndpoints.Name, path) + log.V(2).M(oldEndpoints).Info("endpointsInformer.UpdateFunc: modified %v", path) } if added { - log.V(1).Info("endpointsInformer UpdateFunc(%s/%s) IP ASSIGNED %v", newEndpoints.Namespace, newEndpoints.Name, newEndpoints.Subsets) + log.V(1).M(oldEndpoints).Info("endpointsInformer.UpdateFunc: IP ASSIGNED %v", newEndpoints.Subsets) c.enqueueObject(newEndpoints.Namespace, newEndpoints.Name, NewDropDns(&newEndpoints.ObjectMeta)) } }, @@ -272,7 +273,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&endpoints.ObjectMeta) { return } - log.V(2).Info("endpointsInformer DeleteFunc %s/%s", endpoints.Namespace, endpoints.Name) + log.V(2).M(endpoints).Info("endpointsInformer.DeleteFunc") }, }) @@ -282,21 +283,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).Info("configMapInformer AddFunc %s/%s", configMap.Namespace, configMap.Name) + log.V(2).M(configMap).Info("configMapInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { configMap := old.(*core.ConfigMap) if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).Info("configMapInformer UpdateFunc %s/%s", configMap.Namespace, configMap.Name) + log.V(2).M(configMap).Info("configMapInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { configMap := obj.(*core.ConfigMap) if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).Info("configMapInformer DeleteFunc %s/%s", configMap.Namespace, configMap.Name) + log.V(2).M(configMap).Info("configMapInformer.DeleteFunc") }, }) @@ -306,7 +307,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - log.V(2).Info("statefulSetInformer AddFunc %s/%s", 
statefulSet.Namespace, statefulSet.Name) + log.V(2).M(statefulSet).Info("statefulSetInformer.AddFunc") //controller.handleObject(obj) }, UpdateFunc: func(old, new interface{}) { @@ -314,14 +315,14 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - log.V(2).Info("statefulSetInformer UpdateFunc %s/%s", statefulSet.Namespace, statefulSet.Name) + log.V(2).M(statefulSet).Info("statefulSetInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { statefulSet := obj.(*apps.StatefulSet) if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - log.V(2).Info("statefulSetInformer DeleteFunc %s/%s", statefulSet.Namespace, statefulSet.Name) + log.V(2).M(statefulSet).Info("statefulSetInformer.DeleteFunc") //controller.handleObject(obj) }, }) @@ -332,21 +333,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).Info("podInformer AddFunc %s/%s", pod.Namespace, pod.Name) + log.V(2).M(pod).Info("podInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { pod := old.(*core.Pod) if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).Info("podInformer UpdateFunc %s/%s", pod.Namespace, pod.Name) + log.V(2).M(pod).Info("podInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { pod := obj.(*core.Pod) if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).Info("podInformer DeleteFunc %s/%s", pod.Namespace, pod.Name) + log.V(2).M(pod).Info("podInformer.DeleteFunc") }, }) } @@ -385,15 +386,15 @@ func (c *Controller) Run(ctx context.Context) { // Start threads // workersNum := len(c.queues) - log.V(1).Info("ClickHouseInstallation controller: starting workers number: %d", workersNum) + log.V(1).A().Info("ClickHouseInstallation controller: starting workers number: %d", workersNum) for i := 0; i < workersNum; i++ { - log.V(1).Info("ClickHouseInstallation controller: starting worker %d out of %d", i+1, workersNum) + log.V(1).A().Info("ClickHouseInstallation 
controller: starting worker %d out of %d", i+1, workersNum) worker := c.newWorker(c.queues[i]) go wait.Until(worker.run, runWorkerPeriod, ctx.Done()) } - defer log.V(1).Info("ClickHouseInstallation controller: shutting down workers") + defer log.V(1).A().Info("ClickHouseInstallation controller: shutting down workers") - log.V(1).Info("ClickHouseInstallation controller: workers started") + log.V(1).A().Info("ClickHouseInstallation controller: workers started") <-ctx.Done() } @@ -425,7 +426,7 @@ func (c *Controller) updateWatch(namespace, name string, hostnames []string) { // updateWatchAsync func (c *Controller) updateWatchAsync(namespace, name string, hostnames []string) { if err := metrics.InformMetricsExporterAboutWatchedCHI(namespace, name, hostnames); err != nil { - log.V(1).Info("FAIL update watch (%s/%s): %q", namespace, name, err) + log.V(1).A().Info("FAIL update watch (%s/%s): %q", namespace, name, err) } else { log.V(2).Info("OK update watch (%s/%s)", namespace, name) } @@ -439,7 +440,7 @@ func (c *Controller) deleteWatch(namespace, name string) { // deleteWatchAsync func (c *Controller) deleteWatchAsync(namespace, name string) { if err := metrics.InformMetricsExporterToDeleteWatchedCHI(namespace, name); err != nil { - log.V(1).Info("FAIL delete watch (%s/%s): %q", namespace, name, err) + log.V(1).A().Info("FAIL delete watch (%s/%s): %q", namespace, name, err) } else { log.V(2).Info("OK delete watch (%s/%s)", namespace, name) } @@ -447,7 +448,7 @@ func (c *Controller) deleteWatchAsync(namespace, name string) { // addChit sync new CHIT - creates all its resources func (c *Controller) addChit(chit *chi.ClickHouseInstallationTemplate) error { - log.V(1).Info("addChit(%s/%s)", chit.Namespace, chit.Name) + log.V(1).M(chit).F().P() c.chop.Config().AddCHITemplate((*chi.ClickHouseInstallation)(chit)) return nil } @@ -455,19 +456,19 @@ func (c *Controller) addChit(chit *chi.ClickHouseInstallationTemplate) error { // updateChit sync CHIT which was already created 
earlier func (c *Controller) updateChit(old, new *chi.ClickHouseInstallationTemplate) error { if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion { - log.V(2).Info("updateChit(%s/%s): ResourceVersion did not change: %s", old.Namespace, old.Name, old.ObjectMeta.ResourceVersion) + log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.ObjectMeta.ResourceVersion) // No need to react return nil } - log.V(2).Info("updateChit(%s/%s):", new.Namespace, new.Name) + log.V(2).M(new).F().Info("ResourceVersion change: %s to %s", old.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion) c.chop.Config().UpdateCHITemplate((*chi.ClickHouseInstallation)(new)) return nil } // deleteChit deletes CHIT func (c *Controller) deleteChit(chit *chi.ClickHouseInstallationTemplate) error { - log.V(2).Info("deleteChit(%s/%s):", chit.Namespace, chit.Name) + log.V(2).M(chit).F().P() c.chop.Config().DeleteCHITemplate((*chi.ClickHouseInstallation)(chit)) return nil } @@ -475,9 +476,9 @@ func (c *Controller) deleteChit(chit *chi.ClickHouseInstallationTemplate) error // addChopConfig func (c *Controller) addChopConfig(chopConfig *chi.ClickHouseOperatorConfiguration) error { if c.chop.ConfigManager.IsConfigListed(chopConfig) { - log.V(1).Info("addChopConfig(%s/%s) already known config - do nothing", chopConfig.Namespace, chopConfig.Name) + log.V(1).M(chopConfig).F().Info("already known config - do nothing") } else { - log.V(1).Info("addChopConfig(%s/%s) new, previously unknown config, need to apply", chopConfig.Namespace, chopConfig.Name) + log.V(1).M(chopConfig).F().Info("new, previously unknown config, need to apply") // TODO // NEED REFACTORING // os.Exit(0) @@ -489,12 +490,12 @@ func (c *Controller) addChopConfig(chopConfig *chi.ClickHouseOperatorConfigurati // updateChopConfig func (c *Controller) updateChopConfig(old, new *chi.ClickHouseOperatorConfiguration) error { if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion { - 
log.V(2).Info("updateChopConfig(%s/%s): ResourceVersion did not change: %s", old.Namespace, old.Name, old.ObjectMeta.ResourceVersion) + log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.ObjectMeta.ResourceVersion) // No need to react return nil } - log.V(2).Info("updateChopConfig(%s/%s):", new.Namespace, new.Name) + log.V(2).M(new).F().Info("ResourceVersion change: %s to %s", old.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion) // TODO // NEED REFACTORING //os.Exit(0) @@ -504,7 +505,7 @@ func (c *Controller) updateChopConfig(old, new *chi.ClickHouseOperatorConfigurat // deleteChit deletes CHIT func (c *Controller) deleteChopConfig(chopConfig *chi.ClickHouseOperatorConfiguration) error { - log.V(2).Info("deleteChopConfig(%s/%s):", chopConfig.Namespace, chopConfig.Name) + log.V(2).M(chopConfig).F().P() // TODO // NEED REFACTORING //os.Exit(0) @@ -514,20 +515,16 @@ func (c *Controller) deleteChopConfig(chopConfig *chi.ClickHouseOperatorConfigur // updateCHIObject updates ClickHouseInstallation object func (c *Controller) updateCHIObject(chi *chi.ClickHouseInstallation) error { - namespace, name := util.NamespaceName(chi.ObjectMeta) - new, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Update(chi) - + new, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.ObjectMeta.Namespace).Update(chi) if err != nil { // Error update - log.V(1).Info("ERROR update CHI (%s/%s): %q", namespace, name, err) + log.V(1).M(chi).A().Error("%q", err) return err } if chi.ObjectMeta.ResourceVersion != new.ObjectMeta.ResourceVersion { // Updated - log.V(2).Info("updateCHIObject(%s/%s): ResourceVersion bump %s=>%s", - namespace, name, chi.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion, - ) + log.V(2).M(chi).F().Info("ResourceVersion change: %s to %s", chi.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion) chi.ObjectMeta.ResourceVersion = new.ObjectMeta.ResourceVersion return nil } @@ -540,21 +537,21 @@ func (c 
*Controller) updateCHIObject(chi *chi.ClickHouseInstallation) error { // updateCHIObjectStatus updates ClickHouseInstallation object's Status func (c *Controller) updateCHIObjectStatus(chi *chi.ClickHouseInstallation, tolerateAbsence bool) error { namespace, name := util.NamespaceName(chi.ObjectMeta) - log.V(2).Info("Update CHI status (%s/%s)", namespace, name) + log.V(2).M(chi).F().Info("Update CHI status") cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) if err != nil { if tolerateAbsence { return nil } - log.V(1).Info("ERROR GetCHI (%s/%s): %q", namespace, name, err) + log.V(1).M(chi).A().Error("%q", err) return err } if cur == nil { if tolerateAbsence { return nil } - log.V(1).Info("ERROR GetCHI (%s/%s): NULL returned", namespace, name) + log.V(1).M(chi).A().Error("NULL returned") return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", namespace, name) } @@ -566,7 +563,7 @@ func (c *Controller) updateCHIObjectStatus(chi *chi.ClickHouseInstallation, tole // installFinalizer func (c *Controller) installFinalizer(chi *chi.ClickHouseInstallation) error { namespace, name := util.NamespaceName(chi.ObjectMeta) - log.V(2).Info("Update CHI status (%s/%s)", namespace, name) + log.V(2).M(chi).Info("Update CHI status") cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) if err != nil { @@ -588,7 +585,7 @@ func (c *Controller) installFinalizer(chi *chi.ClickHouseInstallation) error { // uninstallFinalizer func (c *Controller) uninstallFinalizer(chi *chi.ClickHouseInstallation) error { namespace, name := util.NamespaceName(chi.ObjectMeta) - log.V(2).Info("Update CHI status (%s/%s)", namespace, name) + log.V(2).M(chi).Info("Update CHI status") cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) if err != nil { @@ -650,11 +647,11 @@ func (c *Controller) handleObject(obj interface{}) { // waitForCacheSync is a logger-wrapper 
over cache.WaitForCacheSync() and it waits for caches to populate func waitForCacheSync(name string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool { - log.V(1).Info("Syncing caches for %s controller", name) + log.V(1).F().Info("Syncing caches for %s controller", name) if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { utilruntime.HandleError(fmt.Errorf(messageUnableToSync, name)) return false } - log.V(1).Info("Caches are synced for %s controller", name) + log.V(1).F().Info("Caches are synced for %s controller", name) return true } From 6025431eab532cbdbb28a9e785a746dd3de48eec Mon Sep 17 00:00:00 2001 From: alz Date: Wed, 3 Feb 2021 16:53:53 +0300 Subject: [PATCH 34/78] Change initial delay for liveness probe --- pkg/model/creator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/model/creator.go b/pkg/model/creator.go index b30a7c4f0..e3de5bcd0 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -853,8 +853,9 @@ func newDefaultLivenessProbe() *corev1.Probe { Port: intstr.Parse(chDefaultHTTPPortName), }, }, - InitialDelaySeconds: 10, + InitialDelaySeconds: 60, PeriodSeconds: 3, + FailureThreshold: 5, } } From 7b7154c72ee11891258468eb8c14b6721d14933d Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 17:43:51 +0300 Subject: [PATCH 35/78] dev: add namespace stringifier --- pkg/util/k8s.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/util/k8s.go b/pkg/util/k8s.go index 2aa712f3e..97f85e517 100644 --- a/pkg/util/k8s.go +++ b/pkg/util/k8s.go @@ -20,6 +20,10 @@ func NamespaceName(meta v1.ObjectMeta) (string, string) { return meta.Namespace, meta.Name } +func NamespaceNameString(meta v1.ObjectMeta) string { + return meta.Namespace + "/" + meta.Name +} + // IsAnnotationToBeSkipped checks whether an annotation should be skipped func IsAnnotationToBeSkipped(annotation string) bool { switch annotation { From 5b9eb5b28e0ad8c3da8e2350c0a09b9c3060ed63 Mon Sep 17 00:00:00 2001 From: 
Vladislav Klimenko Date: Wed, 3 Feb 2021 17:44:05 +0300 Subject: [PATCH 36/78] dev: creator logging --- pkg/controller/chi/creator.go | 45 ++++++++++++++--------------------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/pkg/controller/chi/creator.go b/pkg/controller/chi/creator.go index e03a418de..06c396e29 100644 --- a/pkg/controller/chi/creator.go +++ b/pkg/controller/chi/creator.go @@ -18,6 +18,7 @@ package chi import ( "errors" "fmt" + "github.com/altinity/clickhouse-operator/pkg/util" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -28,7 +29,7 @@ import ( // createStatefulSet is an internal function, used in reconcileStatefulSet only func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.ChiHost) error { - log.V(1).Info("Create StatefulSet %s/%s", statefulSet.Namespace, statefulSet.Name) + log.V(1).M(statefulSet).F().P() if statefulSet, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil { // Error call Create() return err @@ -45,16 +46,13 @@ func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, host *chop // updateStatefulSet is an internal function, used in reconcileStatefulSet only func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStatefulSet *apps.StatefulSet, host *chop.ChiHost) error { - // Convenience shortcuts - namespace := newStatefulSet.Namespace - name := newStatefulSet.Name - log.V(2).Info("updateStatefulSet(%s/%s)", namespace, name) + log.V(2).M(oldStatefulSet).F().P() // Apply newStatefulSet and wait for Generation to change - updatedStatefulSet, err := c.kubeClient.AppsV1().StatefulSets(namespace).Update(newStatefulSet) + updatedStatefulSet, err := c.kubeClient.AppsV1().StatefulSets(newStatefulSet.Namespace).Update(newStatefulSet) if err != nil { // Update failed - log.V(1).Info("updateStatefulSet(%s/%s) - git err: %v", namespace, name, err) + log.V(1).M(newStatefulSet).A().Error("%v", err) return err } @@ -64,11 
+62,11 @@ func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStat if updatedStatefulSet.Generation == oldStatefulSet.Generation { // Generation is not updated - no changes in .spec section were made - log.V(2).Info("updateStatefulSet(%s/%s) - no generation change", namespace, name) + log.V(2).M(oldStatefulSet).F().Info("no generation change") return nil } - log.V(1).Info("updateStatefulSet(%s/%s) - generation change %d=>%d", namespace, name, oldStatefulSet.Generation, updatedStatefulSet.Generation) + log.V(1).M(oldStatefulSet).F().Info("generation change %d=>%d", oldStatefulSet.Generation, updatedStatefulSet.Generation) if err := c.waitHostReady(host); err == nil { // Target generation reached, StatefulSet updated successfully @@ -83,16 +81,13 @@ func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStat // updateStatefulSet is an internal function, used in reconcileStatefulSet only func (c *Controller) updatePersistentVolume(pv *v1.PersistentVolume) error { - // Convenience shortcuts - namespace := pv.Namespace - name := pv.Name - log.V(2).Info("updatePersistentVolume(%s/%s)", namespace, name) + log.V(2).M(pv).F().P() // Apply newStatefulSet and wait for Generation to change _, err := c.kubeClient.CoreV1().PersistentVolumes().Update(pv) if err != nil { // Update failed - log.V(1).Info("updatePersistentVolume(%s/%s) - git err: %v", namespace, name, err) + log.V(1).M(pv).A().Error("%v", err) return err } @@ -102,30 +97,26 @@ func (c *Controller) updatePersistentVolume(pv *v1.PersistentVolume) error { // onStatefulSetCreateFailed handles situation when StatefulSet create failed // It can just delete failed StatefulSet or do nothing func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulSet, host *chop.ChiHost) error { - // Convenience shortcuts - namespace := failedStatefulSet.Namespace - name := failedStatefulSet.Name - // What to do with StatefulSet - look into chop configuration settings switch 
c.chop.Config().OnStatefulSetCreateFailureAction { case chop.OnStatefulSetCreateFailureActionAbort: // Report appropriate error, it will break reconcile loop - log.V(1).Info("onStatefulSetCreateFailed(%s/%s) - abort", namespace, name) - return errors.New(fmt.Sprintf("Create failed on %s/%s", namespace, name)) + log.V(1).M(failedStatefulSet).F().Info("abort") + return errors.New(fmt.Sprintf("Create failed on %s", util.NamespaceNameString(failedStatefulSet.ObjectMeta))) case chop.OnStatefulSetCreateFailureActionDelete: // Delete gracefully failed StatefulSet - log.V(1).Info("onStatefulSetCreateFailed(%s/%s) - going to DELETE FAILED StatefulSet", namespace, name) + log.V(1).M(failedStatefulSet).F().Info("going to DELETE FAILED StatefulSet") _ = c.deleteHost(host) return c.shouldContinueOnCreateFailed() case chop.OnStatefulSetCreateFailureActionIgnore: // Ignore error, continue reconcile loop - log.V(1).Info("onStatefulSetCreateFailed(%s/%s) - going to ignore error", namespace, name) + log.V(1).M(failedStatefulSet).F().Info("going to ignore error") return nil default: - log.V(1).Info("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", c.chop.Config().OnStatefulSetCreateFailureAction) + log.V(1).M(failedStatefulSet).A().Error("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", c.chop.Config().OnStatefulSetCreateFailureAction) return nil } @@ -143,12 +134,12 @@ func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.Statefu switch c.chop.Config().OnStatefulSetUpdateFailureAction { case chop.OnStatefulSetUpdateFailureActionAbort: // Report appropriate error, it will break reconcile loop - log.V(1).Info("onStatefulSetUpdateFailed(%s/%s) - abort", namespace, name) + log.V(1).M(rollbackStatefulSet).F().Info("abort") return errors.New(fmt.Sprintf("Update failed on %s/%s", namespace, name)) case chop.OnStatefulSetUpdateFailureActionRollback: // Need to revert current StatefulSet to oldStatefulSet - 
log.V(1).Info("onStatefulSetUpdateFailed(%s/%s) - going to ROLLBACK FAILED StatefulSet", namespace, name) + log.V(1).M(rollbackStatefulSet).F().Info("going to ROLLBACK FAILED StatefulSet") if statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name); err != nil { // Unable to get StatefulSet return err @@ -167,11 +158,11 @@ func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.Statefu case chop.OnStatefulSetUpdateFailureActionIgnore: // Ignore error, continue reconcile loop - log.V(1).Info("onStatefulSetUpdateFailed(%s/%s) - going to ignore error", namespace, name) + log.V(1).M(rollbackStatefulSet).F().Info("going to ignore error") return nil default: - log.V(1).Info("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", c.chop.Config().OnStatefulSetUpdateFailureAction) + log.V(1).M(rollbackStatefulSet).A().Error("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", c.chop.Config().OnStatefulSetUpdateFailureAction) return nil } From 160d9d337f04ea6c0b10fb44800e6b4d7f944307 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 19:07:12 +0300 Subject: [PATCH 37/78] dev: add host address in tiny form --- pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go index 051ca4383..f3837f4bb 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go @@ -43,3 +43,7 @@ type ChiHostAddress struct { func (a ChiHostAddress) ShortString() string { return fmt.Sprintf("ns:%s|chi:%s|clu:%s|sha:%s|rep:%s|host:%s", a.Namespace, a.CHIName, a.ClusterName, a.ShardName, a.ReplicaName, a.HostName) } + +func (a ChiHostAddress) TinyString() string { + return fmt.Sprintf("%s/%s", a.ClusterName, a.HostName) +} From 34387ae0701bb54b00e60989a710fd3a07abc59a Mon Sep 
17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 19:07:35 +0300 Subject: [PATCH 38/78] dev: clarify start/end functions --- pkg/announcer/announcer.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index 11b8e02f4..b3c3417ee 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -125,7 +125,8 @@ func A() Announcer { return announcer.A() } -// S adds 'start of the function' tag +// S adds 'start of the function' tag, which includes: +// file, line, function and start prefix func (a Announcer) S() Announcer { b := a b.writeLog = true @@ -134,12 +135,14 @@ func (a Announcer) S() Announcer { return b } -// S adds 'start of the function' tag +// S adds 'start of the function' tag, which includes: +// file, line, function and start prefix func S() Announcer { return announcer.S() } -// E adds 'end of the function' tag +// E adds 'end of the function' tag, which includes: +// file, line, function and end prefix func (a Announcer) E() Announcer { b := a b.writeLog = true @@ -148,7 +151,8 @@ func (a Announcer) E() Announcer { return b } -// E adds 'end of the function' tag +// E adds 'end of the function' tag, which includes: +// file, line, function and end prefix func E() Announcer { return announcer.E() } From 28fa0051bb09729cdcfb50fe290f156b3f81764e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 19:14:38 +0300 Subject: [PATCH 39/78] dev: introduce direct namespace/name into logger --- pkg/announcer/announcer.go | 27 +++++++++++++++++---------- pkg/controller/chi/announcer.go | 4 ++-- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index b3c3417ee..11f651f44 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -158,25 +158,32 @@ func E() Announcer { } // M adds object meta as 'namespace/name' -func (a Announcer) M(m interface{}) 
Announcer { +func (a Announcer) M(m ...interface{}) Announcer { if m == nil { return a } - meta := reflect.ValueOf(m) - namespace := meta.Elem().FieldByName("Namespace") - name := meta.Elem().FieldByName("Name") - if !namespace.IsValid() || !name.IsValid() { - return a - } b := a b.writeLog = true - b.meta = namespace.String() + "/" + name.String() + switch len(m) { + case 1: + meta := reflect.ValueOf(m) + namespace := meta.Elem().FieldByName("Namespace") + name := meta.Elem().FieldByName("Name") + if !namespace.IsValid() || !name.IsValid() { + return a + } + b.meta = namespace.String() + "/" + name.String() + case 2: + namespace, _ := m[0].(string) + name, _ := m[1].(string) + b.meta = namespace + "/" + name + } return b } // M adds object meta as 'namespace/name' -func M(m interface{}) Announcer { - return announcer.M(m) +func M(m ...interface{}) Announcer { + return announcer.M(m...) } // P triggers log to print line diff --git a/pkg/controller/chi/announcer.go b/pkg/controller/chi/announcer.go index 6c14fb28f..10f964447 100644 --- a/pkg/controller/chi/announcer.go +++ b/pkg/controller/chi/announcer.go @@ -106,9 +106,9 @@ func (a Announcer) E() Announcer { } // M adds object meta as 'namespace/name' -func (a Announcer) M(m interface{}) Announcer { +func (a Announcer) M(m ...interface{}) Announcer { b := a - b.Announcer = b.Announcer.M(m) + b.Announcer = b.Announcer.M(m...) return b } From 62924625f009ddc0a97246c7b0797c48e514de7a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 19:17:51 +0300 Subject: [PATCH 40/78] dev: deleter logging --- pkg/controller/chi/deleter.go | 68 +++++++++++++++++------------------ 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go index 37eb0ceff..04d5c5965 100644 --- a/pkg/controller/chi/deleter.go +++ b/pkg/controller/chi/deleter.go @@ -36,14 +36,14 @@ func (c *Controller) deleteHost(host *chop.ChiHost) error { // 5. 
Service // Need to delete all these item - log.V(1).Info("Controller delete host started %s/%s", host.Address.ClusterName, host.Name) + log.V(1).M(host.GetCHI()).S().Info(host.Address.TinyString()) _ = c.deleteStatefulSet(host) _ = c.deletePVC(host) _ = c.deleteConfigMap(host) _ = c.deleteServiceHost(host) - log.V(1).Info("Controller delete host completed %s/%s", host.Address.ClusterName, host.Name) + log.V(1).M(host.GetCHI()).E().Info(host.Address.TinyString()) return nil } @@ -64,22 +64,22 @@ func (c *Controller) deleteConfigMapsCHI(chi *chop.ClickHouseInstallation) error // Delete ConfigMap err = c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).Delete(configMapCommon, newDeleteOptions()) if err == nil { - log.V(1).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommon) + log.V(1).M(chi).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommon) } else if apierrors.IsNotFound(err) { - log.V(1).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommon) + log.V(1).M(chi).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommon) err = nil } else { - log.V(1).Info("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommon, err) + log.V(1).M(chi).A().Error("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommon, err) } err = c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).Delete(configMapCommonUsersName, newDeleteOptions()) if err == nil { - log.V(1).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName) + log.V(1).M(chi).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName) } else if apierrors.IsNotFound(err) { - log.V(1).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName) + log.V(1).M(chi).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName) err = nil } else { - log.V(1).Info("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommonUsersName, err) + log.V(1).M(chi).A().Error("FAIL 
delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommonUsersName, err) } return err @@ -88,15 +88,15 @@ func (c *Controller) deleteConfigMapsCHI(chi *chop.ClickHouseInstallation) error // statefulSetDeletePod delete a pod of a StatefulSet. This requests StatefulSet to relaunch deleted pod func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet) error { name := chopmodel.CreatePodName(statefulSet) - log.V(1).Info("Delete Pod %s/%s", statefulSet.Namespace, name) + log.V(1).M(statefulSet).Info("Delete Pod %s/%s", statefulSet.Namespace, name) err := c.kubeClient.CoreV1().Pods(statefulSet.Namespace).Delete(name, newDeleteOptions()) if err == nil { - log.V(1).Info("OK delete Pod %s/%s", statefulSet.Namespace, name) + log.V(1).M(statefulSet).Info("OK delete Pod %s/%s", statefulSet.Namespace, name) } else if apierrors.IsNotFound(err) { - log.V(1).Info("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name) + log.V(1).M(statefulSet).Info("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name) err = nil } else { - log.V(1).Info("FAIL delete ConfigMap %s/%s err:%v", statefulSet.Namespace, name, err) + log.V(1).M(statefulSet).A().Error("FAIL delete ConfigMap %s/%s err:%v", statefulSet.Namespace, name, err) } return err @@ -113,14 +113,14 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error { name := chopmodel.CreateStatefulSetName(host) namespace := host.Address.Namespace - log.V(1).Info("deleteStatefulSet(%s/%s)", namespace, name) + log.V(1).M(host.GetCHI()).F().Info("%s/%s", namespace, name) statefulSet, err := c.getStatefulSet(host) if err != nil { if apierrors.IsNotFound(err) { - log.V(1).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) + log.V(1).M(host.GetCHI()).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) } else { - log.V(1).Info("error get StatefulSet %s/%s err:%v", namespace, name, err) + log.V(1).M(host.GetCHI()).A().Error("FAIL get StatefulSet %s/%s err:%v", namespace, name, err) } return nil 
} @@ -135,13 +135,13 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error { // And now delete empty StatefulSet if err := c.kubeClient.AppsV1().StatefulSets(namespace).Delete(name, newDeleteOptions()); err == nil { - log.V(1).Info("OK delete StatefulSet %s/%s", namespace, name) + log.V(1).M(host.GetCHI()).Info("OK delete StatefulSet %s/%s", namespace, name) c.syncStatefulSet(host) } else if apierrors.IsNotFound(err) { - log.V(1).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) + log.V(1).M(host.GetCHI()).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) err = nil } else { - log.V(1).Info("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err) + log.V(1).M(host.GetCHI()).A().Error("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err) return nil } @@ -165,26 +165,26 @@ func (c *Controller) syncStatefulSet(host *chop.ChiHost) { // deletePVC deletes PersistentVolumeClaim func (c *Controller) deletePVC(host *chop.ChiHost) error { - log.V(2).Info("deletePVC() - start") - defer log.V(2).Info("deletePVC() - end") + log.V(2).M(host.GetCHI()).S().P() + defer log.V(2).M(host.GetCHI()).E().P() namespace := host.Address.Namespace c.walkActualPVCs(host, func(pvc *v1.PersistentVolumeClaim) { if !chopmodel.HostCanDeletePVC(host, pvc.Name) { - log.V(1).Info("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name) + log.V(1).M(host.GetCHI()).Info("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name) // Move to the next PVC return } // Actually delete PVC if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(pvc.Name, newDeleteOptions()); err == nil { - log.V(1).Info("OK delete PVC %s/%s", namespace, pvc.Name) + log.V(1).M(host.GetCHI()).Info("OK delete PVC %s/%s", namespace, pvc.Name) } else if apierrors.IsNotFound(err) { - log.V(1).Info("NEUTRAL not found PVC %s/%s", namespace, pvc.Name) + log.V(1).M(host.GetCHI()).Info("NEUTRAL not found PVC %s/%s", namespace, pvc.Name) err 
= nil } else { - log.Error("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err) + log.M(host.GetCHI()).A().Error("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err) } }) @@ -196,15 +196,15 @@ func (c *Controller) deleteConfigMap(host *chop.ChiHost) error { name := chopmodel.CreateConfigMapPodName(host) namespace := host.Address.Namespace - log.V(1).Info("deleteConfigMap(%s/%s)", namespace, name) + log.V(1).M(host.GetCHI()).F().Info("%s/%s", namespace, name) if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(name, newDeleteOptions()); err == nil { - log.V(1).Info("OK delete ConfigMap %s/%s", namespace, name) + log.V(1).M(host.GetCHI()).Info("OK delete ConfigMap %s/%s", namespace, name) } else if apierrors.IsNotFound(err) { - log.V(1).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name) + log.V(1).M(host.GetCHI()).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name) err = nil } else { - log.V(1).Info("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err) + log.V(1).M(host.GetCHI()).A().Error("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err) } return nil @@ -214,7 +214,7 @@ func (c *Controller) deleteConfigMap(host *chop.ChiHost) error { func (c *Controller) deleteServiceHost(host *chop.ChiHost) error { serviceName := chopmodel.CreateStatefulSetServiceName(host) namespace := host.Address.Namespace - log.V(1).Info("deleteServiceReplica(%s/%s)", namespace, serviceName) + log.V(1).M(host.GetCHI()).F().Info("%s/%s", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -222,7 +222,7 @@ func (c *Controller) deleteServiceHost(host *chop.ChiHost) error { func (c *Controller) deleteServiceShard(shard *chop.ChiShard) error { serviceName := chopmodel.CreateShardServiceName(shard) namespace := shard.Address.Namespace - log.V(1).Info("deleteServiceShard(%s/%s)", namespace, serviceName) + log.V(1).M(shard.GetCHI()).F().Info("%s/%s", namespace, serviceName) return c.deleteServiceIfExists(namespace, 
serviceName) } @@ -230,7 +230,7 @@ func (c *Controller) deleteServiceShard(shard *chop.ChiShard) error { func (c *Controller) deleteServiceCluster(cluster *chop.ChiCluster) error { serviceName := chopmodel.CreateClusterServiceName(cluster) namespace := cluster.Address.Namespace - log.V(1).Info("deleteServiceCluster(%s/%s)", namespace, serviceName) + log.V(1).M(cluster.GetCHI()).F().Info("%s/%s", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -238,7 +238,7 @@ func (c *Controller) deleteServiceCluster(cluster *chop.ChiCluster) error { func (c *Controller) deleteServiceCHI(chi *chop.ClickHouseInstallation) error { serviceName := chopmodel.CreateCHIServiceName(chi) namespace := chi.Namespace - log.V(1).Info("deleteServiceCHI(%s/%s)", namespace, serviceName) + log.V(1).M(chi).F().Info("%s/%s", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -257,9 +257,9 @@ func (c *Controller) deleteServiceIfExists(namespace, name string) error { // Delete service err = c.kubeClient.CoreV1().Services(namespace).Delete(name, newDeleteOptions()) if err == nil { - log.V(1).Info("OK delete Service %s/%s", namespace, name) + log.V(1).M(namespace, name).Info("OK delete Service %s/%s", namespace, name) } else { - log.V(1).Info("FAIL delete Service %s/%s err:%v", namespace, name, err) + log.V(1).M(namespace, name).A().Error("FAIL delete Service %s/%s err:%v", namespace, name, err) } return err From 46f3928c8a83dfc21cb01934d8f5fdf3962e18d9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 3 Feb 2021 19:19:01 +0300 Subject: [PATCH 41/78] dev: event logger --- pkg/controller/chi/event.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/chi/event.go b/pkg/controller/chi/event.go index d189049eb..92233a4a8 100644 --- a/pkg/controller/chi/event.go +++ b/pkg/controller/chi/event.go @@ -138,6 +138,6 @@ func (c *Controller) emitEvent( _, err := 
c.kubeClient.CoreV1().Events(namespace).Create(event) if err != nil { - log.V(1).Error("Create Event failed: %v", err) + log.M(chi).A().Error("Create Event failed: %v", err) } } From f9cbd0ca00240d16102c0d6d445dfd4399747d9d Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 11:04:40 +0300 Subject: [PATCH 42/78] dev: labeler --- pkg/controller/chi/labeler.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go index 6fc23021a..398537dda 100644 --- a/pkg/controller/chi/labeler.go +++ b/pkg/controller/chi/labeler.go @@ -55,21 +55,21 @@ func (c *Controller) labelMyObjectsTree() { namespace, ok2 := c.chop.ConfigManager.GetRuntimeParam(chiv1.OPERATOR_POD_NAMESPACE) if !ok1 || !ok2 { - log.V(1).Info("ERROR fetch Pod name out of %s/%s", namespace, podName) + log.V(1).M(namespace, podName).A().Error("ERROR fetch Pod name out of %s/%s", namespace, podName) return } // Pod namespaced name found, fetch the Pod pod, err := c.podLister.Pods(namespace).Get(podName) if err != nil { - log.V(1).Info("ERROR get Pod %s/%s", namespace, podName) + log.V(1).M(namespace, podName).A().Error("ERROR get Pod %s/%s", namespace, podName) return } // Put label on the Pod c.addLabels(&pod.ObjectMeta) if _, err := c.kubeClient.CoreV1().Pods(namespace).Update(pod); err != nil { - log.V(1).Info("ERROR put label on Pod %s/%s %v", namespace, podName, err) + log.V(1).M(namespace, podName).A().Error("ERROR put label on Pod %s/%s %v", namespace, podName, err) } // Find parent ReplicaSet @@ -85,21 +85,21 @@ func (c *Controller) labelMyObjectsTree() { if replicaSetName == "" { // ReplicaSet not found - log.V(1).Info("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName) + log.V(1).M(namespace, podName).A().Error("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName) return } // ReplicaSet namespaced name found, fetch the ReplicaSet replicaSet, err := 
c.kubeClient.AppsV1().ReplicaSets(namespace).Get(replicaSetName, v1.GetOptions{}) if err != nil { - log.V(1).Info("ERROR get ReplicaSet %s/%s %v", namespace, replicaSetName, err) + log.V(1).M(namespace, replicaSetName).A().Error("ERROR get ReplicaSet %s/%s %v", namespace, replicaSetName, err) return } // Put label on the ReplicaSet c.addLabels(&replicaSet.ObjectMeta) if _, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Update(replicaSet); err != nil { - log.V(1).Info("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err) + log.V(1).M(namespace, replicaSetName).A().Error("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err) } // Find parent Deployment @@ -115,21 +115,21 @@ func (c *Controller) labelMyObjectsTree() { if deploymentName == "" { // Deployment not found - log.V(1).Info("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName) + log.V(1).M(namespace, replicaSetName).A().Error("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName) return } // Deployment namespaced name found, fetch the Deployment deployment, err := c.kubeClient.AppsV1().Deployments(namespace).Get(deploymentName, v1.GetOptions{}) if err != nil { - log.V(1).Info("ERROR get Deployment %s/%s", namespace, deploymentName) + log.V(1).M(namespace, deploymentName).A().Error("ERROR get Deployment %s/%s", namespace, deploymentName) return } // Put label on the Deployment c.addLabels(&deployment.ObjectMeta) if _, err := c.kubeClient.AppsV1().Deployments(namespace).Update(deployment); err != nil { - log.V(1).Info("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err) + log.V(1).M(namespace, deploymentName).A().Error("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err) } } From 65eb32d05ffd45f2953596e7b17b0ee3fd0afa8a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 11:06:07 +0300 Subject: [PATCH 43/78] dev: host addresses 
--- .../clickhouse.altinity.com/v1/type_chi_host_address.go | 8 ++++++-- pkg/controller/chi/deleter.go | 4 ++-- pkg/controller/chi/worker.go | 8 ++++---- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go index f3837f4bb..5e7872815 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go @@ -40,10 +40,14 @@ type ChiHostAddress struct { ClusterScopeCycleOffset int `json:"clusterScopeCycleOffset"` } -func (a ChiHostAddress) ShortString() string { +func (a ChiHostAddress) CompactString() string { return fmt.Sprintf("ns:%s|chi:%s|clu:%s|sha:%s|rep:%s|host:%s", a.Namespace, a.CHIName, a.ClusterName, a.ShardName, a.ReplicaName, a.HostName) } -func (a ChiHostAddress) TinyString() string { +func (a ChiHostAddress) ClusterNameString() string { return fmt.Sprintf("%s/%s", a.ClusterName, a.HostName) } + +func (a ChiHostAddress) NamespaceNameString() string { + return fmt.Sprintf("%s/%s", a.Namespace, a.HostName) +} diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go index 04d5c5965..a166422db 100644 --- a/pkg/controller/chi/deleter.go +++ b/pkg/controller/chi/deleter.go @@ -36,14 +36,14 @@ func (c *Controller) deleteHost(host *chop.ChiHost) error { // 5. 
Service // Need to delete all these item - log.V(1).M(host.GetCHI()).S().Info(host.Address.TinyString()) + log.V(1).M(host.GetCHI()).S().Info(host.Address.ClusterNameString()) _ = c.deleteStatefulSet(host) _ = c.deletePVC(host) _ = c.deleteConfigMap(host) _ = c.deleteServiceHost(host) - log.V(1).M(host.GetCHI()).E().Info(host.Address.TinyString()) + log.V(1).M(host.GetCHI()).E().Info(host.Address.ClusterNameString()) return nil } diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index e46e98725..58071be69 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -305,13 +305,13 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { new.WalkHosts(func(host *chop.ChiHost) error { if host.ReconcileAttributes.IsAdd() { - w.a.Info("ADD host: %s", host.Address.ShortString()) + w.a.Info("ADD host: %s", host.Address.CompactString()) } else if host.ReconcileAttributes.IsModify() { - w.a.Info("MODIFY host: %s", host.Address.ShortString()) + w.a.Info("MODIFY host: %s", host.Address.CompactString()) } else if host.ReconcileAttributes.IsUnclear() { - w.a.Info("UNCLEAR host: %s", host.Address.ShortString()) + w.a.Info("UNCLEAR host: %s", host.Address.CompactString()) } else { - w.a.Info("UNTOUCHED host: %s", host.Address.ShortString()) + w.a.Info("UNTOUCHED host: %s", host.Address.CompactString()) } return nil }) From 479a269a37e67c584ee5fc7f36aa94a5e75ebcc6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 11:14:13 +0300 Subject: [PATCH 44/78] dev: chi name from host --- pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go index 5e7872815..907152501 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi_host_address.go @@ -51,3 +51,7 @@ 
func (a ChiHostAddress) ClusterNameString() string { func (a ChiHostAddress) NamespaceNameString() string { return fmt.Sprintf("%s/%s", a.Namespace, a.HostName) } + +func (a ChiHostAddress) NamespaceCHINameString() string { + return fmt.Sprintf("%s/%s", a.Namespace, a.CHIName) +} From 25dbf10fe92fe326448c1961c4c13ac3614931f2 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 11:14:34 +0300 Subject: [PATCH 45/78] dev: accept one string for logger M --- pkg/announcer/announcer.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index 11f651f44..48bd570a2 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -159,20 +159,26 @@ func E() Announcer { // M adds object meta as 'namespace/name' func (a Announcer) M(m ...interface{}) Announcer { - if m == nil { + if len(m) == 0 { return a } + b := a b.writeLog = true switch len(m) { case 1: - meta := reflect.ValueOf(m) - namespace := meta.Elem().FieldByName("Namespace") - name := meta.Elem().FieldByName("Name") - if !namespace.IsValid() || !name.IsValid() { - return a + switch typed := m[0].(type) { + case string: + b.meta = typed + default: + meta := reflect.ValueOf(m[0]) + namespace := meta.Elem().FieldByName("Namespace") + name := meta.Elem().FieldByName("Name") + if !namespace.IsValid() || !name.IsValid() { + return a + } + b.meta = namespace.String() + "/" + name.String() } - b.meta = namespace.String() + "/" + name.String() case 2: namespace, _ := m[0].(string) name, _ := m[1].(string) From 8baa68566eb8b0ddda1c9233193318155f769eaa Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 11:22:19 +0300 Subject: [PATCH 46/78] dev: pod --- pkg/controller/chi/pods.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index c2808c9e1..3197dfd33 100644 --- a/pkg/controller/chi/pods.go +++ 
b/pkg/controller/chi/pods.go @@ -25,7 +25,7 @@ import ( func (c *Controller) appendLabelReady(host *chop.ChiHost) error { pod, err := c.getPod(host) if err != nil { - log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) + log.M(host.Address.NamespaceCHINameString()).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return err } @@ -37,7 +37,7 @@ func (c *Controller) appendLabelReady(host *chop.ChiHost) error { func (c *Controller) deleteLabelReady(host *chop.ChiHost) error { pod, err := c.getPod(host) if err != nil { - log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) + log.M(host.Address.NamespaceCHINameString()).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return err } @@ -49,7 +49,7 @@ func (c *Controller) deleteLabelReady(host *chop.ChiHost) error { func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Container)) { pod, err := c.getPod(host) if err != nil { - log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) + log.M(host.Address.NamespaceCHINameString()).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return } @@ -62,7 +62,7 @@ func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Con func (c *Controller) walkContainerStatuses(host *chop.ChiHost, f func(status *v1.ContainerStatus)) { pod, err := c.getPod(host) if err != nil { - log.Error("FAIL get pod for host %s/%s err:%v", host.Address.Namespace, host.Name, err) + log.M(host.Address.NamespaceCHINameString()).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return } From 06b4d3f73c985c11ef187ffcdf761136f98a3883 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 11:36:43 +0300 Subject: [PATCH 47/78] dev: poller --- pkg/controller/chi/poller.go | 31 ++++++++++++++++--------------- 1 file 
changed, 16 insertions(+), 15 deletions(-) diff --git a/pkg/controller/chi/poller.go b/pkg/controller/chi/poller.go index 92c12ed68..4c043d6c1 100644 --- a/pkg/controller/chi/poller.go +++ b/pkg/controller/chi/poller.go @@ -69,24 +69,24 @@ func (c *Controller) waitHostRunning(host *chop.ChiHost) error { for { if c.isHostRunning(host) { // All is good, job done, exit - log.V(1).Info("waitHostRunning(%s/%s)-OK", namespace, name) + log.V(1).M(host.Address.NamespaceCHINameString()).F().Info("%s/%s-OK", namespace, name) return nil } // Object is found, function not positive if time.Since(start) >= (time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second) { // Start bothering with log messages after some time only - log.V(1).Info("waitHostRunning(%s/%s)-WAIT", namespace, name) + log.V(1).M(host.Address.NamespaceCHINameString()).F().Info("%s/%s-WAIT", namespace, name) } if time.Since(start) >= (time.Duration(c.chop.Config().StatefulSetUpdateTimeout) * time.Second) { // Timeout reached, no good result available, time to quit - log.V(1).Info("ERROR waitHostRunning(%s/%s) - TIMEOUT reached", namespace, name) + log.V(1).M(host.Address.NamespaceCHINameString()).F().Error("%s/%s-TIMEOUT reached", namespace, name) return errors.New(fmt.Sprintf("waitHostRunning(%s/%s) - wait timeout", namespace, name)) } // Wait some more time - log.V(2).Info("waithostRunning(%s/%s)", namespace, name) + log.V(2).M(host.Address.NamespaceCHINameString()).F().Info("%s/%s", namespace, name) select { case <-time.After(time.Duration(c.chop.Config().StatefulSetUpdatePollPeriod) * time.Second): } @@ -151,31 +151,31 @@ func (c *Controller) pollStatefulSet(entity interface{}, opts *StatefulSetPollOp // Object is found if f(statefulSet) { // All is good, job done, exit - log.V(1).Info("pollStatefulSet(%s/%s)-OK :%s", namespace, name, model.StrStatefulSetStatus(&statefulSet.Status)) + log.V(1).M(namespace, name).F().Info("OK :%s", model.StrStatefulSetStatus(&statefulSet.Status)) 
return nil } // Object is found, but function is not positive if time.Since(start) >= opts.StartBotheringAfterTimeout { // Start bothering with log messages after some time only - log.V(1).Info("pollStatefulSet(%s/%s)-WAIT:%s", namespace, name, model.StrStatefulSetStatus(&statefulSet.Status)) + log.V(1).M(namespace, name).F().Info("WAIT:%s", model.StrStatefulSetStatus(&statefulSet.Status)) } } else if apierrors.IsNotFound(err) { // Object is not found - it either failed to be created or just still not created if time.Since(start) >= opts.CreateTimeout { // No more wait for object to be created. Consider create as failed. if opts.CreateTimeout > 0 { - log.V(1).Info("ERROR pollStatefulSet(%s/%s) Get() FAILED - StatefulSet still not found, abort", namespace, name) + log.V(1).M(namespace, name).F().Error("Get() FAILED - StatefulSet still not found, abort") } else { - log.V(1).Info("pollStatefulSet(%s/%s) Get() NEUTRAL StatefulSet not found and no wait required", namespace, name) + log.V(1).M(namespace, name).F().Info("Get() NEUTRAL StatefulSet not found and no wait required") } return err } // Object with such name not found - may be is still being created - wait for it - log.V(1).Info("pollStatefulSet(%s/%s)-WAIT: object not found. Not created yet?", namespace, name) + log.V(1).M(namespace, name).F().Info("WAIT: object not found. 
Not created yet?") } else { // Some kind of total error - log.Error("ERROR pollStatefulSet(%s/%s) Get() FAILED", namespace, name) + log.M(namespace, name).A().Error("%s/%s Get() FAILED", namespace, name) return err } @@ -183,7 +183,7 @@ func (c *Controller) pollStatefulSet(entity interface{}, opts *StatefulSetPollOp if time.Since(start) >= opts.Timeout { // Timeout reached, no good result available, time to quit - log.V(1).Info("ERROR pollStatefulSet(%s/%s) - TIMEOUT reached", namespace, name) + log.V(1).M(namespace, name).F().Info("%s/%s - TIMEOUT reached") return errors.New(fmt.Sprintf("waitStatefulSet(%s/%s) - wait timeout", namespace, name)) } @@ -204,30 +204,31 @@ func (c *Controller) pollHost(host *chop.ChiHost, opts *StatefulSetPollOptions, } namespace := host.Address.Namespace name := host.Address.HostName + m := host.Address.NamespaceCHINameString() // Wait timeout is specified in c.chopConfig.StatefulSetUpdateTimeout in seconds start := time.Now() for { if f(host) { // All is good, job done, exit - log.V(1).Info("pollHost(%s/%s)-OK", namespace, name) + log.V(1).M(m).F().Info("%s/%s-OK", namespace, name) return nil } // Object is found, but function is not positive if time.Since(start) >= opts.StartBotheringAfterTimeout { // Start bothering with log messages after some time only - log.V(1).Info("pollHost(%s/%s)-WAIT", namespace, name) + log.V(1).M(m).F().Info("%s/%s-WAIT", namespace, name) } if time.Since(start) >= opts.Timeout { // Timeout reached, no good result available, time to quit - log.V(1).Info("ERROR pollHost(%s/%s) - TIMEOUT reached", namespace, name) + log.V(1).M(m).F().Error("%s/%s-TIMEOUT reached", namespace, name) return errors.New(fmt.Sprintf("pollHost(%s/%s) - wait timeout", namespace, name)) } // Wait some more time - log.V(2).Info("pollHost(%s/%s)", namespace, name) + log.V(2).M(m).F().Info("%s/%s", namespace, name) select { case <-time.After(opts.Interval): } From 0945cddb3884f89a3fac6222dc97e320c7ed083c Mon Sep 17 00:00:00 2001 From: 
Vladislav Klimenko Date: Thu, 4 Feb 2021 15:55:36 +0300 Subject: [PATCH 48/78] dev: make announcer understand deeper --- pkg/announcer/announcer.go | 64 +++++++++++++++++++++++++++++++++++--- 1 file changed, 59 insertions(+), 5 deletions(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index 48bd570a2..cd1b778cd 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -171,13 +171,11 @@ func (a Announcer) M(m ...interface{}) Announcer { case string: b.meta = typed default: - meta := reflect.ValueOf(m[0]) - namespace := meta.Elem().FieldByName("Namespace") - name := meta.Elem().FieldByName("Name") - if !namespace.IsValid() || !name.IsValid() { + if meta, ok := a.findMeta(m[0]); ok { + b.meta = meta + } else { return a } - b.meta = namespace.String() + "/" + name.String() } case 2: namespace, _ := m[0].(string) @@ -326,3 +324,59 @@ func (a Announcer) prependFormat(format string) string { } return format } + +func (a Announcer) findMeta(m interface{}) (string, bool) { + if meta, ok := a.findInObjectMeta(m); ok { + return meta, ok + } + if meta, ok := a.findInCHI(m); ok { + return meta, ok + } + if meta, ok := a.findInAddress(m); ok { + return meta, ok + } + return "", false +} + +func (a Announcer) findInObjectMeta(m interface{}) (string, bool) { + meta := reflect.ValueOf(m) + namespace := meta.Elem().FieldByName("Namespace") + if !namespace.IsValid() { + return "", false + } + name := meta.Elem().FieldByName("Name") + if !name.IsValid() { + return "", false + } + return namespace.String() + "/" + name.String(), true +} + +func (a Announcer) findInCHI(m interface{}) (string, bool) { + object := reflect.ValueOf(m) + chi := object.Elem().FieldByName("CHI") + if !chi.IsValid() { + return "", false + } + namespace := chi.Elem().FieldByName("Namespace") + if !namespace.IsValid() { + return "", false + } + name := chi.Elem().FieldByName("Name") + if !name.IsValid() { + return "", false + } + return namespace.String() + "/" + 
name.String(), true +} + +func (a Announcer) findInAddress(m interface{}) (string, bool) { + address := reflect.ValueOf(m) + namespace := address.Elem().FieldByName("Namespace") + if !namespace.IsValid() { + return "", false + } + name := address.Elem().FieldByName("CHIName") + if !name.IsValid() { + return "", false + } + return namespace.String() + "/" + name.String(), true +} From 01db4739a14ae40e894157381ff00a8ef53fdbb2 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 16:51:42 +0300 Subject: [PATCH 49/78] dev: make announcer understand nested strcutures --- pkg/announcer/announcer.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go index cd1b778cd..813334e23 100644 --- a/pkg/announcer/announcer.go +++ b/pkg/announcer/announcer.go @@ -339,7 +339,13 @@ func (a Announcer) findMeta(m interface{}) (string, bool) { } func (a Announcer) findInObjectMeta(m interface{}) (string, bool) { + if m == nil { + return "", false + } meta := reflect.ValueOf(m) + if !meta.IsValid() || meta.IsNil() || meta.IsZero() { + return "", false + } namespace := meta.Elem().FieldByName("Namespace") if !namespace.IsValid() { return "", false @@ -352,9 +358,15 @@ func (a Announcer) findInObjectMeta(m interface{}) (string, bool) { } func (a Announcer) findInCHI(m interface{}) (string, bool) { + if m == nil { + return "", false + } object := reflect.ValueOf(m) + if !object.IsValid() || object.IsNil() || object.IsZero() { + return "", false + } chi := object.Elem().FieldByName("CHI") - if !chi.IsValid() { + if !chi.IsValid() || chi.IsNil() || chi.IsZero() { return "", false } namespace := chi.Elem().FieldByName("Namespace") @@ -369,7 +381,13 @@ func (a Announcer) findInCHI(m interface{}) (string, bool) { } func (a Announcer) findInAddress(m interface{}) (string, bool) { + if m == nil { + return "", false + } address := reflect.ValueOf(m) + if !address.IsValid() || address.IsNil() 
|| address.IsZero() { + return "", false + } namespace := address.Elem().FieldByName("Namespace") if !namespace.IsValid() { return "", false From 66bfa19e513f5479f95e4513846cafa0b601a776 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 16:52:34 +0300 Subject: [PATCH 50/78] controller --- pkg/controller/chi/controller.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 2292b406b..c773a5ae7 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -562,15 +562,15 @@ func (c *Controller) updateCHIObjectStatus(chi *chi.ClickHouseInstallation, tole // installFinalizer func (c *Controller) installFinalizer(chi *chi.ClickHouseInstallation) error { - namespace, name := util.NamespaceName(chi.ObjectMeta) - log.V(2).M(chi).Info("Update CHI status") + log.V(2).M(chi).S().P() + defer log.V(2).M(chi).E().P() - cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) + cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Get(chi.Name, newGetOptions()) if err != nil { return err } if cur == nil { - return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", namespace, name) + return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", chi.Namespace, chi.Name) } if util.InArray(FinalizerName, cur.ObjectMeta.Finalizers) { @@ -584,15 +584,15 @@ func (c *Controller) installFinalizer(chi *chi.ClickHouseInstallation) error { // uninstallFinalizer func (c *Controller) uninstallFinalizer(chi *chi.ClickHouseInstallation) error { - namespace, name := util.NamespaceName(chi.ObjectMeta) - log.V(2).M(chi).Info("Update CHI status") + log.V(2).M(chi).S().P() + defer log.V(2).M(chi).E().P() - cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) + cur, err := 
c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Get(chi.Name, newGetOptions()) if err != nil { return err } if cur == nil { - return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", namespace, name) + return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", chi.Namespace, chi.Name) } cur.ObjectMeta.Finalizers = util.RemoveFromArray(FinalizerName, cur.ObjectMeta.Finalizers) From c8bb50d07e30b80949d68a8a1e539390c51bc59e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 16:52:48 +0300 Subject: [PATCH 51/78] creator --- pkg/controller/chi/creator.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/pkg/controller/chi/creator.go b/pkg/controller/chi/creator.go index 06c396e29..97bea8439 100644 --- a/pkg/controller/chi/creator.go +++ b/pkg/controller/chi/creator.go @@ -29,7 +29,7 @@ import ( // createStatefulSet is an internal function, used in reconcileStatefulSet only func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.ChiHost) error { - log.V(1).M(statefulSet).F().P() + log.V(1).M(host).F().P() if statefulSet, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil { // Error call Create() return err @@ -46,13 +46,13 @@ func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, host *chop // updateStatefulSet is an internal function, used in reconcileStatefulSet only func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStatefulSet *apps.StatefulSet, host *chop.ChiHost) error { - log.V(2).M(oldStatefulSet).F().P() + log.V(2).M(host).F().P() // Apply newStatefulSet and wait for Generation to change updatedStatefulSet, err := c.kubeClient.AppsV1().StatefulSets(newStatefulSet.Namespace).Update(newStatefulSet) if err != nil { // Update failed - log.V(1).M(newStatefulSet).A().Error("%v", err) + log.V(1).M(host).A().Error("%v", err) return err } @@ -62,18 +62,18 @@ func (c 
*Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStat if updatedStatefulSet.Generation == oldStatefulSet.Generation { // Generation is not updated - no changes in .spec section were made - log.V(2).M(oldStatefulSet).F().Info("no generation change") + log.V(2).M(host).F().Info("no generation change") return nil } - log.V(1).M(oldStatefulSet).F().Info("generation change %d=>%d", oldStatefulSet.Generation, updatedStatefulSet.Generation) + log.V(1).M(host).F().Info("generation change %d=>%d", oldStatefulSet.Generation, updatedStatefulSet.Generation) if err := c.waitHostReady(host); err == nil { // Target generation reached, StatefulSet updated successfully return nil } else { // Unable to run StatefulSet, StatefulSet update failed, time to rollback? - return c.onStatefulSetUpdateFailed(oldStatefulSet) + return c.onStatefulSetUpdateFailed(oldStatefulSet, host) } return fmt.Errorf("unexpected flow") @@ -101,22 +101,22 @@ func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulS switch c.chop.Config().OnStatefulSetCreateFailureAction { case chop.OnStatefulSetCreateFailureActionAbort: // Report appropriate error, it will break reconcile loop - log.V(1).M(failedStatefulSet).F().Info("abort") + log.V(1).M(host).F().Info("abort") return errors.New(fmt.Sprintf("Create failed on %s", util.NamespaceNameString(failedStatefulSet.ObjectMeta))) case chop.OnStatefulSetCreateFailureActionDelete: // Delete gracefully failed StatefulSet - log.V(1).M(failedStatefulSet).F().Info("going to DELETE FAILED StatefulSet") + log.V(1).M(host).F().Info("going to DELETE FAILED StatefulSet %s", util.NamespaceNameString(failedStatefulSet.ObjectMeta)) _ = c.deleteHost(host) return c.shouldContinueOnCreateFailed() case chop.OnStatefulSetCreateFailureActionIgnore: // Ignore error, continue reconcile loop - log.V(1).M(failedStatefulSet).F().Info("going to ignore error") + log.V(1).M(host).F().Info("going to ignore error %s", 
util.NamespaceNameString(failedStatefulSet.ObjectMeta)) return nil default: - log.V(1).M(failedStatefulSet).A().Error("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", c.chop.Config().OnStatefulSetCreateFailureAction) + log.V(1).M(host).A().Error("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", c.chop.Config().OnStatefulSetCreateFailureAction) return nil } @@ -125,7 +125,7 @@ func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulS // onStatefulSetUpdateFailed handles situation when StatefulSet update failed // It can try to revert StatefulSet to its previous version, specified in rollbackStatefulSet -func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.StatefulSet) error { +func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.StatefulSet, host *chop.ChiHost) error { // Convenience shortcuts namespace := rollbackStatefulSet.Namespace name := rollbackStatefulSet.Name @@ -134,12 +134,12 @@ func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.Statefu switch c.chop.Config().OnStatefulSetUpdateFailureAction { case chop.OnStatefulSetUpdateFailureActionAbort: // Report appropriate error, it will break reconcile loop - log.V(1).M(rollbackStatefulSet).F().Info("abort") + log.V(1).M(host).F().Info("abort StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta)) return errors.New(fmt.Sprintf("Update failed on %s/%s", namespace, name)) case chop.OnStatefulSetUpdateFailureActionRollback: // Need to revert current StatefulSet to oldStatefulSet - log.V(1).M(rollbackStatefulSet).F().Info("going to ROLLBACK FAILED StatefulSet") + log.V(1).M(host).F().Info("going to ROLLBACK FAILED StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta)) if statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name); err != nil { // Unable to get StatefulSet return err @@ -151,18 +151,18 @@ func (c *Controller) 
onStatefulSetUpdateFailed(rollbackStatefulSet *apps.Statefu // This will rollback Pod to previous .spec statefulSet.Spec = *rollbackStatefulSet.Spec.DeepCopy() statefulSet, err = c.kubeClient.AppsV1().StatefulSets(namespace).Update(statefulSet) - _ = c.statefulSetDeletePod(statefulSet) + _ = c.statefulSetDeletePod(statefulSet, host) return c.shouldContinueOnUpdateFailed() } case chop.OnStatefulSetUpdateFailureActionIgnore: // Ignore error, continue reconcile loop - log.V(1).M(rollbackStatefulSet).F().Info("going to ignore error") + log.V(1).M(host).F().Info("going to ignore error %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta)) return nil default: - log.V(1).M(rollbackStatefulSet).A().Error("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", c.chop.Config().OnStatefulSetUpdateFailureAction) + log.V(1).M(host).A().Error("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", c.chop.Config().OnStatefulSetUpdateFailureAction) return nil } From 040e8afc4fc474647efce96c05446050c9572c49 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 16:52:56 +0300 Subject: [PATCH 52/78] deleter --- pkg/controller/chi/deleter.go | 52 +++++++++++++++++------------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go index a166422db..bdf5b20ee 100644 --- a/pkg/controller/chi/deleter.go +++ b/pkg/controller/chi/deleter.go @@ -36,14 +36,14 @@ func (c *Controller) deleteHost(host *chop.ChiHost) error { // 5. 
Service // Need to delete all these item - log.V(1).M(host.GetCHI()).S().Info(host.Address.ClusterNameString()) + log.V(1).M(host).S().Info(host.Address.ClusterNameString()) _ = c.deleteStatefulSet(host) _ = c.deletePVC(host) _ = c.deleteConfigMap(host) _ = c.deleteServiceHost(host) - log.V(1).M(host.GetCHI()).E().Info(host.Address.ClusterNameString()) + log.V(1).M(host).E().Info(host.Address.ClusterNameString()) return nil } @@ -86,17 +86,17 @@ func (c *Controller) deleteConfigMapsCHI(chi *chop.ClickHouseInstallation) error } // statefulSetDeletePod delete a pod of a StatefulSet. This requests StatefulSet to relaunch deleted pod -func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet) error { +func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet, host *chop.ChiHost) error { name := chopmodel.CreatePodName(statefulSet) - log.V(1).M(statefulSet).Info("Delete Pod %s/%s", statefulSet.Namespace, name) + log.V(1).M(host).Info("Delete Pod %s/%s", statefulSet.Namespace, name) err := c.kubeClient.CoreV1().Pods(statefulSet.Namespace).Delete(name, newDeleteOptions()) if err == nil { - log.V(1).M(statefulSet).Info("OK delete Pod %s/%s", statefulSet.Namespace, name) + log.V(1).M(host).Info("OK delete Pod %s/%s", statefulSet.Namespace, name) } else if apierrors.IsNotFound(err) { - log.V(1).M(statefulSet).Info("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name) + log.V(1).M(host).Info("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name) err = nil } else { - log.V(1).M(statefulSet).A().Error("FAIL delete ConfigMap %s/%s err:%v", statefulSet.Namespace, name, err) + log.V(1).M(host).A().Error("FAIL delete ConfigMap %s/%s err:%v", statefulSet.Namespace, name, err) } return err @@ -113,14 +113,14 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error { name := chopmodel.CreateStatefulSetName(host) namespace := host.Address.Namespace - log.V(1).M(host.GetCHI()).F().Info("%s/%s", namespace, name) + 
log.V(1).M(host).F().Info("%s/%s", namespace, name) statefulSet, err := c.getStatefulSet(host) if err != nil { if apierrors.IsNotFound(err) { - log.V(1).M(host.GetCHI()).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) + log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) } else { - log.V(1).M(host.GetCHI()).A().Error("FAIL get StatefulSet %s/%s err:%v", namespace, name, err) + log.V(1).M(host).A().Error("FAIL get StatefulSet %s/%s err:%v", namespace, name, err) } return nil } @@ -135,13 +135,13 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error { // And now delete empty StatefulSet if err := c.kubeClient.AppsV1().StatefulSets(namespace).Delete(name, newDeleteOptions()); err == nil { - log.V(1).M(host.GetCHI()).Info("OK delete StatefulSet %s/%s", namespace, name) + log.V(1).M(host).Info("OK delete StatefulSet %s/%s", namespace, name) c.syncStatefulSet(host) } else if apierrors.IsNotFound(err) { - log.V(1).M(host.GetCHI()).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) + log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) err = nil } else { - log.V(1).M(host.GetCHI()).A().Error("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err) + log.V(1).M(host).A().Error("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err) return nil } @@ -165,26 +165,26 @@ func (c *Controller) syncStatefulSet(host *chop.ChiHost) { // deletePVC deletes PersistentVolumeClaim func (c *Controller) deletePVC(host *chop.ChiHost) error { - log.V(2).M(host.GetCHI()).S().P() - defer log.V(2).M(host.GetCHI()).E().P() + log.V(2).M(host).S().P() + defer log.V(2).M(host).E().P() namespace := host.Address.Namespace c.walkActualPVCs(host, func(pvc *v1.PersistentVolumeClaim) { if !chopmodel.HostCanDeletePVC(host, pvc.Name) { - log.V(1).M(host.GetCHI()).Info("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name) + log.V(1).M(host).Info("PVC %s/%s should not be deleted, leave it 
intact", namespace, pvc.Name) // Move to the next PVC return } // Actually delete PVC if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(pvc.Name, newDeleteOptions()); err == nil { - log.V(1).M(host.GetCHI()).Info("OK delete PVC %s/%s", namespace, pvc.Name) + log.V(1).M(host).Info("OK delete PVC %s/%s", namespace, pvc.Name) } else if apierrors.IsNotFound(err) { - log.V(1).M(host.GetCHI()).Info("NEUTRAL not found PVC %s/%s", namespace, pvc.Name) + log.V(1).M(host).Info("NEUTRAL not found PVC %s/%s", namespace, pvc.Name) err = nil } else { - log.M(host.GetCHI()).A().Error("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err) + log.M(host).A().Error("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err) } }) @@ -196,15 +196,15 @@ func (c *Controller) deleteConfigMap(host *chop.ChiHost) error { name := chopmodel.CreateConfigMapPodName(host) namespace := host.Address.Namespace - log.V(1).M(host.GetCHI()).F().Info("%s/%s", namespace, name) + log.V(1).M(host).F().Info("%s/%s", namespace, name) if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(name, newDeleteOptions()); err == nil { - log.V(1).M(host.GetCHI()).Info("OK delete ConfigMap %s/%s", namespace, name) + log.V(1).M(host).Info("OK delete ConfigMap %s/%s", namespace, name) } else if apierrors.IsNotFound(err) { - log.V(1).M(host.GetCHI()).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name) + log.V(1).M(host).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name) err = nil } else { - log.V(1).M(host.GetCHI()).A().Error("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err) + log.V(1).M(host).A().Error("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err) } return nil @@ -214,7 +214,7 @@ func (c *Controller) deleteConfigMap(host *chop.ChiHost) error { func (c *Controller) deleteServiceHost(host *chop.ChiHost) error { serviceName := chopmodel.CreateStatefulSetServiceName(host) namespace := host.Address.Namespace - 
log.V(1).M(host.GetCHI()).F().Info("%s/%s", namespace, serviceName) + log.V(1).M(host).F().Info("%s/%s", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -222,7 +222,7 @@ func (c *Controller) deleteServiceHost(host *chop.ChiHost) error { func (c *Controller) deleteServiceShard(shard *chop.ChiShard) error { serviceName := chopmodel.CreateShardServiceName(shard) namespace := shard.Address.Namespace - log.V(1).M(shard.GetCHI()).F().Info("%s/%s", namespace, serviceName) + log.V(1).M(shard).F().Info("%s/%s", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } @@ -230,7 +230,7 @@ func (c *Controller) deleteServiceShard(shard *chop.ChiShard) error { func (c *Controller) deleteServiceCluster(cluster *chop.ChiCluster) error { serviceName := chopmodel.CreateClusterServiceName(cluster) namespace := cluster.Address.Namespace - log.V(1).M(cluster.GetCHI()).F().Info("%s/%s", namespace, serviceName) + log.V(1).M(cluster).F().Info("%s/%s", namespace, serviceName) return c.deleteServiceIfExists(namespace, serviceName) } From 6460859343dca4c184e01278bc00f92127e1acf7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 16:53:05 +0300 Subject: [PATCH 53/78] pods --- pkg/controller/chi/pods.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index 3197dfd33..99f8a1fb9 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -25,7 +25,7 @@ import ( func (c *Controller) appendLabelReady(host *chop.ChiHost) error { pod, err := c.getPod(host) if err != nil { - log.M(host.Address.NamespaceCHINameString()).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) + log.M(host).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return err } @@ -37,7 +37,7 @@ func (c *Controller) appendLabelReady(host *chop.ChiHost) error { func (c *Controller) 
deleteLabelReady(host *chop.ChiHost) error { pod, err := c.getPod(host) if err != nil { - log.M(host.Address.NamespaceCHINameString()).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) + log.M(host).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return err } @@ -49,7 +49,7 @@ func (c *Controller) deleteLabelReady(host *chop.ChiHost) error { func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Container)) { pod, err := c.getPod(host) if err != nil { - log.M(host.Address.NamespaceCHINameString()).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) + log.M(host).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return } @@ -62,7 +62,7 @@ func (c *Controller) walkContainers(host *chop.ChiHost, f func(container *v1.Con func (c *Controller) walkContainerStatuses(host *chop.ChiHost, f func(status *v1.ContainerStatus)) { pod, err := c.getPod(host) if err != nil { - log.M(host.Address.NamespaceCHINameString()).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) + log.M(host).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return } From 6ec62a18bc7c75623d13ae73c5b749a9480076d3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 16:53:13 +0300 Subject: [PATCH 54/78] poller --- pkg/controller/chi/poller.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/pkg/controller/chi/poller.go b/pkg/controller/chi/poller.go index 4c043d6c1..882d6202f 100644 --- a/pkg/controller/chi/poller.go +++ b/pkg/controller/chi/poller.go @@ -69,24 +69,24 @@ func (c *Controller) waitHostRunning(host *chop.ChiHost) error { for { if c.isHostRunning(host) { // All is good, job done, exit - log.V(1).M(host.Address.NamespaceCHINameString()).F().Info("%s/%s-OK", namespace, name) + 
log.V(1).M(host).F().Info("%s/%s-OK", namespace, name) return nil } // Object is found, function not positive if time.Since(start) >= (time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second) { // Start bothering with log messages after some time only - log.V(1).M(host.Address.NamespaceCHINameString()).F().Info("%s/%s-WAIT", namespace, name) + log.V(1).M(host).F().Info("%s/%s-WAIT", namespace, name) } if time.Since(start) >= (time.Duration(c.chop.Config().StatefulSetUpdateTimeout) * time.Second) { // Timeout reached, no good result available, time to quit - log.V(1).M(host.Address.NamespaceCHINameString()).F().Error("%s/%s-TIMEOUT reached", namespace, name) + log.V(1).M(host).F().Error("%s/%s-TIMEOUT reached", namespace, name) return errors.New(fmt.Sprintf("waitHostRunning(%s/%s) - wait timeout", namespace, name)) } // Wait some more time - log.V(2).M(host.Address.NamespaceCHINameString()).F().Info("%s/%s", namespace, name) + log.V(2).M(host).F().Info("%s/%s", namespace, name) select { case <-time.After(time.Duration(c.chop.Config().StatefulSetUpdatePollPeriod) * time.Second): } @@ -204,31 +204,30 @@ func (c *Controller) pollHost(host *chop.ChiHost, opts *StatefulSetPollOptions, } namespace := host.Address.Namespace name := host.Address.HostName - m := host.Address.NamespaceCHINameString() // Wait timeout is specified in c.chopConfig.StatefulSetUpdateTimeout in seconds start := time.Now() for { if f(host) { // All is good, job done, exit - log.V(1).M(m).F().Info("%s/%s-OK", namespace, name) + log.V(1).M(host).F().Info("%s/%s-OK", namespace, name) return nil } // Object is found, but function is not positive if time.Since(start) >= opts.StartBotheringAfterTimeout { // Start bothering with log messages after some time only - log.V(1).M(m).F().Info("%s/%s-WAIT", namespace, name) + log.V(1).M(host).F().Info("%s/%s-WAIT", namespace, name) } if time.Since(start) >= opts.Timeout { // Timeout reached, no good result available, time to quit - 
log.V(1).M(m).F().Error("%s/%s-TIMEOUT reached", namespace, name) + log.V(1).M(host).F().Error("%s/%s-TIMEOUT reached", namespace, name) return errors.New(fmt.Sprintf("pollHost(%s/%s) - wait timeout", namespace, name)) } // Wait some more time - log.V(2).M(m).F().Info("%s/%s", namespace, name) + log.V(2).M(host).F().Info("%s/%s", namespace, name) select { case <-time.After(opts.Interval): } From 2b94504da2bb9fa32cb612725182a3697d65531a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 16:53:24 +0300 Subject: [PATCH 55/78] volumes --- pkg/controller/chi/volumes.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/controller/chi/volumes.go b/pkg/controller/chi/volumes.go index aecc6097f..a74c05ece 100644 --- a/pkg/controller/chi/volumes.go +++ b/pkg/controller/chi/volumes.go @@ -27,7 +27,7 @@ func (c *Controller) walkPVCs(host *chop.ChiHost, f func(pvc *v1.PersistentVolum name := chopmodel.CreatePodName(host) pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(name, newGetOptions()) if err != nil { - log.Error("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) + log.M(host).A().Error("FAIL get pod for host %s/%s err:%v", namespace, host.Name, err) return } @@ -40,7 +40,7 @@ func (c *Controller) walkPVCs(host *chop.ChiHost, f func(pvc *v1.PersistentVolum pvcName := volume.PersistentVolumeClaim.ClaimName pvc, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(pvcName, newGetOptions()) if err != nil { - log.Error("FAIL get PVC %s/%s err:%v", namespace, pvcName, err) + log.M(host).A().Error("FAIL get PVC %s/%s err:%v", namespace, pvcName, err) continue } @@ -54,7 +54,7 @@ func (c *Controller) walkActualPVCs(host *chop.ChiHost, f func(pvc *v1.Persisten pvcList, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(newListOptions(labeler.GetSelectorHostScope(host))) if err != nil { - log.Error("FAIL get list of PVC for host %s/%s err:%v", namespace, host.Name, err) + 
log.M(host).A().Error("FAIL get list of PVC for host %s/%s err:%v", namespace, host.Name, err) return } @@ -70,7 +70,7 @@ func (c *Controller) walkPVs(host *chop.ChiHost, f func(pv *v1.PersistentVolume) c.walkPVCs(host, func(pvc *v1.PersistentVolumeClaim) { pv, err := c.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, newGetOptions()) if err != nil { - log.Error("FAIL get PV %s err:%v", pvc.Spec.VolumeName, err) + log.M(host).A().Error("FAIL get PV %s err:%v", pvc.Spec.VolumeName, err) return } f(pv) From ad822277682e91772fe4bcf90723dcc62d866df2 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 4 Feb 2021 16:53:33 +0300 Subject: [PATCH 56/78] worker --- pkg/controller/chi/worker.go | 251 ++++++++++++++++++++--------------- 1 file changed, 146 insertions(+), 105 deletions(-) diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index 58071be69..81f052afe 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -64,8 +64,8 @@ func (c *Controller) newWorker(queue workqueue.RateLimitingInterface) *worker { // run is an endless work loop, expected to be run in a thread func (w *worker) run() { - w.a.V(2).Info("run() - start") - defer w.a.V(2).Info("run() - end") + w.a.V(2).S().P() + defer w.a.V(2).E().P() for { // Get() blocks until it can return an item @@ -93,8 +93,8 @@ func (w *worker) run() { // processWorkItem processes one work item according to its type func (w *worker) processItem(item interface{}) error { - w.a.V(3).Info("processItem() - start") - defer w.a.V(3).Info("processItem() - end") + w.a.V(3).S().P() + defer w.a.V(3).E().P() switch item.(type) { @@ -153,10 +153,10 @@ func (w *worker) processItem(item interface{}) error { case *DropDns: drop, _ := item.(*DropDns) if chi, err := w.createCHIFromObjectMeta(drop.initiator); err == nil { - w.a.V(2).Info("endpointsInformer UpdateFunc(%s/%s) flushing DNS for CHI %s", drop.initiator.Namespace, drop.initiator.Name, chi.Name) + 
w.a.V(2).M(drop.initiator).Info("flushing DNS for CHI %s", chi.Name) _ = w.schemer.CHIDropDnsCache(chi) } else { - w.a.Error("endpointsInformer UpdateFunc(%s/%s) unable to find CHI by %v", drop.initiator.Namespace, drop.initiator.Name, drop.initiator.Labels) + w.a.M(drop.initiator).A().Error("unable to find CHI by %v", drop.initiator.Labels) } return nil } @@ -169,8 +169,8 @@ func (w *worker) processItem(item interface{}) error { // normalize func (w *worker) normalize(chi *chop.ClickHouseInstallation) *chop.ClickHouseInstallation { - w.a.V(3).Info("normalize() - start") - defer w.a.V(3).Info("normalize() - end") + w.a.V(3).M(chi).S().P() + defer w.a.V(3).M(chi).E().P() var withDefaultCluster bool @@ -185,6 +185,7 @@ func (w *worker) normalize(chi *chop.ClickHouseInstallation) *chop.ClickHouseIns if err != nil { w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed). WithStatusError(chi). + M(chi).A(). Error("FAILED to normalize CHI : %v", err) } @@ -193,32 +194,30 @@ func (w *worker) normalize(chi *chop.ClickHouseInstallation) *chop.ClickHouseIns // ensureFinalizer func (w *worker) ensureFinalizer(chi *chop.ClickHouseInstallation) { - namespace, name := util.NamespaceName(chi.ObjectMeta) - // Check whether finalizer is already listed in CHI if util.InArray(FinalizerName, chi.ObjectMeta.Finalizers) { - w.a.V(2).Info("ensureFinalizer(%s/%s): finalizer already installed", namespace, name) + w.a.V(2).M(chi).F().Info("finalizer already installed") } // No finalizer found - need to install it if err := w.c.installFinalizer(chi); err != nil { - w.a.V(1).Info("ensureFinalizer(%s/%s): unable to install finalizer. err: %v", namespace, name, err) + w.a.V(1).M(chi).A().Error("unable to install finalizer. 
err: %v", err) } - w.a.V(3).Info("ensureFinalizer(%s/%s): finalizer installed", namespace, name) + w.a.V(3).M(chi).F().Info("finalizer installed") } // updateCHI sync CHI which was already created earlier func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { - w.a.V(3).Info("updateCHI() - start") - defer w.a.V(3).Info("updateCHI() - end") + w.a.V(3).M(new).S().P() + defer w.a.V(3).M(new).E().P() update := (old != nil) && (new != nil) if update && (old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion) { - w.a.V(3).Info("updateCHI(%s/%s): ResourceVersion did not change: %s", new.Namespace, new.Name, new.ObjectMeta.ResourceVersion) // No need to react + w.a.V(3).M(new).F().Info("ResourceVersion did not change: %s", new.ObjectMeta.ResourceVersion) return nil } @@ -238,28 +237,30 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { if !actionPlan.HasActionsToDo() { // Nothing to do - no changes found - no need to react - w.a.V(3).Info("updateCHI(%s/%s) - ResourceVersion changed, but no actual changes found", new.Namespace, new.Name) + w.a.V(3).M(new).F().Info("ResourceVersion changed, but no actual changes found") return nil } // Write desired normalized CHI with initialized .Status, so it would be possible to monitor progress (&new.Status).ReconcileStart(actionPlan.GetRemovedHostsNum()) if err := w.c.updateCHIObjectStatus(new, false); err != nil { - w.a.V(1).Info("UNABLE to write normalized CHI (%s/%s). It can trigger update action again. Error: %q", new.Namespace, new.Name, err) + w.a.V(1).M(new).A().Error("UNABLE to write normalized CHI. Can trigger update action. Err: %q", err) return nil } w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileStarted). WithStatusAction(new). - Info("updateCHI(%s/%s) reconcile started", new.Namespace, new.Name) - w.a.V(2).Info("updateCHI(%s/%s) - action plan\n%s\n", new.Namespace, new.Name, actionPlan.String()) + M(new).F(). 
+ Info("reconcile started") + w.a.V(2).M(new).F().Info("action plan\n%s\n", actionPlan.String()) if new.IsStopped() { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress). WithStatusAction(new). - Info("updateCHI(%s/%s) exclude CHI from monitoring", new.Namespace, new.Name) + M(new).F(). + Info("exclude CHI from monitoring") w.c.deleteWatch(new.Namespace, new.Name) } @@ -305,13 +306,13 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { new.WalkHosts(func(host *chop.ChiHost) error { if host.ReconcileAttributes.IsAdd() { - w.a.Info("ADD host: %s", host.Address.CompactString()) + w.a.M(host).Info("ADD host: %s", host.Address.CompactString()) } else if host.ReconcileAttributes.IsModify() { - w.a.Info("MODIFY host: %s", host.Address.CompactString()) + w.a.M(host).Info("MODIFY host: %s", host.Address.CompactString()) } else if host.ReconcileAttributes.IsUnclear() { - w.a.Info("UNCLEAR host: %s", host.Address.CompactString()) + w.a.M(host).Info("UNCLEAR host: %s", host.Address.CompactString()) } else { - w.a.Info("UNTOUCHED host: %s", host.Address.CompactString()) + w.a.M(host).Info("UNTOUCHED host: %s", host.Address.CompactString()) } return nil }) @@ -319,6 +320,7 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { if err := w.reconcile(new); err != nil { w.a.WithEvent(new, eventActionReconcile, eventReasonReconcileFailed). WithStatusError(new). + M(new).A(). Error("FAILED update: %v", err) return nil } @@ -327,7 +329,8 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress). WithStatusAction(new). - Info("updateCHI(%s/%s) remove scheduled for deletion items", new.Namespace, new.Name) + M(new).F(). 
+ Info("remove items scheduled for deletion") actionPlan.WalkAdded( func(cluster *chop.ChiCluster) { }, @@ -354,7 +357,8 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress). WithStatusAction(new). - Info("updateCHI(%s/%s) remove scheduled for deletion items", new.Namespace, new.Name) + M(new).F(). + Info("remove items scheduled for deletion") actionPlan.WalkRemoved( func(cluster *chop.ChiCluster) { _ = w.deleteCluster(cluster) @@ -371,7 +375,8 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress). WithStatusAction(new). - Info("updateCHI(%s/%s) add CHI to monitoring", new.Namespace, new.Name) + M(new).F(). + Info("add CHI to monitoring") w.c.updateWatch(new.Namespace, new.Name, chopmodel.CreatePodFQDNsOfCHI(new)) } @@ -382,15 +387,16 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(new, eventActionReconcile, eventReasonReconcileCompleted). WithStatusActions(new). - Info("updateCHI(%s/%s) reconcile completed", new.Namespace, new.Name) + M(new).F(). 
+ Info("reconcile completed") return nil } // reconcile reconciles ClickHouseInstallation func (w *worker) reconcile(chi *chop.ClickHouseInstallation) error { - w.a.V(2).Info("reconcile() - start") - defer w.a.V(2).Info("reconcile() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() w.creator = chopmodel.NewCreator(w.c.chop, chi) return chi.WalkTillError( @@ -404,8 +410,8 @@ func (w *worker) reconcile(chi *chop.ClickHouseInstallation) error { // reconcileCHIAuxObjectsPreliminary reconciles CHI preliminary in order to ensure that ConfigMaps are in place func (w *worker) reconcileCHIAuxObjectsPreliminary(chi *chop.ClickHouseInstallation) error { - w.a.V(2).Info("reconcileCHIAuxObjectsPreliminary() - start") - defer w.a.V(2).Info("reconcileCHIAuxObjectsPreliminary() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() // 1. CHI Service if chi.IsStopped() { @@ -428,8 +434,8 @@ func (w *worker) reconcileCHIAuxObjectsPreliminary(chi *chop.ClickHouseInstallat // reconcileCHIAuxObjectsFinal reconciles CHI global objects func (w *worker) reconcileCHIAuxObjectsFinal(chi *chop.ClickHouseInstallation) error { - w.a.V(2).Info("reconcileCHIAuxObjectsFinal() - start") - defer w.a.V(2).Info("reconcileCHIAuxObjectsFinal() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() // CHI ConfigMaps with update return w.reconcileCHIConfigMapCommon(chi, nil, true) @@ -456,8 +462,8 @@ func (w *worker) reconcileCHIConfigMapUsers(chi *chop.ClickHouseInstallation, op // reconcileCluster reconciles Cluster, excluding nested shards func (w *worker) reconcileCluster(cluster *chop.ChiCluster) error { - w.a.V(2).Info("reconcileCluster() - start") - defer w.a.V(2).Info("reconcileCluster() - end") + w.a.V(2).M(cluster).S().P() + defer w.a.V(2).M(cluster).E().P() // Add Cluster's Service service := w.creator.CreateServiceCluster(cluster) @@ -471,8 +477,8 @@ func (w *worker) reconcileCluster(cluster *chop.ChiCluster) error { // reconcileShard reconciles Shard, 
excluding nested replicas func (w *worker) reconcileShard(shard *chop.ChiShard) error { - w.a.V(2).Info("reconcileShard() - start") - defer w.a.V(2).Info("reconcileShard() - end") + w.a.V(2).M(shard).S().P() + defer w.a.V(2).M(shard).E().P() // Add Shard's Service service := w.creator.CreateServiceShard(shard) @@ -486,26 +492,27 @@ func (w *worker) reconcileShard(shard *chop.ChiShard) error { // reconcileHost reconciles ClickHouse host func (w *worker) reconcileHost(host *chop.ChiHost) error { - w.a.V(2).Info("reconcileHost() - start") - defer w.a.V(2).Info("reconcileHost() - end") + w.a.V(2).M(host).S().P() + defer w.a.V(2).M(host).E().P() w.a.V(1). - WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileStarted). - WithStatusAction(host.CHI). + WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileStarted). + WithStatusAction(host.GetCHI()). + M(host).F(). Info("Reconcile Host %s started", host.Name) // Create artifacts configMap := w.creator.CreateConfigMapHost(host) statefulSet := w.creator.CreateStatefulSet(host) service := w.creator.CreateServiceHost(host) - (&host.ReconcileAttributes).SetStatus(w.getStatefulSetStatus(statefulSet)) + (&host.ReconcileAttributes).SetStatus(w.getStatefulSetStatus(statefulSet, host)) if err := w.excludeHost(host); err != nil { return err } // Reconcile host's ConfigMap - if err := w.reconcileConfigMap(host.CHI, configMap, true); err != nil { + if err := w.reconcileConfigMap(host.GetCHI(), configMap, true); err != nil { return err } @@ -518,7 +525,7 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { w.reconcilePersistentVolumes(host) // Reconcile host's Service - if err := w.reconcileService(host.CHI, service); err != nil { + if err := w.reconcileService(host.GetCHI(), service); err != nil { return err } @@ -526,14 +533,16 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { if w.migrateTables(host) { w.a.V(1). - WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted). 
- WithStatusAction(host.CHI). + WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateStarted). + WithStatusAction(host.GetCHI()). + M(host).F(). Info("Adding tables on shard/host:%d/%d cluster:%s", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName) if err := w.schemer.HostCreateTables(host); err != nil { - w.a.Error("ERROR create tables on host %s. err: %v", host.Name, err) + w.a.M(host).A().Error("ERROR create tables on host %s. err: %v", host.Name, err) } } else { w.a.V(1). + M(host).F(). Info("No need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) } @@ -545,6 +554,7 @@ func (w *worker) reconcileHost(host *chop.ChiHost) error { w.a.V(1). WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Reconcile Host %s completed", host.Name) return nil @@ -564,6 +574,7 @@ func (w *worker) migrateTables(host *chop.ChiHost) bool { func (w *worker) excludeHost(host *chop.ChiHost) error { if w.shouldExcludeHost(host) { w.a.V(1). + M(host).F(). Info("Exclude from cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) w.excludeHostFromService(host) @@ -575,6 +586,7 @@ func (w *worker) excludeHost(host *chop.ChiHost) error { // Always include host back to ClickHouse clusters func (w *worker) includeHost(host *chop.ChiHost) error { w.a.V(1). + M(host).F(). 
Info("Include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) w.includeHostIntoClickHouseCluster(host) @@ -605,7 +617,7 @@ func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) { // Remove host from cluster config and wait for ClickHouse to pick-up the change if w.waitExcludeHost(host) { - _ = w.reconcileCHIConfigMapCommon(host.CHI, options, true) + _ = w.reconcileCHIConfigMapCommon(host.GetCHI(), options, true) _ = w.waitHostNotInCluster(host) } } @@ -619,7 +631,7 @@ func (w *worker) includeHostIntoClickHouseCluster(host *chop.ChiHost) { ), ) // Add host to the cluster config (always) and wait for ClickHouse to pick-up the change - _ = w.reconcileCHIConfigMapCommon(host.CHI, options, true) + _ = w.reconcileCHIConfigMapCommon(host.GetCHI(), options, true) if w.waitIncludeHost(host) { _ = w.waitHostInCluster(host) } @@ -645,9 +657,9 @@ func (w *worker) shouldExcludeHost(host *chop.ChiHost) bool { func (w *worker) waitExcludeHost(host *chop.ChiHost) bool { // Check CHI settings switch { - case host.CHI.IsReconcilingPolicyWait(): + case host.GetCHI().IsReconcilingPolicyWait(): return true - case host.CHI.IsReconcilingPolicyNoWait(): + case host.GetCHI().IsReconcilingPolicyNoWait(): return false } @@ -669,9 +681,9 @@ func (w *worker) waitIncludeHost(host *chop.ChiHost) bool { // Check CHI settings switch { - case host.CHI.IsReconcilingPolicyWait(): + case host.GetCHI().IsReconcilingPolicyWait(): return true - case host.CHI.IsReconcilingPolicyNoWait(): + case host.GetCHI().IsReconcilingPolicyNoWait(): return false } @@ -693,11 +705,10 @@ func (w *worker) waitHostNotInCluster(host *chop.ChiHost) error { // finalizeCHI func (w *worker) finalizeCHI(chi *chop.ClickHouseInstallation) error { - namespace, name := util.NamespaceName(chi.ObjectMeta) - w.a.V(3).Info("finalizeCHI(%s/%s) - start", namespace, name) - defer w.a.V(3).Info("finalizeCHI(%s/%s) - end", namespace, name) + 
w.a.V(3).M(chi).S().P() + defer w.a.V(3).M(chi).E().P() - cur, err := w.c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(name, newGetOptions()) + cur, err := w.c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Get(chi.Name, newGetOptions()) if (err != nil) || (cur == nil) { return nil } @@ -710,16 +721,16 @@ func (w *worker) finalizeCHI(chi *chop.ClickHouseInstallation) error { // Delete CHI (&chi.Status).DeleteStart() if err := w.c.updateCHIObjectStatus(chi, true); err != nil { - w.a.V(1).Info("UNABLE to write normalized CHI (%s/%s). err:%q", namespace, name, err) + w.a.V(1).M(chi).A().Error("UNABLE to write normalized CHI. err:%q", err) return nil } _ = w.deleteCHI(chi) // Uninstall finalizer - w.a.V(2).Info("finalizeCHI(%s/%s): uninstall finalizer", namespace, name) + w.a.V(2).M(chi).F().Info("uninstall finalizer") if err := w.c.uninstallFinalizer(chi); err != nil { - w.a.V(1).Info("finalizeCHI(%s/%s): unable to uninstall finalizer: err:%v", namespace, name, err) + w.a.V(1).M(chi).A().Error("unable to uninstall finalizer: err:%v", err) } return nil @@ -727,21 +738,23 @@ func (w *worker) finalizeCHI(chi *chop.ClickHouseInstallation) error { // deleteCHI deletes all kubernetes resources related to chi *chop.ClickHouseInstallation func (w *worker) deleteCHI(chi *chop.ClickHouseInstallation) error { - w.a.V(2).Info("deleteCHI() - start") - defer w.a.V(2).Info("deleteCHI() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() var err error w.a.V(1). WithEvent(chi, eventActionDelete, eventReasonDeleteStarted). WithStatusAction(chi). - Info("Delete CHI %s/%s started", chi.Namespace, chi.Name) + M(chi).F(). + Info("Delete CHI started") chi, err = w.normalizer.CreateTemplatedCHI(chi, true) if err != nil { w.a.WithEvent(chi, eventActionDelete, eventReasonDeleteFailed). WithStatusError(chi). - Error("Delete CHI %s/%s failed - unable to normalize: %q", chi.Namespace, chi.Name, err) + M(chi).A(). 
+ Error("Delete CHI failed - unable to normalize: %q", err) return err } @@ -762,7 +775,8 @@ func (w *worker) deleteCHI(chi *chop.ClickHouseInstallation) error { w.a.V(1). WithEvent(chi, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(chi). - Info("Delete CHI %s/%s - completed", chi.Namespace, chi.Name) + M(chi).F(). + Info("Delete CHI completed") return nil } @@ -778,11 +792,13 @@ func (w *worker) deleteTables(host *chop.ChiHost) error { w.a.V(1). WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Deleted tables on host %s replica %d to shard %d in cluster %s", host.Name, host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName) } else { w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteFailed). WithStatusError(host.CHI). + M(host).A(). Error("FAILED to delete tables on host %s with error %v", host.Name, err) } @@ -791,17 +807,19 @@ func (w *worker) deleteTables(host *chop.ChiHost) error { // deleteHost deletes all kubernetes resources related to replica *chop.ChiHost func (w *worker) deleteHost(host *chop.ChiHost) error { - w.a.V(2).Info("deleteHost() - start") - defer w.a.V(2).Info("deleteHost() - end") + w.a.V(2).M(host).S().Info(host.Address.HostName) + defer w.a.V(2).M(host).E().Info(host.Address.HostName) w.a.V(1). WithEvent(host.CHI, eventActionDelete, eventReasonDeleteStarted). WithStatusAction(host.CHI). + M(host).F(). Info("Delete host %s/%s - started", host.Address.ClusterName, host.Name) if _, err := w.c.getStatefulSet(host); err != nil { w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Delete host %s/%s - completed StatefulSet not found - already deleted? err: %v", host.Address.ClusterName, host.Name, err) return nil @@ -826,10 +844,12 @@ func (w *worker) deleteHost(host *chop.ChiHost) error { w.a.V(1). WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted). 
WithStatusAction(host.CHI). + M(host).F(). Info("Delete host %s/%s - completed", host.Address.ClusterName, host.Name) } else { w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteFailed). WithStatusError(host.CHI). + M(host).F(). Error("FAILED Delete host %s/%s - completed", host.Address.ClusterName, host.Name) } @@ -838,12 +858,13 @@ func (w *worker) deleteHost(host *chop.ChiHost) error { // deleteShard deletes all kubernetes resources related to shard *chop.ChiShard func (w *worker) deleteShard(shard *chop.ChiShard) error { - w.a.V(2).Info("deleteShard() - start") - defer w.a.V(2).Info("deleteShard() - end") + w.a.V(2).M(shard).S().P() + defer w.a.V(2).M(shard).E().P() w.a.V(1). WithEvent(shard.CHI, eventActionDelete, eventReasonDeleteStarted). WithStatusAction(shard.CHI). + M(shard).F(). Info("Delete shard %s/%s - started", shard.Address.Namespace, shard.Name) // Delete all replicas @@ -855,6 +876,7 @@ func (w *worker) deleteShard(shard *chop.ChiShard) error { w.a.V(1). WithEvent(shard.CHI, eventActionDelete, eventReasonDeleteCompleted). WithStatusAction(shard.CHI). + M(shard).F(). Info("Delete shard %s/%s - completed", shard.Address.Namespace, shard.Name) return nil @@ -862,12 +884,13 @@ func (w *worker) deleteShard(shard *chop.ChiShard) error { // deleteCluster deletes all kubernetes resources related to cluster *chop.ChiCluster func (w *worker) deleteCluster(cluster *chop.ChiCluster) error { - w.a.V(2).Info("deleteCluster() - start") - defer w.a.V(2).Info("deleteCluster() - end") + w.a.V(2).M(cluster).S().P() + defer w.a.V(2).M(cluster).E().P() w.a.V(1). WithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteStarted). WithStatusAction(cluster.CHI). + M(cluster).F(). Info("Delete cluster %s/%s - started", cluster.Address.Namespace, cluster.Name) // Delete all shards @@ -881,6 +904,7 @@ func (w *worker) deleteCluster(cluster *chop.ChiCluster) error { w.a.V(1). WithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteCompleted). 
WithStatusAction(cluster.CHI). + M(cluster).F(). Info("Delete cluster %s/%s - completed", cluster.Address.Namespace, cluster.Name) return nil @@ -888,8 +912,8 @@ func (w *worker) deleteCluster(cluster *chop.ChiCluster) error { // createCHIFromObjectMeta func (w *worker) createCHIFromObjectMeta(objectMeta *meta.ObjectMeta) (*chop.ClickHouseInstallation, error) { - w.a.V(3).Info("createCHIFromObjectMeta() - start") - defer w.a.V(3).Info("createCHIFromObjectMeta() - end") + w.a.V(3).M(objectMeta).S().P() + defer w.a.V(3).M(objectMeta).E().P() chi, err := w.c.GetCHIByObjectMeta(objectMeta) if err != nil { @@ -906,8 +930,8 @@ func (w *worker) createCHIFromObjectMeta(objectMeta *meta.ObjectMeta) (*chop.Cli // createClusterFromObjectMeta func (w *worker) createClusterFromObjectMeta(objectMeta *meta.ObjectMeta) (*chop.ChiCluster, error) { - w.a.V(3).Info("createClusterFromObjectMeta() - start") - defer w.a.V(3).Info("createClusterFromObjectMeta() - end") + w.a.V(3).M(objectMeta).S().P() + defer w.a.V(3).M(objectMeta).E().P() clusterName, err := chopmodel.GetClusterNameFromObjectMeta(objectMeta) if err != nil { @@ -935,11 +959,13 @@ func (w *worker) updateConfigMap(chi *chop.ClickHouseInstallation, configMap *co w.a.V(1). WithEvent(chi, eventActionUpdate, eventReasonUpdateCompleted). WithStatusAction(chi). + M(chi).F(). Info("Update ConfigMap %s/%s", configMap.Namespace, configMap.Name) } else { w.a.WithEvent(chi, eventActionUpdate, eventReasonUpdateFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("Update ConfigMap %s/%s failed with error %v", configMap.Namespace, configMap.Name, err) } @@ -954,11 +980,13 @@ func (w *worker) createConfigMap(chi *chop.ClickHouseInstallation, configMap *co w.a.V(1). WithEvent(chi, eventActionCreate, eventReasonCreateCompleted). WithStatusAction(chi). + M(chi).F(). Info("Create ConfigMap %s/%s", configMap.Namespace, configMap.Name) } else { w.a.WithEvent(chi, eventActionCreate, eventReasonCreateFailed). 
WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("Create ConfigMap %s/%s failed with error %v", configMap.Namespace, configMap.Name, err) } @@ -971,8 +999,8 @@ func (w *worker) reconcileConfigMap( configMap *core.ConfigMap, update bool, ) error { - w.a.V(2).Info("reconcileConfigMap() - start") - defer w.a.V(2).Info("reconcileConfigMap() - end") + w.a.V(2).M(chi).S().P() + defer w.a.V(2).M(chi).E().P() // Check whether this object already exists in k8s curConfigMap, err := w.c.getConfigMap(&configMap.ObjectMeta, false) @@ -994,6 +1022,7 @@ func (w *worker) reconcileConfigMap( w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("FAILED to reconcile ConfigMap: %s CHI: %s ", configMap.Name, chi.Name) } @@ -1025,7 +1054,7 @@ func (w *worker) updateService(chi *chop.ClickHouseInstallation, curService, new // Already have this port specified - reuse all internals, // due to limitations with auto-assigned values *newPort = *curPort - w.a.Info("reuse Port %d values", newPort.Port) + w.a.M(chi).F().Info("reuse Port %d values", newPort.Port) break } } @@ -1058,11 +1087,13 @@ func (w *worker) updateService(chi *chop.ClickHouseInstallation, curService, new w.a.V(1). WithEvent(chi, eventActionUpdate, eventReasonUpdateCompleted). WithStatusAction(chi). + M(chi).F(). Info("Update Service %s/%s", newService.Namespace, newService.Name) } else { w.a.WithEvent(chi, eventActionUpdate, eventReasonUpdateFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("Update Service %s/%s failed with error %v", newService.Namespace, newService.Name, err) } @@ -1077,11 +1108,13 @@ func (w *worker) createService(chi *chop.ClickHouseInstallation, service *core.S w.a.V(1). WithEvent(chi, eventActionCreate, eventReasonCreateCompleted). WithStatusAction(chi). + M(chi).F(). 
Info("Create Service %s/%s", service.Namespace, service.Name) } else { w.a.WithEvent(chi, eventActionCreate, eventReasonCreateFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). Error("Create Service %s/%s failed with error %v", service.Namespace, service.Name, err) } @@ -1090,8 +1123,8 @@ func (w *worker) createService(chi *chop.ClickHouseInstallation, service *core.S // reconcileService reconciles core.Service func (w *worker) reconcileService(chi *chop.ClickHouseInstallation, service *core.Service) error { - w.a.V(2).Info("reconcileService() - start") - defer w.a.V(2).Info("reconcileService() - end") + w.a.V(2).M(chi).S().Info(service.Name) + defer w.a.V(2).M(chi).E().Info(service.Name) // Check whether this object already exists curService, err := w.c.getService(&service.ObjectMeta, false) @@ -1111,15 +1144,16 @@ func (w *worker) reconcileService(chi *chop.ClickHouseInstallation, service *cor w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed). WithStatusAction(chi). WithStatusError(chi). + M(chi).A(). 
Error("FAILED to reconcile Service: %s CHI: %s ", service.Name, chi.Name) } return err } -func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) chop.StatefulSetStatus { - w.a.V(2).Info("getStatefulSetStatus() - start") - defer w.a.V(2).Info("getStatefulSetStatus() - end") +func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet, host *chop.ChiHost) chop.StatefulSetStatus { + w.a.V(2).M(host).S().Info(util.NamespaceNameString(statefulSet.ObjectMeta)) + defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(statefulSet.ObjectMeta)) // Check whether this object already exists in k8s curStatefulSet, err := w.c.getStatefulSet(&statefulSet.ObjectMeta, false) @@ -1130,7 +1164,7 @@ func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) chop.Statef newLabel, newHasLabel := w.creator.GetStatefulSetVersion(statefulSet) if curHasLabel && newHasLabel { if curLabel == newLabel { - w.a.Info("INFO StatefulSet ARE EQUAL based on labels no reconcile is actually needed") + w.a.M(host).F().Info("INFO StatefulSet ARE EQUAL based on labels no reconcile is actually needed %s", util.NamespaceNameString(statefulSet.ObjectMeta)) return chop.StatefulSetStatusSame } else { //if diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, statefulSet.Spec); equal { @@ -1140,7 +1174,7 @@ func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) chop.Statef // w.a.Info("INFO StatefulSet ARE DIFFERENT based on diff reconcile is required: a:%v m:%v r:%v", diff.Added, diff.Modified, diff.Removed) // // return chop.StatefulSetStatusModified //} - w.a.Info("INFO StatefulSet ARE DIFFERENT based on labels reconcile needed") + w.a.M(host).F().Info("INFO StatefulSet ARE DIFFERENT based on labels reconcile needed %s", util.NamespaceNameString(statefulSet.ObjectMeta)) return chop.StatefulSetStatusModified } } @@ -1159,11 +1193,11 @@ func (w *worker) getStatefulSetStatus(statefulSet *apps.StatefulSet) chop.Statef // reconcileStatefulSet reconciles 
apps.StatefulSet func (w *worker) reconcileStatefulSet(newStatefulSet *apps.StatefulSet, host *chop.ChiHost) error { - w.a.V(2).Info("reconcileStatefulSet() - start") - defer w.a.V(2).Info("reconcileStatefulSet() - end") + w.a.V(2).M(host).S().Info(util.NamespaceNameString(newStatefulSet.ObjectMeta)) + defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(newStatefulSet.ObjectMeta)) if host.ReconcileAttributes.GetStatus() == chop.StatefulSetStatusSame { - defer w.a.V(2).Info("reconcileStatefulSet() - no need to reconcile the same StaetfulSet") + defer w.a.V(2).M(host).F().Info("no need to reconcile the same StatefulSet %s", util.NamespaceNameString(newStatefulSet.ObjectMeta)) return nil } @@ -1184,6 +1218,7 @@ func (w *worker) reconcileStatefulSet(newStatefulSet *apps.StatefulSet, host *ch w.a.WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileFailed). WithStatusAction(host.CHI). WithStatusError(host.CHI). + M(host).A(). Error("FAILED to reconcile StatefulSet: %s CHI: %s ", newStatefulSet.Name, host.CHI.Name) } @@ -1192,12 +1227,13 @@ func (w *worker) reconcileStatefulSet(newStatefulSet *apps.StatefulSet, host *ch // createStatefulSet func (w *worker) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.ChiHost) error { - w.a.V(2).Info("createStatefulSet() - start") - defer w.a.V(2).Info("createStatefulSet() - end") + w.a.V(2).M(host).S().Info(util.NamespaceNameString(statefulSet.ObjectMeta)) + defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(statefulSet.ObjectMeta)) w.a.V(1). WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted). WithStatusAction(host.CHI). + M(host).F(). Info("Create StatefulSet %s/%s - started", statefulSet.Namespace, statefulSet.Name) err := w.c.createStatefulSet(statefulSet, host) @@ -1209,11 +1245,13 @@ func (w *worker) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.Chi w.a.V(1). WithEvent(host.CHI, eventActionCreate, eventReasonCreateCompleted). WithStatusAction(host.CHI). + M(host).F(). 
Info("Create StatefulSet %s/%s - completed", statefulSet.Namespace, statefulSet.Name) } else { w.a.WithEvent(host.CHI, eventActionCreate, eventReasonCreateFailed). WithStatusAction(host.CHI). WithStatusError(host.CHI). + M(host).A(). Error("Create StatefulSet %s/%s - failed with error %v", statefulSet.Namespace, statefulSet.Name, err) } @@ -1222,8 +1260,8 @@ func (w *worker) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.Chi // updateStatefulSet func (w *worker) updateStatefulSet(curStatefulSet, newStatefulSet *apps.StatefulSet, host *chop.ChiHost) error { - w.a.V(2).Info("updateStatefulSet() - start") - defer w.a.V(2).Info("updateStatefulSet() - end") + w.a.V(2).M(host).S().Info(newStatefulSet.Name) + defer w.a.V(2).M(host).E().Info(newStatefulSet.Name) namespace := newStatefulSet.Namespace name := newStatefulSet.Name @@ -1231,6 +1269,7 @@ func (w *worker) updateStatefulSet(curStatefulSet, newStatefulSet *apps.Stateful w.a.V(1). WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted). WithStatusAction(host.CHI). + M(host).F(). Info("Update StatefulSet(%s/%s) - started", namespace, name) err := w.c.updateStatefulSet(curStatefulSet, newStatefulSet, host) @@ -1240,6 +1279,7 @@ func (w *worker) updateStatefulSet(curStatefulSet, newStatefulSet *apps.Stateful w.a.V(1). WithEvent(host.CHI, eventActionUpdate, eventReasonUpdateCompleted). WithStatusAction(host.CHI). + M(host).F(). Info("Update StatefulSet(%s/%s) - completed", namespace, name) return nil } @@ -1247,11 +1287,12 @@ func (w *worker) updateStatefulSet(curStatefulSet, newStatefulSet *apps.Stateful w.a.WithEvent(host.CHI, eventActionUpdate, eventReasonUpdateFailed). WithStatusAction(host.CHI). WithStatusError(host.CHI). + M(host).A(). 
Error("Update StatefulSet(%s/%s) - failed with error\n---\n%v\n--\nContinue with recreate", namespace, name, err) diff, equal := messagediff.DeepDiff(curStatefulSet.Spec, newStatefulSet.Spec) - w.a.Info("StatefulSet.Spec diff:") - w.a.Info(util.MessageDiffString(diff, equal)) + w.a.M(host).Info("StatefulSet.Spec diff:") + w.a.M(host).Info(util.MessageDiffString(diff, equal)) err = w.c.deleteStatefulSet(host) err = w.reconcilePersistentVolumeClaims(host) @@ -1269,8 +1310,8 @@ func (w *worker) reconcilePersistentVolumes(host *chop.ChiHost) { // reconcilePersistentVolumeClaims func (w *worker) reconcilePersistentVolumeClaims(host *chop.ChiHost) error { namespace := host.Address.Namespace - w.a.V(2).Info("reconcilePersistentVolumeClaims for host %s/%s - start", namespace, host.Name) - defer w.a.V(2).Info("reconcilePersistentVolumeClaims for host %s/%s - end", namespace, host.Name) + w.a.V(2).M(host).S().Info("host %s/%s", namespace, host.Name) + defer w.a.V(2).M(host).E().Info("host %s/%s", namespace, host.Name) host.WalkVolumeMounts(func(volumeMount *core.VolumeMount) { volumeClaimTemplateName := volumeMount.Name @@ -1281,15 +1322,15 @@ func (w *worker) reconcilePersistentVolumeClaims(host *chop.ChiHost) error { } pvcName := chopmodel.CreatePVCName(host, volumeMount, volumeClaimTemplate) - w.a.V(2).Info("reconcile volumeMount (%s/%s/%s/%s) - start", namespace, host.Name, volumeMount.Name, pvcName) - defer w.a.V(2).Info("reconcile volumeMount (%s/%s/%s/%s) - end", namespace, host.Name, volumeMount.Name, pvcName) + w.a.V(2).M(host).Info("reconcile volumeMount (%s/%s/%s/%s) - start", namespace, host.Name, volumeMount.Name, pvcName) + defer w.a.V(2).M(host).Info("reconcile volumeMount (%s/%s/%s/%s) - end", namespace, host.Name, volumeMount.Name, pvcName) pvc, err := w.c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(pvcName, newGetOptions()) if err != nil { if apierrors.IsNotFound(err) { // This is not an error per se, means PVC is not created (yet)? 
} else { - w.a.Error("ERROR unable to get PVC(%s/%s) err: %v", namespace, pvcName, err) + w.a.M(host).A().Error("ERROR unable to get PVC(%s/%s) err: %v", namespace, pvcName, err) } return } @@ -1330,8 +1371,8 @@ func (w *worker) reconcileResource( desiredResourceList core.ResourceList, resourceName core.ResourceName, ) { - w.a.V(2).Info("reconcileResource(%s/%s/%s) - start", pvc.Namespace, pvc.Name, resourceName) - defer w.a.V(2).Info("reconcileResource(%s/%s/%s) - end", pvc.Namespace, pvc.Name, resourceName) + w.a.V(2).M(pvc).Info("reconcileResource(%s/%s/%s) - start", pvc.Namespace, pvc.Name, resourceName) + defer w.a.V(2).M(pvc).Info("reconcileResource(%s/%s/%s) - end", pvc.Namespace, pvc.Name, resourceName) var ok bool if (pvcResourceList == nil) || (desiredResourceList == nil) { @@ -1351,11 +1392,11 @@ func (w *worker) reconcileResource( return } - w.a.V(2).Info("reconcileResource(%s/%s/%s) - unequal requests, want to update", pvc.Namespace, pvc.Name, resourceName) + w.a.V(2).M(pvc).Info("reconcileResource(%s/%s/%s) - unequal requests, want to update", pvc.Namespace, pvc.Name, resourceName) pvcResourceList[resourceName] = desiredResourceList[resourceName] _, err := w.c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(pvc) if err != nil { - w.a.Error("unable to reconcileResource(%s/%s/%s) err: %v", pvc.Namespace, pvc.Name, resourceName, err) + w.a.M(pvc).A().Error("unable to reconcileResource(%s/%s/%s) err: %v", pvc.Namespace, pvc.Name, resourceName, err) return } } From 7439f74f08d97ed5e5c07f389cb6c33825ce9207 Mon Sep 17 00:00:00 2001 From: alz Date: Fri, 5 Feb 2021 10:17:55 +0300 Subject: [PATCH 57/78] Disable readiness probe --- pkg/model/creator.go | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/pkg/model/creator.go b/pkg/model/creator.go index e3de5bcd0..4d3f28a82 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -850,27 +850,28 @@ func newDefaultLivenessProbe() *corev1.Probe { 
Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/ping", - Port: intstr.Parse(chDefaultHTTPPortName), + Port: intstr.Parse(chDefaultHTTPPortName), // What if it is not a default? }, }, InitialDelaySeconds: 60, PeriodSeconds: 3, - FailureThreshold: 5, + FailureThreshold: 10, } } // newDefaultReadinessProbe func (c *Creator) newDefaultReadinessProbe() *corev1.Probe { - return &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/replicas_status", - Port: intstr.Parse(chDefaultHTTPPortName), - }, - }, - InitialDelaySeconds: 10, - PeriodSeconds: 3, - } + return nil +// return &corev1.Probe{ +// Handler: corev1.Handler{ +// HTTPGet: &corev1.HTTPGetAction{ +// Path: "/replicas_status", +// Port: intstr.Parse(chDefaultHTTPPortName), +// }, +// }, +// InitialDelaySeconds: 10, +// PeriodSeconds: 3, +// } } // newDefaultClickHouseContainer returns default ClickHouse Container From 4a68ea561b4040a818146dfa008ab8dd2caff405 Mon Sep 17 00:00:00 2001 From: alz Date: Fri, 5 Feb 2021 11:06:51 +0300 Subject: [PATCH 58/78] Add error message when could not set a ready label --- pkg/controller/chi/pods.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index 99f8a1fb9..daf9e2541 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -31,6 +31,10 @@ func (c *Controller) appendLabelReady(host *chop.ChiHost) error { chopmodel.AppendLabelReady(&pod.ObjectMeta) _, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod) + if err != nil { + log.M(host).A().Error("FAIL setting 'ready' label for host %s err:%v", host.Address.NamespaceNameString(), err) + return err + } return err } From 946ca385fa0fe28642ad1a52d9a5b37e62cb1073 Mon Sep 17 00:00:00 2001 From: alz Date: Fri, 5 Feb 2021 13:18:51 +0300 Subject: [PATCH 59/78] Fix tests --- tests/configs/test-017-multi-version.yaml | 2 +- tests/test.py | 1 + tests/test_operator.py | 7 ++++--- 3 files changed, 6 
insertions(+), 4 deletions(-) diff --git a/tests/configs/test-017-multi-version.yaml b/tests/configs/test-017-multi-version.yaml index 09ae4f01e..df122b7bf 100644 --- a/tests/configs/test-017-multi-version.yaml +++ b/tests/configs/test-017-multi-version.yaml @@ -25,7 +25,7 @@ spec: - templates: podTemplate: v20.8 files: - remove_database_ordinary.xml: | + users.d/remove_database_ordinary.xml: | diff --git a/tests/test.py b/tests/test.py index 60d2b62f8..d95a8f073 100644 --- a/tests/test.py +++ b/tests/test.py @@ -37,6 +37,7 @@ # python3 tests/test.py --only operator* xfails = { + "/main/operator/test_009. Test operator upgrade": [(Fail, "May fail due to label changes")], "/main/operator/test_022. Test that chi with broken image can be deleted": [(Error, "Not supported yet. Timeout")], "/main/operator/test_024. Test annotations for various template types/PV annotations should be populated": [(Fail, "Not supported yet")], } diff --git a/tests/test_operator.py b/tests/test_operator.py index f932ec944..6ad073e81 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -160,11 +160,12 @@ def test_operator_upgrade(config, version_from, version_to=settings.operator_ver with When(f"upgrade operator to {version_to}"): set_operator_version(version_to, timeout=120) - time.sleep(5) + time.sleep(10) kubectl.wait_chi_status(chi, "Completed", retries=6) kubectl.wait_objects(chi, {"statefulset": 1, "pod": 1, "service": 2}) - new_start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0", ".status.startTime") - assert start_time == new_start_time + with Then("ClickHouse pods should not be restarted"): + new_start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0", ".status.startTime") + assert start_time == new_start_time kubectl.delete_chi(chi) From b6834f34921c4d8ba2934a40ba52432ad1a70f8c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 5 Feb 2021 13:35:21 +0300 Subject: [PATCH 60/78] dev: align --- pkg/model/creator.go | 20 ++++++++++---------- 1 
file changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 4d3f28a82..39a302701 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -862,16 +862,16 @@ func newDefaultLivenessProbe() *corev1.Probe { // newDefaultReadinessProbe func (c *Creator) newDefaultReadinessProbe() *corev1.Probe { return nil -// return &corev1.Probe{ -// Handler: corev1.Handler{ -// HTTPGet: &corev1.HTTPGetAction{ -// Path: "/replicas_status", -// Port: intstr.Parse(chDefaultHTTPPortName), -// }, -// }, -// InitialDelaySeconds: 10, -// PeriodSeconds: 3, -// } + // return &corev1.Probe{ + // Handler: corev1.Handler{ + // HTTPGet: &corev1.HTTPGetAction{ + // Path: "/replicas_status", + // Port: intstr.Parse(chDefaultHTTPPortName), + // }, + // }, + // InitialDelaySeconds: 10, + // PeriodSeconds: 3, + // } } // newDefaultClickHouseContainer returns default ClickHouse Container From 9cb809f5a18906d094de8ad2da8231d5818f7f78 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 5 Feb 2021 13:35:33 +0300 Subject: [PATCH 61/78] dev: minot comment --- pkg/model/builder/xml/xml.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/model/builder/xml/xml.go b/pkg/model/builder/xml/xml.go index 9e4765fa4..68a332fd2 100644 --- a/pkg/model/builder/xml/xml.go +++ b/pkg/model/builder/xml/xml.go @@ -91,7 +91,7 @@ func normalizePath(prefix, path string) string { } } -// addBranch ensures branch esists and assign value to the last tagged node +// addBranch ensures branch exists and assign value to the last tagged node func (n *xmlNode) addBranch(tags []string, setting *chiv1.Setting) { node := n for _, tag := range tags { From 9e8ce5db4b386322226465f05213c02f69380a0d Mon Sep 17 00:00:00 2001 From: alz Date: Fri, 5 Feb 2021 13:58:18 +0300 Subject: [PATCH 62/78] Fix tests --- tests/test.py | 2 +- tests/test_operator.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/test.py b/tests/test.py index 
d95a8f073..e4fee20b1 100644 --- a/tests/test.py +++ b/tests/test.py @@ -64,10 +64,10 @@ test_operator.test_019, test_operator.test_020, test_operator.test_021, - test_operator.test_022, test_operator.test_023, test_operator.test_024, test_operator.test_025, + test_operator.test_022, # this should go last while failing ] run_tests = all_tests diff --git a/tests/test_operator.py b/tests/test_operator.py index 6ad073e81..dc24d6269 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -1245,7 +1245,6 @@ def test_023(): check={ "pod_count": 1, "apply_templates": { - settings.clickhouse_template, "templates/tpl-clickhouse-auto.yaml", }, # test-001.yaml does not have a template reference but should get correct ClickHouse version From 6a489a9a2db337d1185f5e5647351543f3a7dfb0 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 5 Feb 2021 14:02:32 +0300 Subject: [PATCH 63/78] dev: clickhouse SQL connector --- pkg/model/clickhouse/connection.go | 36 ++----------------- pkg/model/clickhouse/query.go | 58 ++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 34 deletions(-) create mode 100644 pkg/model/clickhouse/query.go diff --git a/pkg/model/clickhouse/connection.go b/pkg/model/clickhouse/connection.go index f3809b374..b1e5b2ce5 100644 --- a/pkg/model/clickhouse/connection.go +++ b/pkg/model/clickhouse/connection.go @@ -31,7 +31,7 @@ type CHConnection struct { } func NewConnection(params *CHConnectionParams) *CHConnection { - // DO not perform connection immediately, do it in lazy manner + // Do not establish connection immediately, do it in a lazy manner return &CHConnection{ params: params, } @@ -69,34 +69,6 @@ func (c *CHConnection) ensureConnected() bool { return c.conn != nil } -// Query -type Query struct { - ctx context.Context - cancelFunc context.CancelFunc - - Rows *databasesql.Rows -} - -// Close -func (q *Query) Close() { - if q == nil { - return - } - - if q.Rows != nil { - err := q.Rows.Close() - q.Rows = nil - if err != nil { 
- log.V(1).Info("UNABLE to close rows. err: %v", err) - } - } - - if q.cancelFunc != nil { - q.cancelFunc() - q.cancelFunc = nil - } -} - // Query runs given sql query func (c *CHConnection) Query(sql string) (*Query, error) { if len(sql) == 0 { @@ -122,11 +94,7 @@ func (c *CHConnection) Query(sql string) (*Query, error) { log.V(2).Info("clickhouse.QueryContext():'%s'", sql) - return &Query{ - ctx: ctx, - cancelFunc: cancel, - Rows: rows, - }, nil + return NewQuery(ctx, cancel, rows), nil } // Exec runs given sql query diff --git a/pkg/model/clickhouse/query.go b/pkg/model/clickhouse/query.go new file mode 100644 index 000000000..25859122f --- /dev/null +++ b/pkg/model/clickhouse/query.go @@ -0,0 +1,58 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clickhouse + +import ( + "context" + databasesql "database/sql" + + log "github.com/altinity/clickhouse-operator/pkg/announcer" +) + +// Query +type Query struct { + ctx context.Context + cancelFunc context.CancelFunc + Rows *databasesql.Rows +} + +// NewQuery +func NewQuery(ctx context.Context, cancelFunc context.CancelFunc, rows *databasesql.Rows) *Query { + return &Query{ + ctx: ctx, + cancelFunc: cancelFunc, + Rows: rows, + } +} + +// Close +func (q *Query) Close() { + if q == nil { + return + } + + if q.Rows != nil { + err := q.Rows.Close() + q.Rows = nil + if err != nil { + log.A().Error("UNABLE to close rows. 
err: %v", err) + } + } + + if q.cancelFunc != nil { + q.cancelFunc() + q.cancelFunc = nil + } +} From d8a0843ea3f7a03d02be98b8eeddf3ed525585d3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 5 Feb 2021 14:07:56 +0300 Subject: [PATCH 64/78] dev: connector logger --- pkg/model/clickhouse/connection.go | 16 ++++++++-------- pkg/model/clickhouse/pool.go | 8 ++++---- pkg/model/clickhouse/query.go | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/model/clickhouse/connection.go b/pkg/model/clickhouse/connection.go index b1e5b2ce5..37e83310f 100644 --- a/pkg/model/clickhouse/connection.go +++ b/pkg/model/clickhouse/connection.go @@ -41,7 +41,7 @@ func (c *CHConnection) connect() { log.V(2).Info("Establishing connection: %s", c.params.GetDSNWithHiddenCredentials()) dbConnection, err := databasesql.Open("clickhouse", c.params.GetDSN()) if err != nil { - log.V(1).Info("FAILED Open(%s) %v", c.params.GetDSNWithHiddenCredentials(), err) + log.V(1).A().Error("FAILED Open(%s). Err: %v", c.params.GetDSNWithHiddenCredentials(), err) return } @@ -50,7 +50,7 @@ func (c *CHConnection) connect() { defer cancel() if err := dbConnection.PingContext(ctx); err != nil { - log.V(1).Info("FAILED Ping(%s) %v", c.params.GetDSNWithHiddenCredentials(), err) + log.V(1).A().Error("FAILED Ping(%s). 
Err: %v", c.params.GetDSNWithHiddenCredentials(), err) _ = dbConnection.Close() return } @@ -60,7 +60,7 @@ func (c *CHConnection) connect() { func (c *CHConnection) ensureConnected() bool { if c.conn != nil { - log.V(2).Info("Already connected: %s", c.params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Already connected: %s", c.params.GetDSNWithHiddenCredentials()) return true } @@ -80,7 +80,7 @@ func (c *CHConnection) Query(sql string) (*Query, error) { if !c.ensureConnected() { cancel() s := fmt.Sprintf("FAILED connect(%s) for SQL: %s", c.params.GetDSNWithHiddenCredentials(), sql) - log.V(1).Info(s) + log.V(1).A().Error(s) return nil, fmt.Errorf(s) } @@ -88,7 +88,7 @@ func (c *CHConnection) Query(sql string) (*Query, error) { if err != nil { cancel() s := fmt.Sprintf("FAILED Query(%s) %v for SQL: %s", c.params.GetDSNWithHiddenCredentials(), err, sql) - log.V(1).Info(s) + log.V(1).A().Error(s) return nil, err } @@ -108,18 +108,18 @@ func (c *CHConnection) Exec(sql string) error { if !c.ensureConnected() { s := fmt.Sprintf("FAILED connect(%s) for SQL: %s", c.params.GetDSNWithHiddenCredentials(), sql) - log.V(1).Info(s) + log.V(1).A().Error(s) return fmt.Errorf(s) } _, err := c.conn.ExecContext(ctx, sql) if err != nil { - log.V(1).Info("FAILED Exec(%s) %v for SQL: %s", c.params.GetDSNWithHiddenCredentials(), err, sql) + log.V(1).A().Error("FAILED Exec(%s) %v for SQL: %s", c.params.GetDSNWithHiddenCredentials(), err, sql) return err } - log.V(2).Info("clickhouse.Exec():\n", sql) + log.V(2).F().Info("\n%s", sql) return nil } diff --git a/pkg/model/clickhouse/pool.go b/pkg/model/clickhouse/pool.go index 56d5463ee..459a074b6 100644 --- a/pkg/model/clickhouse/pool.go +++ b/pkg/model/clickhouse/pool.go @@ -31,7 +31,7 @@ func GetPooledDBConnection(params *CHConnectionParams) *CHConnection { key := makePoolKey(params) if connection, existed := dbConnectionPool.Load(key); existed { - log.V(2).Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + 
log.V(2).F().Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } @@ -42,16 +42,16 @@ func GetPooledDBConnection(params *CHConnectionParams) *CHConnection { // Double check for race condition if connection, existed := dbConnectionPool.Load(key); existed { - log.V(2).Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } - log.V(2).Info("Add connection to the pool: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Add connection to the pool: %s", params.GetDSNWithHiddenCredentials()) dbConnectionPool.Store(key, NewConnection(params)) // Fetch from the pool if connection, existed := dbConnectionPool.Load(key); existed { - log.V(2).Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) + log.V(2).F().Info("Found pooled connection: %s", params.GetDSNWithHiddenCredentials()) return connection.(*CHConnection) } diff --git a/pkg/model/clickhouse/query.go b/pkg/model/clickhouse/query.go index 25859122f..de6ecc5ff 100644 --- a/pkg/model/clickhouse/query.go +++ b/pkg/model/clickhouse/query.go @@ -47,7 +47,7 @@ func (q *Query) Close() { err := q.Rows.Close() q.Rows = nil if err != nil { - log.A().Error("UNABLE to close rows. err: %v", err) + log.A().Error("UNABLE to close rows. 
Err: %v", err) } } From ba5bcd8acbab2d4ea55162d2413980e358fe9a19 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 5 Feb 2021 14:26:26 +0300 Subject: [PATCH 65/78] dev: creator logger --- pkg/model/ch_config_const.go | 1 + pkg/model/creator.go | 43 ++++++++++++++++++------------------ 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/pkg/model/ch_config_const.go b/pkg/model/ch_config_const.go index 07e482205..aacd8e1e1 100644 --- a/pkg/model/ch_config_const.go +++ b/pkg/model/ch_config_const.go @@ -85,6 +85,7 @@ const ( chDefaultInterserverHTTPPortName = "interserver" chDefaultInterserverHTTPPortNumber = int32(9009) ) + const ( zkDefaultPort = 2181 // zkDefaultRootTemplate specifies default ZK root - /clickhouse/{namespace}/{chi name} diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 39a302701..dfcf3f541 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -34,17 +34,16 @@ type Creator struct { chi *chiv1.ClickHouseInstallation chConfigFilesGenerator *ClickHouseConfigFilesGenerator labeler *Labeler + a log.Announcer } -func NewCreator( - chop *chop.CHOp, - chi *chiv1.ClickHouseInstallation, -) *Creator { +func NewCreator(chop *chop.CHOp, chi *chiv1.ClickHouseInstallation) *Creator { return &Creator{ chop: chop, chi: chi, chConfigFilesGenerator: NewClickHouseConfigFilesGenerator(NewClickHouseConfigGenerator(chi), chop.Config()), labeler: NewLabeler(chop, chi), + a: log.M(chi), } } @@ -52,7 +51,7 @@ func NewCreator( func (c *Creator) CreateServiceCHI() *corev1.Service { serviceName := CreateCHIServiceName(c.chi) - log.V(1).Info("CreateServiceCHI(%s/%s)", c.chi.Namespace, serviceName) + c.a.V(1).F().Info("%s/%s", c.chi.Namespace, serviceName) if template, ok := c.chi.GetCHIServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -99,7 +98,7 @@ func (c *Creator) CreateServiceCHI() *corev1.Service { func (c *Creator) CreateServiceCluster(cluster *chiv1.ChiCluster) 
*corev1.Service { serviceName := CreateClusterServiceName(cluster) - log.V(1).Info("CreateServiceCluster(%s/%s)", cluster.Address.Namespace, serviceName) + c.a.V(1).F().Info("%s/%s", cluster.Address.Namespace, serviceName) if template, ok := cluster.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -118,7 +117,7 @@ func (c *Creator) CreateServiceCluster(cluster *chiv1.ChiCluster) *corev1.Servic func (c *Creator) CreateServiceShard(shard *chiv1.ChiShard) *corev1.Service { serviceName := CreateShardServiceName(shard) - log.V(1).Info("CreateServiceShard(%s/%s)", shard.Address.Namespace, serviceName) + c.a.V(1).F().Info("%s/%s", shard.Address.Namespace, serviceName) if template, ok := shard.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -138,7 +137,7 @@ func (c *Creator) CreateServiceHost(host *chiv1.ChiHost) *corev1.Service { serviceName := CreateStatefulSetServiceName(host) statefulSetName := CreateStatefulSetName(host) - log.V(1).Info("CreateServiceHost(%s/%s) for Set %s", host.Address.Namespace, serviceName, statefulSetName) + c.a.V(1).F().Info("%s/%s for Set %s", host.Address.Namespace, serviceName, statefulSetName) if template, ok := host.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified return c.createServiceFromTemplate( @@ -192,12 +191,11 @@ func (c *Creator) verifyServiceTemplatePorts(template *chiv1.ChiServiceTemplate) for i := range template.Spec.Ports { servicePort := &template.Spec.Ports[i] if (servicePort.Port < 1) || (servicePort.Port > 65535) { - msg := fmt.Sprintf("verifyServiceTemplatePorts(%s) INCORRECT PORT: %d ", template.Name, servicePort.Port) - log.V(1).Info(msg) + msg := fmt.Sprintf("template:%s INCORRECT PORT:%d", template.Name, servicePort.Port) + c.a.V(1).F().Warning(msg) return fmt.Errorf(msg) } } - return nil } @@ -328,7 +326,7 @@ func (c *Creator) setupStatefulSetVersion(statefulSet *apps.StatefulSet) { 
LabelStatefulSetVersion: util.Fingerprint(statefulSet), }, ) - log.V(2).Info("StatefulSet(%s/%s)\n%s", statefulSet.Namespace, statefulSet.Name, util.Dump(statefulSet)) + c.a.V(2).F().Info("StatefulSet(%s/%s)\n%s", statefulSet.Namespace, statefulSet.Name, util.Dump(statefulSet)) } // GetStatefulSetVersion @@ -407,7 +405,7 @@ func (c *Creator) personalizeStatefulSetTemplate(statefulSet *apps.StatefulSet, // In case we have default LogVolumeClaimTemplate specified - need to append log container to Pod Template if host.Templates.LogVolumeClaimTemplate != "" { addContainer(&statefulSet.Spec.Template.Spec, newDefaultLogContainer()) - log.V(1).Info("setupStatefulSetPodTemplate() add log container for statefulSet %s", statefulSetName) + c.a.V(1).F().Info("add log container for statefulSet %s", statefulSetName) } } @@ -421,11 +419,11 @@ func (c *Creator) getPodTemplate(host *chiv1.ChiHost) *chiv1.ChiPodTemplate { // Host references known PodTemplate // Make local copy of this PodTemplate, in order not to spoil the original common-used template podTemplate = podTemplate.DeepCopy() - log.V(1).Info("getPodTemplate() statefulSet %s use custom template %s", statefulSetName, podTemplate.Name) + c.a.V(1).F().Info("statefulSet %s use custom template %s", statefulSetName, podTemplate.Name) } else { // Host references UNKNOWN PodTemplate, will use default one podTemplate = c.newDefaultPodTemplate(statefulSetName) - log.V(1).Info("getPodTemplate() statefulSet %s use default generated template", statefulSetName) + c.a.V(1).F().Info("statefulSet %s use default generated template", statefulSetName) } // Here we have local copy of Pod Template, to be used to create StatefulSet @@ -670,14 +668,14 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 3. 
Specified (by volumeClaimTemplateName) VolumeClaimTemplate has to be available as well if _, ok := c.chi.GetVolumeClaimTemplate(volumeClaimTemplateName); !ok { // Incorrect/unknown .templates.VolumeClaimTemplate specified - log.V(1).Info("Can not find volumeClaimTemplate %s. Volume claim can not be mounted", volumeClaimTemplateName) + c.a.V(1).F().Warning("Can not find volumeClaimTemplate %s. Volume claim can not be mounted", volumeClaimTemplateName) return nil } // 4. Specified container has to be available container := getContainerByName(statefulSet, containerName) if container == nil { - log.V(1).Info("Can not find container %s. Volume claim can not be mounted", containerName) + c.a.V(1).F().Warning("Can not find container %s. Volume claim can not be mounted", containerName) return nil } @@ -696,8 +694,8 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 1. Check whether this VolumeClaimTemplate is already listed in VolumeMount of this container if volumeMount.Name == existingVolumeMount.Name { // This .templates.VolumeClaimTemplate is already used in VolumeMount - log.V(1).Info( - "setupStatefulSetApplyVolumeClaim(%s) container %s volumeClaimTemplateName %s already used", + c.a.V(1).F().Warning( + "StatefulSet:%s container:%s volumeClaimTemplateName:%s already used", statefulSet.Name, container.Name, volumeMount.Name, @@ -708,8 +706,8 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( // 2. 
Check whether `mountPath` (say '/var/lib/clickhouse') is already mounted if volumeMount.MountPath == existingVolumeMount.MountPath { // `mountPath` (say /var/lib/clickhouse) is already mounted - log.V(1).Info( - "setupStatefulSetApplyVolumeClaim(%s) container %s mountPath %s already used", + c.a.V(1).F().Warning( + "StatefulSet:%s container:%s mountPath:%s already used", statefulSet.Name, container.Name, volumeMount.MountPath, @@ -730,7 +728,8 @@ func (c *Creator) setupStatefulSetApplyVolumeMount( ) } - log.V(1).Info("setupStatefulSetApplyVolumeClaim(%s) container %s mounted %s on %s", + c.a.V(1).F().Info( + "StatefulSet:%s container:%s mounted %s on %s", statefulSet.Name, container.Name, volumeMount.Name, From 5d66e6338ec55a7aef16b3d06fb6fc2fe5d383d8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 9 Feb 2021 17:57:56 +0300 Subject: [PATCH 66/78] dev: adopt poller for 2 functions - main and background --- pkg/controller/chi/poller.go | 106 ++++++++++++++++++++++++++--------- 1 file changed, 81 insertions(+), 25 deletions(-) diff --git a/pkg/controller/chi/poller.go b/pkg/controller/chi/poller.go index 882d6202f..228db5546 100644 --- a/pkg/controller/chi/poller.go +++ b/pkg/controller/chi/poller.go @@ -33,20 +33,9 @@ const ( waitStatefulSetGenerationTimeoutToCreateStatefulSet = 30 ) -// waitStatefulSetReady polls StatefulSet for reaching target generation and Ready state -func (c *Controller) waitStatefulSetReady(statefulSet *apps.StatefulSet) error { - if err := c.pollStatefulSet(statefulSet, nil, func(sts *apps.StatefulSet) bool { - return model.IsStatefulSetGeneration(sts, sts.Generation) - }); err == nil { - return c.pollStatefulSet(statefulSet, nil, model.IsStatefulSetReady) - } else { - return err - } -} - -// waitHostNotReady polls StatefulSet for not exists or not ready +// waitHostNotReady polls host's StatefulSet for not exists or not ready func (c *Controller) waitHostNotReady(host *chop.ChiHost) error { - err := c.pollStatefulSet(host, 
NewStatefulSetPollOptionsConfigNoCreate(c.chop.Config()), model.IsStatefulSetNotReady) + err := c.pollStatefulSet(host, NewStatefulSetPollOptionsConfigNoCreate(c.chop.Config()), model.IsStatefulSetNotReady, nil) if apierrors.IsNotFound(err) { err = nil } @@ -54,9 +43,54 @@ func (c *Controller) waitHostNotReady(host *chop.ChiHost) error { return err } -// waitHostReady polls hosts's StatefulSet until it is ready +// waitHostReady polls host's StatefulSet until it is ready func (c *Controller) waitHostReady(host *chop.ChiHost) error { - return c.waitStatefulSetReady(host.StatefulSet) + // Wait for StatefulSet to reach generation + err := c.pollStatefulSet( + host.StatefulSet, + nil, + func(sts *apps.StatefulSet) bool { + if sts == nil { + return false + } + _ = c.deleteLabelReady(host) + return model.IsStatefulSetGeneration(sts, sts.Generation) + }, + func() { + _ = c.deleteLabelReady(host) + }, + ) + if err != nil { + return err + } + + // Wait StatefulSet to reach ready status + return c.pollStatefulSet( + host.StatefulSet, + nil, + func(sts *apps.StatefulSet) bool { + _ = c.deleteLabelReady(host) + return model.IsStatefulSetReady(sts) + }, + func() { + _ = c.deleteLabelReady(host) + }, + ) +} + +// waitHostDeleted polls host's StatefulSet until it is not available +func (c *Controller) waitHostDeleted(host *chop.ChiHost) { + for { + // TODO + // Probably there would be better way to wait until k8s reported StatefulSet deleted + if _, err := c.getStatefulSet(host); err == nil { + log.V(2).Info("cache NOT yet synced") + time.Sleep(15 * time.Second) + } else { + log.V(1).Info("cache synced") + return + } + } } // waitHostRunning polls host for `Running` state @@ -99,7 +133,8 @@ type StatefulSetPollOptions struct { StartBotheringAfterTimeout time.Duration CreateTimeout time.Duration Timeout time.Duration - Interval time.Duration + MainInterval time.Duration + BackgroundInterval time.Duration } func NewStatefulSetPollOptions() *StatefulSetPollOptions { @@ -111,7 +146,8 
@@ func NewStatefulSetPollOptionsConfig(config *chop.OperatorConfig) *StatefulSetPo StartBotheringAfterTimeout: time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second, CreateTimeout: time.Duration(waitStatefulSetGenerationTimeoutToCreateStatefulSet) * time.Second, Timeout: time.Duration(config.StatefulSetUpdateTimeout) * time.Second, - Interval: time.Duration(config.StatefulSetUpdatePollPeriod) * time.Second, + MainInterval: time.Duration(config.StatefulSetUpdatePollPeriod) * time.Second, + BackgroundInterval: 1 * time.Second, } } @@ -119,13 +155,19 @@ func NewStatefulSetPollOptionsConfigNoCreate(config *chop.OperatorConfig) *State return &StatefulSetPollOptions{ StartBotheringAfterTimeout: time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second, //CreateTimeout: time.Duration(waitStatefulSetGenerationTimeoutToCreateStatefulSet) * time.Second, - Timeout: time.Duration(config.StatefulSetUpdateTimeout) * time.Second, - Interval: time.Duration(config.StatefulSetUpdatePollPeriod) * time.Second, + Timeout: time.Duration(config.StatefulSetUpdateTimeout) * time.Second, + MainInterval: time.Duration(config.StatefulSetUpdatePollPeriod) * time.Second, + BackgroundInterval: 1 * time.Second, } } // pollStatefulSet polls StatefulSet with poll callback function. 
-func (c *Controller) pollStatefulSet(entity interface{}, opts *StatefulSetPollOptions, f func(set *apps.StatefulSet) bool) error { +func (c *Controller) pollStatefulSet( + entity interface{}, + opts *StatefulSetPollOptions, + mainFn func(set *apps.StatefulSet) bool, + backFn func(), +) error { if opts == nil { opts = NewStatefulSetPollOptionsConfig(c.chop.Config()) } @@ -149,7 +191,7 @@ func (c *Controller) pollStatefulSet(entity interface{}, opts *StatefulSetPollOp for { if statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name); err == nil { // Object is found - if f(statefulSet) { + if mainFn(statefulSet) { // All is good, job done, exit log.V(1).M(namespace, name).F().Info("OK :%s", model.StrStatefulSetStatus(&statefulSet.Status)) return nil @@ -189,14 +231,28 @@ func (c *Controller) pollStatefulSet(entity interface{}, opts *StatefulSetPollOp // Wait some more time log.V(2).Info("pollStatefulSet(%s/%s)", namespace, name) - select { - case <-time.After(opts.Interval): - } + pollback(opts, backFn) } return fmt.Errorf("unexpected flow") } +func pollback(opts *StatefulSetPollOptions, fn func()) { + main := time.After(opts.MainInterval) + run := true + for run { + back := time.After(opts.BackgroundInterval) + select { + case <-main: + run = false + case <-back: + if fn != nil { + fn() + } + } + } +} + // pollHost polls host with poll callback function. 
func (c *Controller) pollHost(host *chop.ChiHost, opts *StatefulSetPollOptions, f func(host *chop.ChiHost) bool) error { if opts == nil { @@ -229,7 +285,7 @@ func (c *Controller) pollHost(host *chop.ChiHost, opts *StatefulSetPollOptions, // Wait some more time log.V(2).M(host).F().Info("%s/%s", namespace, name) select { - case <-time.After(opts.Interval): + case <-time.After(opts.MainInterval): } } From 8376c5004d6f5cc88fb1c0834d3534d2dd5edb68 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 9 Feb 2021 17:58:25 +0300 Subject: [PATCH 67/78] dev: absence of pod is not an error any more --- pkg/controller/chi/pods.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index daf9e2541..c71f369c4 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -41,7 +41,7 @@ func (c *Controller) appendLabelReady(host *chop.ChiHost) error { func (c *Controller) deleteLabelReady(host *chop.ChiHost) error { pod, err := c.getPod(host) if err != nil { - log.M(host).A().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) + log.V(1).M(host).F().Info("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) return err } From 708ce9e1042a70e11043f77c828e26b5cdd80c12 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 9 Feb 2021 17:58:59 +0300 Subject: [PATCH 68/78] dev: lower logging for aux objects --- pkg/controller/chi/controller.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index c773a5ae7..ab486b777 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -206,21 +206,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&service.ObjectMeta) { return } - log.V(2).M(service).Info("serviceInformer.AddFunc") + log.V(3).M(service).Info("serviceInformer.AddFunc") }, 
UpdateFunc: func(old, new interface{}) { oldService := old.(*core.Service) if !c.isTrackedObject(&oldService.ObjectMeta) { return } - log.V(2).M(oldService).Info("serviceInformer.UpdateFunc") + log.V(3).M(oldService).Info("serviceInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { service := obj.(*core.Service) if !c.isTrackedObject(&service.ObjectMeta) { return } - log.V(2).M(service).Info("serviceInformer.DeleteFunc") + log.V(3).M(service).Info("serviceInformer.DeleteFunc") }, }) @@ -230,7 +230,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&endpoints.ObjectMeta) { return } - log.V(2).M(endpoints).Info("endpointsInformer.AddFunc") + log.V(3).M(endpoints).Info("endpointsInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { oldEndpoints := old.(*core.Endpoints) @@ -283,21 +283,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).M(configMap).Info("configMapInformer.AddFunc") + log.V(3).M(configMap).Info("configMapInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { configMap := old.(*core.ConfigMap) if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).M(configMap).Info("configMapInformer.UpdateFunc") + log.V(3).M(configMap).Info("configMapInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { configMap := obj.(*core.ConfigMap) if !c.isTrackedObject(&configMap.ObjectMeta) { return } - log.V(2).M(configMap).Info("configMapInformer.DeleteFunc") + log.V(3).M(configMap).Info("configMapInformer.DeleteFunc") }, }) @@ -307,7 +307,7 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - log.V(2).M(statefulSet).Info("statefulSetInformer.AddFunc") + log.V(3).M(statefulSet).Info("statefulSetInformer.AddFunc") //controller.handleObject(obj) }, UpdateFunc: func(old, new interface{}) { @@ -315,14 +315,14 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - 
log.V(2).M(statefulSet).Info("statefulSetInformer.UpdateFunc") + log.V(3).M(statefulSet).Info("statefulSetInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { statefulSet := obj.(*apps.StatefulSet) if !c.isTrackedObject(&statefulSet.ObjectMeta) { return } - log.V(2).M(statefulSet).Info("statefulSetInformer.DeleteFunc") + log.V(3).M(statefulSet).Info("statefulSetInformer.DeleteFunc") //controller.handleObject(obj) }, }) @@ -333,21 +333,21 @@ func (c *Controller) addEventHandlers( if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).M(pod).Info("podInformer.AddFunc") + log.V(3).M(pod).Info("podInformer.AddFunc") }, UpdateFunc: func(old, new interface{}) { pod := old.(*core.Pod) if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).M(pod).Info("podInformer.UpdateFunc") + log.V(3).M(pod).Info("podInformer.UpdateFunc") }, DeleteFunc: func(obj interface{}) { pod := obj.(*core.Pod) if !c.isTrackedObject(&pod.ObjectMeta) { return } - log.V(2).M(pod).Info("podInformer.DeleteFunc") + log.V(3).M(pod).Info("podInformer.DeleteFunc") }, }) } From 89fd091498805e7804b5d9b1889ee6eee88e240f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 9 Feb 2021 17:59:40 +0300 Subject: [PATCH 69/78] dev: simplify StatefulSet creator --- pkg/controller/chi/creator.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pkg/controller/chi/creator.go b/pkg/controller/chi/creator.go index 97bea8439..d91140832 100644 --- a/pkg/controller/chi/creator.go +++ b/pkg/controller/chi/creator.go @@ -30,18 +30,21 @@ import ( // createStatefulSet is an internal function, used in reconcileStatefulSet only func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, host *chop.ChiHost) error { log.V(1).M(host).F().P() - if statefulSet, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil { - // Error call Create() + + if _, err := 
c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil { + // Unable to create StatefulSet at all return err - } else if err := c.waitHostReady(host); err == nil { + } + + // StatefulSet created, wait until it is ready + + if err := c.waitHostReady(host); err == nil { // Target generation reached, StatefulSet created successfully return nil - } else { - // Unable to run StatefulSet, StatefulSet create failed, time to rollback? - return c.onStatefulSetCreateFailed(statefulSet, host) } - return fmt.Errorf("unexpected flow") + // Unable to run StatefulSet, StatefulSet create failed, time to rollback? + return c.onStatefulSetCreateFailed(statefulSet, host) } // updateStatefulSet is an internal function, used in reconcileStatefulSet only From ed1488fd2b1c8c00416d4ce60711e7bb7f5ed74a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 9 Feb 2021 18:00:55 +0300 Subject: [PATCH 70/78] dev: move syncer to poller and wait for host during deletion --- pkg/controller/chi/deleter.go | 38 ++++++++++++----------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go index bdf5b20ee..ef490f32c 100644 --- a/pkg/controller/chi/deleter.go +++ b/pkg/controller/chi/deleter.go @@ -15,8 +15,6 @@ package chi import ( - "time" - apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -115,28 +113,33 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error { log.V(1).M(host).F().Info("%s/%s", namespace, name) - statefulSet, err := c.getStatefulSet(host) - if err != nil { + if sts, err := c.getStatefulSet(host); err == nil { + host.StatefulSet = sts + } else { if apierrors.IsNotFound(err) { log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) } else { log.V(1).M(host).A().Error("FAIL get StatefulSet %s/%s err:%v", namespace, name, err) } - return nil + return err } // Scale StatefulSet down 
to 0 pods count. // This is the proper and graceful way to delete StatefulSet var zero int32 = 0 - statefulSet.Spec.Replicas = &zero - statefulSet, _ = c.kubeClient.AppsV1().StatefulSets(namespace).Update(statefulSet) - _ = c.waitStatefulSetReady(statefulSet) - host.StatefulSet = statefulSet + host.StatefulSet.Spec.Replicas = &zero + if _, err := c.kubeClient.AppsV1().StatefulSets(namespace).Update(host.StatefulSet); err != nil { + log.V(1).M(host).Error("UNABLE to update StatefulSet %s/%s", namespace, name) + return err + } + + // Wait until StatefulSet scales down to 0 pods count. + _ = c.waitHostReady(host) // And now delete empty StatefulSet if err := c.kubeClient.AppsV1().StatefulSets(namespace).Delete(name, newDeleteOptions()); err == nil { log.V(1).M(host).Info("OK delete StatefulSet %s/%s", namespace, name) - c.syncStatefulSet(host) + c.waitHostDeleted(host) } else if apierrors.IsNotFound(err) { log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) err = nil @@ -148,21 +151,6 @@ func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error { return nil } -// syncStatefulSet -func (c *Controller) syncStatefulSet(host *chop.ChiHost) { - for { - // TODO - // There should be better way to sync cache - if _, err := c.getStatefulSet(host); err == nil { - log.V(2).Info("cache NOT yet synced") - time.Sleep(15 * time.Second) - } else { - log.V(1).Info("cache synced") - return - } - } -} - // deletePVC deletes PersistentVolumeClaim func (c *Controller) deletePVC(host *chop.ChiHost) error { log.V(2).M(host).S().P() From 20c2ef43ce19babda1c38186d01d573536aaa4cc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 9 Feb 2021 18:01:29 +0300 Subject: [PATCH 71/78] dev: create StatefulSet with Ready label preset --- pkg/model/creator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/model/creator.go b/pkg/model/creator.go index dfcf3f541..1c5b0af20 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ 
-512,7 +512,7 @@ func (c *Creator) statefulSetApplyPodTemplate( ObjectMeta: metav1.ObjectMeta{ Name: template.Name, Labels: util.MergeStringMapsOverwrite( - c.labeler.getLabelsHostScope(host, true), + c.labeler.getLabelsHostScopeReady(host, true), template.ObjectMeta.Labels, ), Annotations: util.MergeStringMapsOverwrite( From c83eba079c67bdb8f56759d21d34cbd9de5bc9b3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 9 Feb 2021 18:02:25 +0300 Subject: [PATCH 72/78] dev: add host-scoped-ready --- pkg/model/labeler.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index d4996bf43..2ca994070 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -255,6 +255,11 @@ func (l *Labeler) getLabelsHostScope(host *chi.ChiHost, applySupplementaryServic return l.appendCHILabels(labels) } +// getLabelsHostScopeReady gets labels for Host-scoped object including Ready label +func (l *Labeler) getLabelsHostScopeReady(host *chi.ChiHost, applySupplementaryServiceLabels bool) map[string]string { + return l.appendReadyLabels(l.getLabelsHostScope(host, applySupplementaryServiceLabels)) +} + // getSelectorShardScope gets labels to select a Host-scoped object func (l *Labeler) GetSelectorHostScope(host *chi.ChiHost) map[string]string { // Do not include CHI-provided labels From 2a6f0879abac0cfec6e53c48470ce716aaffec84 Mon Sep 17 00:00:00 2001 From: alz Date: Tue, 9 Feb 2021 23:58:21 +0300 Subject: [PATCH 73/78] Increase insert timeout --- tests/test_operator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_operator.py b/tests/test_operator.py index dc24d6269..cc3e55c3b 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -1313,7 +1313,7 @@ def test_025(): with Given("Create replicated table and populate it"): clickhouse.query(chi, create_table) clickhouse.query(chi, "CREATE TABLE test_distr as test_local Engine = Distributed('default', default, test_local)") - 
clickhouse.query(chi, f"INSERT INTO test_local select * from numbers({numbers})") + clickhouse.query(chi, f"INSERT INTO test_local select * from numbers({numbers})", timeout=120) with When("Add one more replica, but do not wait for completion"): kubectl.create_and_check( From 672a2a137b62578604f36a99b9fccc4afeb54d1e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 10 Feb 2021 00:03:47 +0300 Subject: [PATCH 74/78] dev: temples logger --- pkg/model/normalizer.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index c57435370..6174e0926 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -70,9 +70,9 @@ func (n *Normalizer) CreateTemplatedCHI(chi *chiv1.ClickHouseInstallation, withD var useTemplates []chiv1.ChiUseTemplate if autoTemplates := n.chop.Config().FindAutoTemplates(); len(autoTemplates) > 0 { - log.V(1).Info("Adding %d auto-templates", len(autoTemplates)) + log.V(2).M(chi).F().Info("Found auto-templates: %d", len(autoTemplates)) for _, template := range autoTemplates { - log.V(1).Info("Add %s/%s auto-template", template.Name, template.Namespace) + log.V(2).M(chi).F().Info("Adding auto-template to merge list: %s/%s ", template.Name, template.Namespace) useTemplates = append(useTemplates, chiv1.ChiUseTemplate{ Name: template.Name, Namespace: template.Namespace, @@ -93,10 +93,10 @@ func (n *Normalizer) CreateTemplatedCHI(chi *chiv1.ClickHouseInstallation, withD for i := range useTemplates { useTemplate := &useTemplates[i] if template := n.chop.Config().FindTemplate(useTemplate, chi.Namespace); template == nil { - log.V(1).Info("UNABLE to find template %s/%s referenced in useTemplates. Skip it.", useTemplate.Namespace, useTemplate.Name) + log.V(1).M(chi).A().Warning("UNABLE to find template %s/%s referenced in useTemplates. 
Skip it.", useTemplate.Namespace, useTemplate.Name) } else { (&n.chi.Spec).MergeFrom(&template.Spec, chiv1.MergeTypeOverrideByNonEmptyValues) - log.V(2).Info("Merge template %s/%s referenced in useTemplates", useTemplate.Namespace, useTemplate.Name) + log.V(2).M(chi).F().Info("Merge template %s/%s referenced in useTemplates", useTemplate.Namespace, useTemplate.Name) } } From 1972011908067d2afb02ac5e29fc7ce0597641d9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 11 Feb 2021 11:41:19 +0300 Subject: [PATCH 75/78] dev: use object-version label --- pkg/model/creator.go | 4 ++-- pkg/model/labeler.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 1c5b0af20..192d7d0a5 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -323,7 +323,7 @@ func (c *Creator) setupStatefulSetVersion(statefulSet *apps.StatefulSet) { statefulSet.Labels = util.MergeStringMapsOverwrite( statefulSet.Labels, map[string]string{ - LabelStatefulSetVersion: util.Fingerprint(statefulSet), + LabelObjectVersion: util.Fingerprint(statefulSet), }, ) c.a.V(2).F().Info("StatefulSet(%s/%s)\n%s", statefulSet.Namespace, statefulSet.Name, util.Dump(statefulSet)) @@ -335,7 +335,7 @@ func (c *Creator) GetStatefulSetVersion(statefulSet *apps.StatefulSet) (string, if statefulSet == nil { return "", false } - label, ok := statefulSet.Labels[LabelStatefulSetVersion] + label, ok := statefulSet.Labels[LabelObjectVersion] return label, ok } diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index 2ca994070..2d6481cd0 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -62,7 +62,7 @@ const ( // Supplementary service labels - used to cooperate with k8s LabelZookeeperConfigVersion = clickhousealtinitycom.GroupName + "/zookeeper-version" LabelSettingsConfigVersion = clickhousealtinitycom.GroupName + "/settings-version" - LabelStatefulSetVersion = clickhousealtinitycom.GroupName + "/statefulset-version" + 
LabelObjectVersion = clickhousealtinitycom.GroupName + "/object-version" ) // Labeler is an entity which can label CHI artifacts From 7e60b8fc29f53960b47270b140250b3931b3d6d2 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 11 Feb 2021 11:55:10 +0300 Subject: [PATCH 76/78] dev: logger --- pkg/model/normalizer.go | 8 +++--- pkg/model/schemer.go | 58 ++++++++++++++++++++--------------------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index 6174e0926..e985ccca1 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -70,9 +70,9 @@ func (n *Normalizer) CreateTemplatedCHI(chi *chiv1.ClickHouseInstallation, withD var useTemplates []chiv1.ChiUseTemplate if autoTemplates := n.chop.Config().FindAutoTemplates(); len(autoTemplates) > 0 { - log.V(2).M(chi).F().Info("Found auto-templates: %d", len(autoTemplates)) + log.V(2).M(chi).F().Info("Found auto-templates num: %d", len(autoTemplates)) for _, template := range autoTemplates { - log.V(2).M(chi).F().Info("Adding auto-template to merge list: %s/%s ", template.Name, template.Namespace) + log.V(3).M(chi).F().Info("Adding auto-template to merge list: %s/%s ", template.Name, template.Namespace) useTemplates = append(useTemplates, chiv1.ChiUseTemplate{ Name: template.Name, Namespace: template.Namespace, @@ -184,7 +184,7 @@ func (n *Normalizer) getHostTemplate(host *chiv1.ChiHost) *chiv1.ChiHostTemplate hostTemplate, ok := host.GetHostTemplate() if ok { // Host references known HostTemplate - log.V(2).Info("getHostTemplate() statefulSet %s use custom host template %s", statefulSetName, hostTemplate.Name) + log.V(2).M(host).F().Info("StatefulSet %s uses custom hostTemplate %s", statefulSetName, hostTemplate.Name) return hostTemplate } @@ -205,7 +205,7 @@ func (n *Normalizer) getHostTemplate(host *chiv1.ChiHost) *chiv1.ChiHostTemplate hostTemplate = newDefaultHostTemplate(statefulSetName) } - log.V(3).Info("getHostTemplate() statefulSet 
%s use default host template", statefulSetName) + log.V(3).M(host).F().Info("StatefulSet %s use default hostTemplate", statefulSetName) return hostTemplate } diff --git a/pkg/model/schemer.go b/pkg/model/schemer.go index 8460a7bd6..c7a9aa589 100644 --- a/pkg/model/schemer.go +++ b/pkg/model/schemer.go @@ -77,11 +77,11 @@ func (s *Schemer) getObjectListFromClickHouse(endpoints []string, sql string) ([ // One of specified services returned result, no need to iterate more break } else { - log.V(1).Info("Run query on: %s of %v FAILED skip to next. err: %v", endpoint, endpoints, err) + log.V(1).A().Warning("FAILED to run query on: %s of %v skip to next. err: %v", endpoint, endpoints, err) } } if err != nil { - log.V(1).Info("Run query FAILED on all %v", endpoints) + log.V(1).A().Error("FAILED to run query on all endpoints %v", endpoints) return nil, nil, err } @@ -94,7 +94,7 @@ func (s *Schemer) getObjectListFromClickHouse(endpoints []string, sql string) ([ names = append(names, name) statements = append(statements, statement) } else { - log.V(1).Info("UNABLE to scan row err: %v", err) + log.V(1).A().Error("UNABLE to scan row err: %v", err) } } @@ -107,7 +107,7 @@ func (s *Schemer) getCreateDistributedObjects(host *chop.ChiHost) ([]string, []s hosts := CreatePodFQDNsOfCluster(host.GetCluster()) nHosts := len(hosts) if nHosts <= 1 { - log.V(1).Info("Single host in a cluster. Nothing to create a schema from.") + log.V(1).M(host).F().Info("Single host in a cluster. Nothing to create a schema from.") return nil, nil, nil } @@ -122,7 +122,7 @@ func (s *Schemer) getCreateDistributedObjects(host *chop.ChiHost) ([]string, []s // remove new host from the list. 
See https://stackoverflow.com/questions/37334119/how-to-delete-an-element-from-a-slice-in-golang hosts[hostIndex] = hosts[nHosts-1] hosts = hosts[:nHosts-1] - log.V(1).Info("Extracting distributed table definitions from hosts: %v", hosts) + log.V(1).M(host).F().Info("Extracting distributed table definitions from hosts: %v", hosts) cluster_tables := fmt.Sprintf("remote('%s', system, tables)", strings.Join(hosts, ",")) @@ -171,28 +171,28 @@ func (s *Schemer) getCreateDistributedObjects(host *chop.ChiHost) ([]string, []s cluster_tables, )) - log.V(1).Info("fetch dbs list") - log.V(1).Info("dbs sql\n%v", sqlDBs) + log.V(1).M(host).F().Info("fetch dbs list") + log.V(1).M(host).F().Info("dbs sql\n%v", sqlDBs) names1, sqlStatements1, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfCHI(host.GetCHI()), sqlDBs) - log.V(1).Info("names1:") + log.V(1).M(host).F().Info("names1:") for _, v := range names1 { - log.V(1).Info("names1: %s", v) + log.V(1).M(host).F().Info("names1: %s", v) } - log.V(1).Info("sql1:") + log.V(1).M(host).F().Info("sql1:") for _, v := range sqlStatements1 { - log.V(1).Info("sql1: %s", v) + log.V(1).M(host).F().Info("sql1: %s", v) } - log.V(1).Info("fetch table list") - log.V(1).Info("tbl sql\n%v", sqlTables) + log.V(1).M(host).F().Info("fetch table list") + log.V(1).M(host).F().Info("tbl sql\n%v", sqlTables) names2, sqlStatements2, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfCHI(host.GetCHI()), sqlTables) - log.V(1).Info("names2:") + log.V(1).M(host).F().Info("names2:") for _, v := range names2 { - log.V(1).Info("names2: %s", v) + log.V(1).M(host).F().Info("names2: %s", v) } - log.V(1).Info("sql2:") + log.V(1).M(host).F().Info("sql2:") for _, v := range sqlStatements2 { - log.V(1).Info("sql2: %s", v) + log.V(1).M(host).F().Info("sql2: %s", v) } return append(names1, names2...), append(sqlStatements1, sqlStatements2...), nil @@ -213,19 +213,19 @@ func (s *Schemer) getCreateReplicaObjects(host *chop.ChiHost) ([]string, []strin } } if shard == nil { 
- log.V(1).Info("Can not find shard for replica") + log.V(1).M(host).F().Info("Can not find shard for replica") return nil, nil, nil } replicas := CreatePodFQDNsOfShard(shard) nReplicas := len(replicas) if nReplicas <= 1 { - log.V(1).Info("Single replica in a shard. Nothing to create a schema from.") + log.V(1).M(host).F().Info("Single replica in a shard. Nothing to create a schema from.") return nil, nil, nil } // remove new replica from the list. See https://stackoverflow.com/questions/37334119/how-to-delete-an-element-from-a-slice-in-golang replicas[replicaIndex] = replicas[nReplicas-1] replicas = replicas[:nReplicas-1] - log.V(1).Info("Extracting replicated table definitions from %v", replicas) + log.V(1).M(host).F().Info("Extracting replicated table definitions from %v", replicas) system_tables := fmt.Sprintf("remote('%s', system, tables)", strings.Join(replicas, ",")) @@ -273,28 +273,28 @@ func (s *Schemer) hostGetDropTables(host *chop.ChiHost) ([]string, []string, err // HostDeleteTables func (s *Schemer) HostDeleteTables(host *chop.ChiHost) error { tableNames, dropTableSQLs, _ := s.hostGetDropTables(host) - log.V(1).Info("Drop tables: %v as %v", tableNames, dropTableSQLs) + log.V(1).M(host).F().Info("Drop tables: %v as %v", tableNames, dropTableSQLs) return s.hostApplySQLs(host, dropTableSQLs, false) } // HostCreateTables func (s *Schemer) HostCreateTables(host *chop.ChiHost) error { - log.V(1).Info("Migrating schema objects to host %s", host.Address.HostName) + log.V(1).M(host).F().Info("Migrating schema objects to host %s", host.Address.HostName) var err1, err2 error if names, createSQLs, err := s.getCreateReplicaObjects(host); err == nil { if len(createSQLs) > 0 { - log.V(1).Info("Creating replica objects at %s: %v", host.Address.HostName, names) - log.V(1).Info("\n%v", createSQLs) + log.V(1).M(host).F().Info("Creating replica objects at %s: %v", host.Address.HostName, names) + log.V(1).M(host).F().Info("\n%v", createSQLs) err1 = s.hostApplySQLs(host, 
createSQLs, true) } } if names, createSQLs, err := s.getCreateDistributedObjects(host); err == nil { if len(createSQLs) > 0 { - log.V(1).Info("Creating distributed objects at %s: %v", host.Address.HostName, names) - log.V(1).Info("\n%v", createSQLs) + log.V(1).M(host).F().Info("Creating distributed objects at %s: %v", host.Address.HostName, names) + log.V(1).M(host).F().Info("\n%v", createSQLs) err2 = s.hostApplySQLs(host, createSQLs, true) } } @@ -361,7 +361,7 @@ func (s *Schemer) applySQLs(hosts []string, sqls []string, retry bool) error { for _, host := range hosts { conn := s.getCHConnection(host) if conn == nil { - log.V(1).Info("Unable to get conn to host %s", host) + log.V(1).M(host).F().Warning("Unable to get conn to host %s", host) continue } err := util.Retry(maxTries, "Applying sqls", func() error { @@ -373,7 +373,7 @@ func (s *Schemer) applySQLs(hosts []string, sqls []string, retry bool) error { } err := conn.Exec(sql) if err != nil && strings.Contains(err.Error(), "Code: 253,") && strings.Contains(sql, "CREATE TABLE") { - log.V(1).Info("Replica is already in ZooKeeper. Trying ATTACH TABLE instead") + log.V(1).M(host).F().Info("Replica is already in ZooKeeper. 
Trying ATTACH TABLE instead") sqlAttach := strings.ReplaceAll(sql, "CREATE TABLE", "ATTACH TABLE") err = conn.Exec(sqlAttach) } @@ -389,7 +389,7 @@ func (s *Schemer) applySQLs(hosts []string, sqls []string, retry bool) error { } return nil }, - log.V(1).Info, + log.V(1).M(host).F().Info, ) if err != nil { From c35bfe788530a44cf088887f882b0849ae28d27d Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 11 Feb 2021 12:55:58 +0300 Subject: [PATCH 77/78] dev: do not delete lables on zero-replicas StatefulSet --- pkg/controller/chi/pods.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/controller/chi/pods.go b/pkg/controller/chi/pods.go index c71f369c4..72fbcc64b 100644 --- a/pkg/controller/chi/pods.go +++ b/pkg/controller/chi/pods.go @@ -39,6 +39,15 @@ func (c *Controller) appendLabelReady(host *chop.ChiHost) error { } func (c *Controller) deleteLabelReady(host *chop.ChiHost) error { + if host == nil { + return nil + } + if host.StatefulSet.Spec.Replicas != nil { + if *host.StatefulSet.Spec.Replicas == 0 { + return nil + } + } + pod, err := c.getPod(host) if err != nil { log.V(1).M(host).F().Info("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err) From 8298522665a985704d5a1b7932263a26cefe0cf3 Mon Sep 17 00:00:00 2001 From: alz Date: Fri, 12 Feb 2021 14:43:34 +0300 Subject: [PATCH 78/78] Update tests to the newer testflows version --- tests/kubectl.py | 2 +- tests/requirements.txt | 3 +- tests/test.py | 8 ++-- tests/test_clickhouse.py | 6 +-- tests/test_metrics_exporter.py | 25 ++++++------ tests/test_operator.py | 72 +++++++++++++++++++--------------- 6 files changed, 61 insertions(+), 55 deletions(-) diff --git a/tests/kubectl.py b/tests/kubectl.py index 26fe99873..1d3d8c1e7 100644 --- a/tests/kubectl.py +++ b/tests/kubectl.py @@ -4,7 +4,7 @@ import manifest import util -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module +from testflows.core import TestScenario, Name, When, Then, 
Given, And, main, Module from testflows.asserts import error from testflows.connect import Shell diff --git a/tests/requirements.txt b/tests/requirements.txt index 28afadbc5..0aeafdbee 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,3 +1,2 @@ -testflows>=1.3.44,<1.4 -testflows.core>=1.3,<1.4 +testflows==1.6.72 PyYAML \ No newline at end of file diff --git a/tests/test.py b/tests/test.py index e4fee20b1..146a79705 100644 --- a/tests/test.py +++ b/tests/test.py @@ -4,7 +4,7 @@ import test_clickhouse import util -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE, args, Fail, Error +from testflows.core import TestScenario, Name, When, Then, Given, And, main, Scenario, Module, TE, args, Fail, Error from testflows.asserts import error if main(): @@ -77,9 +77,9 @@ for t in run_tests: if callable(t): - run(test=t) + Scenario(test=t)() else: - run(test=t[0], args=t[1]) + Scenario(test=t[0], args=t[1])() # python3 tests/test.py --only clickhouse* with Module("clickhouse"): @@ -94,4 +94,4 @@ # run_test = [test_ch_002] for t in run_test: - run(test=t) + Scenario(test=t)() diff --git a/tests/test_clickhouse.py b/tests/test_clickhouse.py index 01b0189d7..43ddccdab 100644 --- a/tests/test_clickhouse.py +++ b/tests/test_clickhouse.py @@ -3,13 +3,13 @@ import settings from test_operator import require_zookeeper -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE +from testflows.core import TestScenario, Name, When, Then, Given, And, main, Scenario, Module, TE from testflows.asserts import error @TestScenario @Name("test_ch_001. Insert quorum") -def test_ch_001(): +def test_ch_001(self): require_zookeeper() create_and_check( @@ -114,7 +114,7 @@ def test_ch_001(): @TestScenario @Name("test_ch_002. 
Row-level security") -def test_ch_002(): +def test_ch_002(self): create_and_check( "configs/test-ch-002-row-level.yaml", { diff --git a/tests/test_metrics_exporter.py b/tests/test_metrics_exporter.py index 412c1c40e..597cebfaa 100644 --- a/tests/test_metrics_exporter.py +++ b/tests/test_metrics_exporter.py @@ -2,7 +2,7 @@ import re import json -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE +from testflows.core import TestScenario, Name, When, Then, Given, And, main, Scenario, Module, TE from testflows.asserts import error import kubectl @@ -17,18 +17,18 @@ def set_metrics_exporter_version(version, ns=settings.operator_namespace): @TestScenario @Name("Check metrics server setup and version") -def test_metrics_exporter_setup(): +def test_metrics_exporter_setup(self): with Given("clickhouse-operator is installed"): assert kubectl.get_count("pod", ns='--all-namespaces', label="-l app=clickhouse-operator") > 0, error() - with And(f"Set metrics-exporter version {settings.operator_version}"): + with Then(f"Set metrics-exporter version {settings.operator_version}"): set_metrics_exporter_version(settings.operator_version) @TestScenario @Name("Check metrics server state after reboot") -def test_metrics_exporter_reboot(): +def test_metrics_exporter_reboot(self): def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_retries=10): - with And(f"metrics-exporter /chi enpoint result should return {expect_result}"): + with Then(f"metrics-exporter /chi enpoint result should return {expect_result}"): for i in range(1, max_retries): # check /metrics for try to refresh monitored instances kubectl.launch( @@ -89,9 +89,9 @@ def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_re @TestScenario @Name("Check metrics server help with different clickhouse version") -def test_metrics_exporter_with_multiple_clickhouse_version(): +def test_metrics_exporter_with_multiple_clickhouse_version(self): def 
check_monitoring_metrics(operator_namespace, operator_pod, expect_result, max_retries=10): - with And(f"metrics-exporter /metrics enpoint result should match with {expect_result}"): + with Then(f"metrics-exporter /metrics enpoint result should match with {expect_result}"): for i in range(1, max_retries): out = kubectl.launch( f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/metrics", @@ -127,9 +127,9 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma config=config, check={ "object_counts": { - "statefulset": 4, - "pod": 4, - "service": 5, + "statefulset": 2, + "pod": 2, + "service": 3, }, "do_not_delete": True, }) @@ -139,9 +139,6 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma '# TYPE chi_clickhouse_metric_VersionInteger gauge': True, 'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-0-0': True, 'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-1-0': True, - 'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-2-0': True, - 'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-3-0': True, - }) with Then("check empty /metrics after delete namespace"): @@ -159,4 +156,4 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma test_metrics_exporter_with_multiple_clickhouse_version, ] for t in test_cases: - run(test=t, flags=TE) + Scenario(test=t, flags=TE)() diff --git a/tests/test_operator.py b/tests/test_operator.py index cc3e55c3b..a3f682da1 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -6,13 +6,13 @@ import util import manifest -from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE +from testflows.core import TestScenario, Name, When, Then, 
Given, And, main, Scenario, Module, TE from testflows.asserts import error @TestScenario @Name("test_001. 1 node") -def test_001(): +def test_001(self): kubectl.create_and_check( config="configs/test-001.yaml", check={ @@ -28,7 +28,7 @@ def test_001(): @TestScenario @Name("test_002. useTemplates for pod, volume templates, and distribution") -def test_002(): +def test_002(self): kubectl.create_and_check( config="configs/test-002-tpl.yaml", check={ @@ -49,7 +49,7 @@ def test_002(): @TestScenario @Name("test_003. 4 nodes with custom layout definition") -def test_003(): +def test_003(self): kubectl.create_and_check( config="configs/test-003-complex-layout.yaml", check={ @@ -64,7 +64,7 @@ def test_003(): @TestScenario @Name("test_004. Compatibility test if old syntax with volumeClaimTemplate is still supported") -def test_004(): +def test_004(self): kubectl.create_and_check( config="configs/test-004-tpl.yaml", check={ @@ -78,7 +78,7 @@ def test_004(): @TestScenario @Name("test_005. Test manifest created by ACM") -def test_005(): +def test_005(self): kubectl.create_and_check( config="configs/test-005-acm.yaml", check={ @@ -93,7 +93,7 @@ def test_005(): @TestScenario @Name("test_006. Test clickhouse version upgrade from one version to another using podTemplate change") -def test_006(): +def test_006(self): old_version = "yandex/clickhouse-server:20.8.6.6" new_version = "yandex/clickhouse-server:20.8.7.15" with Then("Create initial position"): @@ -128,7 +128,7 @@ def test_006(): @TestScenario @Name("test_007. Test template with custom clickhouse ports") -def test_007(): +def test_007(self): kubectl.create_and_check( config="configs/test-007-custom-ports.yaml", check={ @@ -210,7 +210,7 @@ def test_operator_restart(config, version=settings.operator_version): @TestScenario @Name("test_008. 
Test operator restart") -def test_008(): +def test_008(self): with Then("Test simple chi for operator restart"): test_operator_restart("configs/test-008-operator-restart-1.yaml") with Then("Test advanced chi for operator restart"): @@ -219,7 +219,7 @@ def test_008(): @TestScenario @Name("test_009. Test operator upgrade") -def test_009(version_from="0.11.0", version_to=settings.operator_version): +def test_009(self, version_from="0.11.0", version_to=settings.operator_version): with Then("Test simple chi for operator upgrade"): test_operator_upgrade("configs/test-009-operator-upgrade-1.yaml", version_from, version_to) with Then("Test advanced chi for operator upgrade"): @@ -255,7 +255,7 @@ def require_zookeeper(): @TestScenario @Name("test_010. Test zookeeper initialization") -def test_010(): +def test_010(self): set_operator_version(settings.operator_version) require_zookeeper() @@ -279,7 +279,7 @@ def test_010(): @TestScenario @Name("test_011. Test user security and network isolation") -def test_011(): +def test_011(self): with Given("test-011-secured-cluster.yaml and test-011-insecured-cluster.yaml"): kubectl.create_and_check( config="configs/test-011-secured-cluster.yaml", @@ -386,7 +386,7 @@ def test_011(): @TestScenario @Name("test_011_1. Test default user security") -def test_011_1(): +def test_011_1(self): with Given("test-011-secured-default.yaml with password_sha256_hex for default user"): kubectl.create_and_check( config="configs/test-011-secured-default.yaml", @@ -444,7 +444,7 @@ def test_011_1(): @TestScenario @Name("test_012. Test service templates") -def test_012(): +def test_012(self): kubectl.create_and_check( config="configs/test-012-service-template.yaml", check={ @@ -490,7 +490,7 @@ def test_012(): @TestScenario @Name("test_013. 
Test adding shards and creating local and distributed tables automatically") -def test_013(): +def test_013(self): config = "configs/test-013-add-shards-1.yaml" chi = manifest.get_chi_name(util.get_full_path(config)) cluster = "default" @@ -625,7 +625,7 @@ def test_013(): @TestScenario @Name("test_014. Test that replication works") -def test_014(): +def test_014(self): require_zookeeper() create_table = """ @@ -785,7 +785,7 @@ def test_014(): @TestScenario @Name("test_015. Test circular replication with hostNetwork") -def test_015(): +def test_015(self): kubectl.create_and_check( config="configs/test-015-host-network.yaml", check={ @@ -825,7 +825,7 @@ def test_015(): @TestScenario @Name("test_016. Test advanced settings options") -def test_016(): +def test_016(self): chi = "test-016-settings" kubectl.create_and_check( config="configs/test-016-settings-01.yaml", @@ -944,7 +944,7 @@ def test_016(): @TestScenario @Name("test_017. Test deployment of multiple versions in a cluster") -def test_017(): +def test_017(self): pod_count = 2 kubectl.create_and_check( config="configs/test-017-multi-version.yaml", @@ -982,7 +982,7 @@ def test_017(): @TestScenario @Name("test_018. Test that configuration is properly updated") -def test_018(): # Obsolete, covered by test_016 +def test_018(self): # Obsolete, covered by test_016 kubectl.create_and_check( config="configs/test-018-configmap.yaml", check={ @@ -1019,7 +1019,7 @@ def test_018(): # Obsolete, covered by test_016 @TestScenario @Name("test_019. 
Test that volume is correctly retained and can be re-attached") -def test_019(): +def test_019(self): require_zookeeper() config="configs/test-019-retain-volume.yaml" @@ -1063,7 +1063,7 @@ def test_019(): ) with Then("PVC should be re-mounted"): - with And("Non-replicated table should have data"): + with Then("Non-replicated table should have data"): out = clickhouse.query(chi, sql="select a from t1") assert out == "1" with And("Replicated table should have data"): @@ -1097,7 +1097,7 @@ def test_019(): ) with Then("Data should be in place"): - with And("Non-replicated table should have data"): + with Then("Non-replicated table should have data"): out = clickhouse.query(chi, sql="select a from t1") assert out == "1" with And("Replicated table should have data"): @@ -1110,7 +1110,8 @@ def test_019(): @TestScenario @Name("test_020. Test multi-volume configuration") -def test_020(config="configs/test-020-multi-volume.yaml"): +def test_020(self): + config="configs/test-020-multi-volume.yaml" chi = manifest.get_chi_name(util.get_full_path(config)) kubectl.create_and_check( config=config, @@ -1144,7 +1145,9 @@ def test_020(config="configs/test-020-multi-volume.yaml"): @TestScenario @Name("test_021. 
Test rescaling storage") -def test_021(config="configs/test-021-rescale-volume-01.yaml"): +def test_021(self): + config = "configs/test-021-rescale-volume-01.yaml" + with Given("Default storage class is expandable"): default_storage_class = kubectl.get_default_storage_class() assert default_storage_class is not None @@ -1198,6 +1201,7 @@ def test_021(config="configs/test-021-rescale-volume-01.yaml"): size = kubectl.get_pvc_size("disk1-chi-test-021-rescale-volume-simple-0-0-0") assert size == "200Mi" kubectl.wait_object("pvc", "disk2-chi-test-021-rescale-volume-simple-0-0-0") + kubectl.wait_field("pvc", "disk2-chi-test-021-rescale-volume-simple-0-0-0", ".status.phase", "Bound") size = kubectl.get_pvc_size("disk2-chi-test-021-rescale-volume-simple-0-0-0") assert size == "50Mi" @@ -1205,8 +1209,12 @@ def test_021(config="configs/test-021-rescale-volume-01.yaml"): kubectl.wait_pod_status("chi-test-021-rescale-volume-simple-0-0-0", "Running") # ClickHouse requires some time to mount volume. Race conditions. # TODO: wait for proper pod state and check the liveness probe probably. This is better than waiting - time.sleep(10) - out = clickhouse.query(chi, "SELECT count() FROM system.disks") + for i in range(8): + out = clickhouse.query(chi, "SELECT count() FROM system.disks") + if out == "2": + break + with Then(f"Not ready yet. Wait for {1<