From 981d1499fb950c2af0a7290de351cf1610b6ea96 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 2 Jun 2019 17:21:54 +0300 Subject: [PATCH 01/31] env: add PV and PVC in watch script --- manifests/dev/dev-watch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/dev/dev-watch.sh b/manifests/dev/dev-watch.sh index 10db80efb..e08c9fe3f 100755 --- a/manifests/dev/dev-watch.sh +++ b/manifests/dev/dev-watch.sh @@ -4,4 +4,4 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" source ${CUR_DIR}/dev-config.sh -watch -n1 "kubectl -n ${CHOPERATOR_NAMESPACE} get all,configmap,endpoints" +watch -n1 "kubectl -n ${CHOPERATOR_NAMESPACE} get all,configmap,endpoints,pv,pvc" From 0c68203243521fa9ec6d9afd4927477f6fc05e1a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 4 Jun 2019 01:12:42 +0300 Subject: [PATCH 02/31] dev: initial PVC reclaim policy AND reconcile loop refactoring --- .../clickhouse.altinity.com/v1/type_chi.go | 19 ++ pkg/apis/clickhouse.altinity.com/v1/types.go | 23 +- pkg/controller/chi/controller.go | 4 +- pkg/controller/chi/creators.go | 65 ++-- pkg/controller/chi/deleters.go | 95 ++++-- pkg/controller/chi/getters.go | 22 +- pkg/model/ch_config_sections.go | 70 ++++ pkg/model/creator.go | 320 +++++++----------- pkg/model/labeler.go | 28 +- pkg/model/normalizer.go | 20 +- pkg/model/types.go | 5 +- 11 files changed, 375 insertions(+), 296 deletions(-) create mode 100644 pkg/model/ch_config_sections.go diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index 27242276b..9de4ca996 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -208,6 +208,25 @@ func (chi *ClickHouseInstallation) WalkReplicas( return res } +func (chi *ClickHouseInstallation) WalkReplicasTillError( + f func(replica *ChiReplica) error, +) error { + for clusterIndex := range chi.Spec.Configuration.Clusters { + 
cluster := &chi.Spec.Configuration.Clusters[clusterIndex] + for shardIndex := range cluster.Layout.Shards { + shard := &cluster.Layout.Shards[shardIndex] + for replicaIndex := range shard.Replicas { + replica := &shard.Replicas[replicaIndex] + if err := f(replica); err != nil { + return err + } + } + } + } + + return nil +} + func (chi *ClickHouseInstallation) MergeFrom(from *ClickHouseInstallation) { if from == nil { return diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 4a0abd08e..173f48471 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -166,8 +166,27 @@ type ChiPodTemplate struct { // ChiVolumeClaimTemplate defines PersistentVolumeClaim Template, directly used by StatefulSet type ChiVolumeClaimTemplate struct { - Name string `json:"name" yaml:"name"` - Spec corev1.PersistentVolumeClaimSpec `json:"spec" yaml:"spec"` + Name string `json:"name" yaml:"name"` + PVCReclaimPolicy PVCReclaimPolicy `json:"reclaimPolicy"` + Spec corev1.PersistentVolumeClaimSpec `json:"spec" yaml:"spec"` +} + +type PVCReclaimPolicy string + +const ( + PVCReclaimPolicyRetain PVCReclaimPolicy = "Retain" + PVCReclaimPolicyDelete PVCReclaimPolicy = "Delete" +) + +// isValid checks whether PVCReclaimPolicy is valid +func (v PVCReclaimPolicy) IsValid() bool { + switch v { + case PVCReclaimPolicyRetain: + return true + case PVCReclaimPolicyDelete: + return true + } + return false } // ChiDistributedDDL defines distributedDDL section of .spec.defaults diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 2782bde56..d1bc57310 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -452,7 +452,7 @@ func (c *Controller) onAddChi(chi *chop.ClickHouseInstallation) error { } c.eventChi(chi, eventTypeNormal, eventActionCreate, eventReasonCreateInProgress, fmt.Sprintf("onAddChi(%s/%s) create objects", chi.Namespace, 
chi.Name)) - err = c.reconcileChi(chi) + err = c.reconcile(chi) if err != nil { glog.V(1).Infof("ClickHouseInstallation (%q): unable to create controlled resources: %q", chi.Name, err) c.eventChi(chi, eventTypeWarning, eventActionCreate, eventReasonCreateFailed, fmt.Sprintf("ClickHouseInstallation (%s): unable to create", chi.Name)) @@ -532,7 +532,7 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error { // Deal with added/updated items // c.listStatefulSetResources(chi) c.eventChi(old, eventTypeNormal, eventActionUpdate, eventReasonUpdateInProgress, fmt.Sprintf("onUpdateChi(%s/%s) update resources", old.Namespace, old.Name)) - if err := c.reconcileChi(new); err != nil { + if err := c.reconcile(new); err != nil { glog.V(1).Infof("reconcileChi() FAILED: %v", err) c.eventChi(old, eventTypeWarning, eventActionUpdate, eventReasonUpdateFailed, fmt.Sprintf("onUpdateChi(%s/%s) update resources failed", old.Namespace, old.Name)) } else { diff --git a/pkg/controller/chi/creators.go b/pkg/controller/chi/creators.go index 1bda5bab9..373590c98 100644 --- a/pkg/controller/chi/creators.go +++ b/pkg/controller/chi/creators.go @@ -29,38 +29,21 @@ import ( ) // reconcileChi reconciles ClickHouseInstallation -func (c *Controller) reconcileChi(chi *chop.ClickHouseInstallation) error { - creator := chopmodel.NewCreator(chi, c.chopConfig, c.version) - listOfObjectsLists := creator.CreateObjects() - - for i := range listOfObjectsLists { - switch listOfObjectsLists[i].(type) { - case chopmodel.ServiceList: - for j := range listOfObjectsLists[i].(chopmodel.ServiceList) { - if err := c.reconcileService(listOfObjectsLists[i].(chopmodel.ServiceList)[j]); err != nil { - return err - } - } - case chopmodel.ConfigMapList: - for j := range listOfObjectsLists[i].(chopmodel.ConfigMapList) { - if err := c.reconcileConfigMap(listOfObjectsLists[i].(chopmodel.ConfigMapList)[j]); err != nil { - return err - } - } - case chopmodel.StatefulSetList: - for j := range 
listOfObjectsLists[i].(chopmodel.StatefulSetList) { - if err := c.reconcileStatefulSet(listOfObjectsLists[i].(chopmodel.StatefulSetList)[j]); err != nil { - return err - } - } - } - } - - return nil +func (c *Controller) reconcile(chi *chop.ClickHouseInstallation) error { + creator := chopmodel.NewCreator( + chi, + c.chopConfig, + c.version, + &chopmodel.ReconcileFuncs{ + ConfigMap: c.ReconcileConfigMap, + Service: c.ReconcileService, + StatefulSet: c.ReconcileStatefulSet, + }) + return creator.Reconcile() } // reconcileConfigMap reconciles core.ConfigMap -func (c *Controller) reconcileConfigMap(configMap *core.ConfigMap) error { +func (c *Controller) ReconcileConfigMap(configMap *core.ConfigMap) error { // Check whether object with such name already exists in k8s curConfigMap, err := c.getConfigMap(&configMap.ObjectMeta) @@ -89,7 +72,7 @@ func (c *Controller) reconcileConfigMap(configMap *core.ConfigMap) error { } // reconcileService reconciles core.Service -func (c *Controller) reconcileService(service *core.Service) error { +func (c *Controller) ReconcileService(service *core.Service) error { // Check whether object with such name already exists in k8s curService, err := c.getService(&service.ObjectMeta) @@ -113,7 +96,7 @@ func (c *Controller) reconcileService(service *core.Service) error { } // reconcileStatefulSet reconciles apps.StatefulSet -func (c *Controller) reconcileStatefulSet(newStatefulSet *apps.StatefulSet) error { +func (c *Controller) ReconcileStatefulSet(newStatefulSet *apps.StatefulSet, replica *chop.ChiReplica) error { // Check whether object with such name already exists in k8s curStatefulSet, err := c.getStatefulSet(&newStatefulSet.ObjectMeta) @@ -124,14 +107,14 @@ func (c *Controller) reconcileStatefulSet(newStatefulSet *apps.StatefulSet) erro if apierrors.IsNotFound(err) { // StatefulSet with such name not found - create StatefulSet - return c.createStatefulSet(newStatefulSet) + return c.createStatefulSet(newStatefulSet, replica) } // Error 
has happened with .Get() return err } -func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet) error { +func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, replica *chop.ChiReplica) error { if statefulSet, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil { return err } else if err := c.waitStatefulSetGeneration(statefulSet.Namespace, statefulSet.Name, statefulSet.Generation); err == nil { @@ -139,10 +122,8 @@ func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet) error { return nil } else { // Unable to reach target generation, StatefulSet create failed, time to rollback? - return c.onStatefulSetCreateFailed(statefulSet) + return c.onStatefulSetCreateFailed(statefulSet, replica) } - - return errors.New("createStatefulSet() - unknown position") } func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStatefulSet *apps.StatefulSet) error { @@ -177,8 +158,6 @@ func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStat // Unable to reach target generation, StatefulSet update failed, time to rollback? 
return c.onStatefulSetUpdateFailed(oldStatefulSet) } - - return errors.New("updateStatefulSet() - unknown position") } // waitStatefulSetGeneration polls StatefulSet for reaching target generation @@ -216,13 +195,11 @@ func (c *Controller) waitStatefulSetGeneration(namespace, name string, targetGen return errors.New(fmt.Sprintf("waitStatefulSetGeneration(%s/%s) - wait timeout", namespace, name)) } } - - return errors.New(fmt.Sprintf("waitStatefulSetGeneration(%s/%s) - unknown position", namespace, name)) } // onStatefulSetCreateFailed handles situation when StatefulSet create failed // It can just delete failed StatefulSet or do nothing -func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulSet) error { +func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulSet, replica *chop.ChiReplica) error { // Convenience shortcuts namespace := failedStatefulSet.Namespace name := failedStatefulSet.Name @@ -237,14 +214,12 @@ func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulS case config.OnStatefulSetCreateFailureActionDelete: // Delete gracefully problematic failed StatefulSet glog.V(1).Infof("onStatefulSetCreateFailed(%s/%s) - going to DELETE FAILED StatefulSet", namespace, name) - c.statefulSetDelete(namespace, name) + _ = c.deleteReplica(replica) return c.shouldContinueOnCreateFailed() default: glog.V(1).Infof("Unknown c.chopConfig.OnStatefulSetCreateFailureAction=%s", c.chopConfig.OnStatefulSetCreateFailureAction) return nil } - - return errors.New(fmt.Sprintf("onStatefulSetCreateFailed(%s/%s) - unknown position", namespace, name)) } // onStatefulSetUpdateFailed handles situation when StatefulSet update failed @@ -283,8 +258,6 @@ func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.Statefu glog.V(1).Infof("Unknown c.chopConfig.OnStatefulSetUpdateFailureAction=%s", c.chopConfig.OnStatefulSetUpdateFailureAction) return nil } - - return 
errors.New(fmt.Sprintf("onStatefulSetUpdateFailed(%s/%s) - unknown position", namespace, name)) } // shouldContinueOnCreateFailed return nil in case 'continue' or error in case 'do not continue' diff --git a/pkg/controller/chi/deleters.go b/pkg/controller/chi/deleters.go index 0136bc69e..047aae392 100644 --- a/pkg/controller/chi/deleters.go +++ b/pkg/controller/chi/deleters.go @@ -34,11 +34,13 @@ func newDeleteOptions() *metav1.DeleteOptions { } // deleteTablesOnReplica deletes ClickHouse tables on replica before replica is deleted -func (c *Controller) deleteTablesOnReplica(replica *chop.ChiReplica) { +func (c *Controller) deleteTablesOnReplica(replica *chop.ChiReplica) error { // Delete tables on replica tableNames, dropTableSQLs, _ := c.schemer.ReplicaGetDropTables(replica) glog.V(1).Infof("Drop tables: %v as %v", tableNames, dropTableSQLs) _ = c.schemer.ReplicaApplySQLs(replica, dropTableSQLs, true) + + return nil } // deleteReplica deletes all kubernetes resources related to replica *chop.ChiReplica @@ -46,33 +48,17 @@ func (c *Controller) deleteReplica(replica *chop.ChiReplica) error { // Each replica consists of // 1. Tables on replica - we need to delete tables on replica in order to clean Zookeeper data // 2. StatefulSet - // 3. ConfigMap - // 4. Service + // 3. PersistentVolumeClaim + // 4. ConfigMap + // 5. 
Service // Need to delete all these item + glog.V(1).Infof("Start delete replica %s/%s", replica.Address.ClusterName, replica.Name) - c.deleteTablesOnReplica(replica) - - namespace := replica.Address.Namespace - - // Delete StatefulSet - statefulSetName := chopmodel.CreateStatefulSetName(replica) - c.statefulSetDelete(namespace, statefulSetName) - - // Delete ConfigMap - configMapName := chopmodel.CreateConfigMapPodName(replica) - if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(configMapName, newDeleteOptions()); err == nil { - glog.V(1).Infof("ConfigMap %s/%s deleted", namespace, configMapName) - } else { - glog.V(1).Infof("ConfigMap %s/%s delete FAILED %v", namespace, configMapName, err) - } - - // Delete Service - statefulSetServiceName := chopmodel.CreateStatefulSetServiceName(replica) - if err := c.kubeClient.CoreV1().Services(namespace).Delete(statefulSetServiceName, newDeleteOptions()); err == nil { - glog.V(1).Infof("Service %s/%s deleted", namespace, statefulSetServiceName) - } else { - glog.V(1).Infof("Service %s/%s delete FAILED %v", namespace, statefulSetServiceName, err) - } + _ = c.deleteTablesOnReplica(replica) + _ = c.statefulSetDelete(replica) + _ = c.persistentVolumeClaimDelete(replica) + _ = c.configMapDelete(replica) + _ = c.serviceDelete(replica) return nil } @@ -84,11 +70,13 @@ func (c *Controller) deleteShard(shard *chop.ChiShard) { // deleteCluster deletes all kubernetes resources related to cluster *chop.ChiCluster func (c *Controller) deleteCluster(cluster *chop.ChiCluster) { + glog.V(1).Infof("Start delete cluster %s", cluster.Name) cluster.WalkReplicas(c.deleteReplica) } // deleteChi deletes all kubernetes resources related to chi *chop.ClickHouseInstallation func (c *Controller) deleteChi(chi *chop.ClickHouseInstallation) { + // Delete all clusters chi.WalkClusters(func(cluster *chop.ChiCluster) error { c.deleteCluster(cluster) return nil @@ -137,12 +125,18 @@ func (c *Controller) statefulSetDeletePod(statefulSet 
*apps.StatefulSet) error { } // statefulSetDelete gracefully deletes StatefulSet through zeroing Pod's count -func (c *Controller) statefulSetDelete(namespace, name string) error { +func (c *Controller) statefulSetDelete(replica *chop.ChiReplica) error { // IMPORTANT // StatefulSets do not provide any guarantees on the termination of pods when a StatefulSet is deleted. // To achieve ordered and graceful termination of the pods in the StatefulSet, // it is possible to scale the StatefulSet down to 0 prior to deletion. + // Namespaced name + name := chopmodel.CreateStatefulSetName(replica) + namespace := replica.Address.Namespace + + glog.V(1).Infof("statefulSetDelete(%s/%s)", namespace, name) + statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name) if err != nil { glog.V(1).Infof("error get StatefulSet %s/%s", namespace, name) @@ -164,3 +158,50 @@ func (c *Controller) statefulSetDelete(namespace, name string) error { return nil } + +// persistentVolumeClaimDelete deletes PersistentVolumeClaim +func (c *Controller) persistentVolumeClaimDelete(replica *chop.ChiReplica) error { + name := "volumeclaim-template-" + chopmodel.CreatePodName(replica) + namespace := replica.Address.Namespace + + glog.V(1).Infof("persistentVolumeClaimDelete(%s/%s)", namespace, name) + + if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(name, newDeleteOptions()); err == nil { + glog.V(1).Infof("PersistentVolumeClaim %s/%s deleted", namespace, name) + } else { + glog.V(1).Infof("PersistentVolumeClaim %s/%s FAILED TO DELETE %v", namespace, name, err) + } + + return nil +} + +// configMapDelete deletes ConfigMap +func (c *Controller) configMapDelete(replica *chop.ChiReplica) error { + name := chopmodel.CreateConfigMapPodName(replica) + namespace := replica.Address.Namespace + + glog.V(1).Infof("configMapDelete(%s/%s)", namespace, name) + + if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(name, newDeleteOptions()); err == nil { + 
glog.V(1).Infof("ConfigMap %s/%s deleted", namespace, name) + } else { + glog.V(1).Infof("ConfigMap %s/%s delete FAILED %v", namespace, name, err) + } + + return nil +} + +// serviceDelete deletes Service +func (c *Controller) serviceDelete(replica *chop.ChiReplica) error { + name := chopmodel.CreateStatefulSetServiceName(replica) + namespace := replica.Address.Namespace + + glog.V(1).Infof("serviceDelete(%s/%s)", namespace, name) + + if err := c.kubeClient.CoreV1().Services(namespace).Delete(name, newDeleteOptions()); err == nil { + glog.V(1).Infof("Service %s/%s deleted", namespace, name) + } else { + glog.V(1).Infof("Service %s/%s delete FAILED %v", namespace, name, err) + } + return nil +} diff --git a/pkg/controller/chi/getters.go b/pkg/controller/chi/getters.go index 7def88cb5..a16f23e82 100644 --- a/pkg/controller/chi/getters.go +++ b/pkg/controller/chi/getters.go @@ -23,7 +23,7 @@ import ( meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopmodel "github.com/altinity/clickhouse-operator/pkg/model" ) @@ -101,16 +101,20 @@ func (c *Controller) getStatefulSet(obj *meta.ObjectMeta) (*apps.StatefulSet, er if apierrors.IsNotFound(err) { // Object with such name not found // Try to find by labels - if set, err := chopmodel.GetSelectorReplicaFromObjectMeta(obj); err == nil { + if set, err := chopmodel.GetSelectorReplicaFromObjectMeta(obj); err != nil { + return nil, err + } else { selector := labels.SelectorFromSet(set) - - objects, err := c.statefulSetLister.StatefulSets(obj.Namespace).List(selector) - if err != nil { + if objects, err := c.statefulSetLister.StatefulSets(obj.Namespace).List(selector); err != nil { return nil, err - } - if len(objects) == 1 { + } else if len(objects) == 1 { // Object found by labels return objects[0], nil + } else if len(objects) > 1 { 
+ // Object found by labels + return nil, fmt.Errorf("ERROR too much objects returned by selector") + } else { + // Zero? Fall through and return IsNotFound() error } } } @@ -120,7 +124,7 @@ func (c *Controller) getStatefulSet(obj *meta.ObjectMeta) (*apps.StatefulSet, er } // TODO move labels into models modules -func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chi.ClickHouseInstallation, error) { +func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ClickHouseInstallation, error) { // Parse Labels // Labels: map[string]string{ // labelChop: AppVersion, @@ -155,7 +159,7 @@ func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chi. } // TODO move labels into models modules -func (c *Controller) createClusterFromObjectMeta(objectMeta *meta.ObjectMeta) (*chi.ChiCluster, error) { +func (c *Controller) createClusterFromObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ChiCluster, error) { // Parse Labels // Labels: map[string]string{ // labelChop: AppVersion, diff --git a/pkg/model/ch_config_sections.go b/pkg/model/ch_config_sections.go new file mode 100644 index 000000000..a4b9f834c --- /dev/null +++ b/pkg/model/ch_config_sections.go @@ -0,0 +1,70 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/config" + "github.com/altinity/clickhouse-operator/pkg/util" +) + +func NewConfigSections(chConfigGenerator *ClickHouseConfigGenerator, chopConfig *config.Config) *configSections { + return &configSections{ + commonConfigSections: make(map[string]string), + commonUsersConfigSections: make(map[string]string), + chConfigGenerator: chConfigGenerator, + chopConfig: chopConfig, + } +} + +func (c *configSections) CreateConfigsCommon() { + // commonConfigSections maps section name to section XML chopConfig of the following sections: + // 1. remote servers + // 2. zookeeper + // 3. settings + util.IncludeNonEmpty(c.commonConfigSections, filenameRemoteServersXML, c.chConfigGenerator.GetRemoteServers()) + util.IncludeNonEmpty(c.commonConfigSections, filenameZookeeperXML, c.chConfigGenerator.GetZookeeper()) + util.IncludeNonEmpty(c.commonConfigSections, filenameSettingsXML, c.chConfigGenerator.GetSettings()) + // Extra user-specified configs + for filename, content := range c.chopConfig.ChCommonConfigs { + util.IncludeNonEmpty(c.commonConfigSections, filename, content) + } +} + +func (c *configSections) CreateConfigsUsers() { + // commonConfigSections maps section name to section XML chopConfig of the following sections: + // 1. users + // 2. quotas + // 3. 
profiles + util.IncludeNonEmpty(c.commonUsersConfigSections, filenameUsersXML, c.chConfigGenerator.GetUsers()) + util.IncludeNonEmpty(c.commonUsersConfigSections, filenameQuotasXML, c.chConfigGenerator.GetQuotas()) + util.IncludeNonEmpty(c.commonUsersConfigSections, filenameProfilesXML, c.chConfigGenerator.GetProfiles()) + // Extra user-specified configs + for filename, content := range c.chopConfig.ChUsersConfigs { + util.IncludeNonEmpty(c.commonUsersConfigSections, filename, content) + } +} + +func (c *configSections) CreateConfigsPod(replica *v1.ChiReplica) map[string]string { + // Prepare for this replica deployment chopConfig files map as filename->content + podConfigSections := make(map[string]string) + util.IncludeNonEmpty(podConfigSections, filenameMacrosXML, c.chConfigGenerator.GetHostMacros(replica)) + // Extra user-specified configs + for filename, content := range c.chopConfig.ChPodConfigs { + util.IncludeNonEmpty(podConfigSections, filename, content) + } + + return podConfigSections +} diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 8c1640032..18983dde7 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -17,33 +17,51 @@ package model import ( chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/config" - "github.com/altinity/clickhouse-operator/pkg/util" - "github.com/golang/glog" + apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/golang/glog" ) // Creator is the base struct to create k8s objects type Creator struct { - appVersion string - chi *chiv1.ClickHouseInstallation - chopConfig *config.Config - chConfigGenerator *ClickHouseConfigGenerator + appVersion string + chi *chiv1.ClickHouseInstallation + chopConfig *config.Config + chConfigGenerator *ClickHouseConfigGenerator + chConfigSectionsGenerator *configSections + labeler *Labeler 
podTemplatesIndex podTemplatesIndex volumeClaimTemplatesIndex volumeClaimTemplatesIndex + reconcile *ReconcileFuncs +} + +type ReconcileFuncs struct { + ConfigMap func(configMap *corev1.ConfigMap) error + Service func(service *corev1.Service) error + StatefulSet func(newStatefulSet *apps.StatefulSet, replica *chiv1.ChiReplica) error } // NewCreator creates new creator -func NewCreator(chi *chiv1.ClickHouseInstallation, chopConfig *config.Config, appVersion string) *Creator { +func NewCreator( + chi *chiv1.ClickHouseInstallation, + chopConfig *config.Config, + appVersion string, + reconcile *ReconcileFuncs, +) *Creator { creator := &Creator{ chi: chi, chopConfig: chopConfig, appVersion: appVersion, chConfigGenerator: NewClickHouseConfigGenerator(chi), + labeler: NewLabeler(appVersion, chi), + reconcile: reconcile, } + creator.chConfigSectionsGenerator = NewConfigSections(creator.chConfigGenerator, creator.chopConfig) creator.createPodTemplatesIndex() creator.createVolumeClaimTemplatesIndex() @@ -51,186 +69,29 @@ func NewCreator(chi *chiv1.ClickHouseInstallation, chopConfig *config.Config, ap } // ChiCreateObjects returns a map of the k8s objects created based on ClickHouseInstallation Object properties -func (c *Creator) CreateObjects() []interface{} { - list := make([]interface{}, 0) - list = append(list, c.createServiceObjects()) - list = append(list, c.createConfigMapObjects()) - list = append(list, c.createStatefulSetObjects()) - - return list -} - -// createConfigMapObjects returns a list of corev1.ConfigMap objects -func (c *Creator) createConfigMapObjects() ConfigMapList { - configMapList := make(ConfigMapList, 0) - configMapList = append( - configMapList, - c.createConfigMapObjectsCommon()..., - ) - configMapList = append( - configMapList, - c.createConfigMapObjectsPod()..., - ) - - return configMapList -} - -// createConfigMapObjectsCommon returns a list of corev1.ConfigMap objects -func (c *Creator) createConfigMapObjectsCommon() ConfigMapList { - var 
configs configSections - - // commonConfigSections maps section name to section XML chopConfig of the following sections: - // 1. remote servers - // 2. zookeeper - // 3. settings - configs.commonConfigSections = make(map[string]string) - util.IncludeNonEmpty(configs.commonConfigSections, filenameRemoteServersXML, c.chConfigGenerator.GetRemoteServers()) - util.IncludeNonEmpty(configs.commonConfigSections, filenameZookeeperXML, c.chConfigGenerator.GetZookeeper()) - util.IncludeNonEmpty(configs.commonConfigSections, filenameSettingsXML, c.chConfigGenerator.GetSettings()) - // Extra user-specified configs - for filename, content := range c.chopConfig.ChCommonConfigs { - util.IncludeNonEmpty(configs.commonConfigSections, filename, content) +func (c *Creator) Reconcile() error { + if err := c.reconcileServiceChi(CreateChiServiceName(c.chi)); err != nil { + return err } - // commonConfigSections maps section name to section XML chopConfig of the following sections: - // 1. users - // 2. quotas - // 3. profiles - configs.commonUsersConfigSections = make(map[string]string) - util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameUsersXML, c.chConfigGenerator.GetUsers()) - util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameQuotasXML, c.chConfigGenerator.GetQuotas()) - util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameProfilesXML, c.chConfigGenerator.GetProfiles()) - // Extra user-specified configs - for filename, content := range c.chopConfig.ChUsersConfigs { - util.IncludeNonEmpty(configs.commonUsersConfigSections, filename, content) + if err := c.reconcileConfigMapsChi(); err != nil { + return err } - // There are two types of configs, kept in ConfigMaps: - // 1. Common configs - for all resources in the CHI (remote servers, zookeeper setup, etc) - // consists of common configs and common users configs - // 2. 
Personal configs - macros chopConfig - // configMapList contains all configs so we need deploymentsNum+2 ConfigMap objects - // personal chopConfig for each deployment and +2 for common chopConfig + common user chopConfig - configMapList := make(ConfigMapList, 0) - - // ConfigMap common for all resources in CHI - // contains several sections, mapped as separated chopConfig files, - // such as remote servers, zookeeper setup, etc - configMapList = append( - configMapList, - &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: CreateConfigMapCommonName(c.chi), - Namespace: c.chi.Namespace, - Labels: c.getLabelsCommonObject(), - }, - // Data contains several sections which are to be several xml chopConfig files - Data: configs.commonConfigSections, - }, - ) - - // ConfigMap common for all users resources in CHI - configMapList = append( - configMapList, - &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: CreateConfigMapCommonUsersName(c.chi), - Namespace: c.chi.Namespace, - Labels: c.getLabelsCommonObject(), - }, - // Data contains several sections which are to be several xml chopConfig files - Data: configs.commonUsersConfigSections, - }, - ) - - return configMapList -} - -// createConfigMapObjectsPod returns a list of corev1.ConfigMap objects -func (c *Creator) createConfigMapObjectsPod() ConfigMapList { - configMapList := make(ConfigMapList, 0) - replicaProcessor := func(replica *chiv1.ChiReplica) error { - // Prepare for this replica deployment chopConfig files map as filename->content - podConfigSections := make(map[string]string) - util.IncludeNonEmpty(podConfigSections, filenameMacrosXML, c.chConfigGenerator.GetHostMacros(replica)) - // Extra user-specified configs - for filename, content := range c.chopConfig.ChPodConfigs { - util.IncludeNonEmpty(podConfigSections, filename, content) - } - - // Add corev1.ConfigMap object to the list - configMapList = append( - configMapList, - &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
CreateConfigMapPodName(replica), - Namespace: replica.Address.Namespace, - Labels: c.getLabelsReplica(replica, false), - }, - Data: podConfigSections, - }, - ) - - return nil + if err := c.reconcileReplicas(); err != nil { + return err } - c.chi.WalkReplicas(replicaProcessor) - - return configMapList + return nil } -// createServiceObjects returns a list of corev1.Service objects -func (c *Creator) createServiceObjects() ServiceList { - // We'd like to create "number of deployments" + 1 kubernetes services in order to provide access - // to each deployment separately and one common predictably-named access point - common service - serviceList := make(ServiceList, 0) - serviceList = append( - serviceList, - c.createServiceObjectsCommon()..., - ) - serviceList = append( - serviceList, - c.createServiceObjectsPod()..., - ) - - return serviceList -} - -func (c *Creator) createServiceObjectsCommon() ServiceList { - // Create one predictably-named service to access the whole installation - // NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - // service/clickhouse-replcluster ClusterIP None 9000/TCP,9009/TCP,8123/TCP 1h - return ServiceList{ - c.createServiceObjectChi(CreateChiServiceName(c.chi)), - } -} - -func (c *Creator) createServiceObjectsPod() ServiceList { - // Create "number of pods" service - one service for each stateful set - // Each replica has its stateful set and each stateful set has it service - // NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - // service/chi-01a1ce7dce-2 ClusterIP None 9000/TCP,9009/TCP,8123/TCP 1h - serviceList := make(ServiceList, 0) - - replicaProcessor := func(replica *chiv1.ChiReplica) error { - // Add corev1.Service object to the list - serviceList = append( - serviceList, - c.createServiceObjectForStatefulSet(replica), - ) - return nil - } - c.chi.WalkReplicas(replicaProcessor) - - return serviceList -} +func (c *Creator) reconcileServiceChi(serviceName string) error { + glog.V(1).Infof("reconcileServiceObjectChi() for service %s", 
serviceName) -func (c *Creator) createServiceObjectChi(serviceName string) *corev1.Service { - glog.V(1).Infof("createServiceObjectChi() for service %s", serviceName) - return &corev1.Service{ + service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: c.chi.Namespace, - Labels: c.getLabelsCommonObject(), + Labels: c.labeler.getLabelsCommonObject(), }, Spec: corev1.ServiceSpec{ // ClusterIP: templateDefaultsServiceClusterIP, @@ -244,22 +105,88 @@ func (c *Creator) createServiceObjectChi(serviceName string) *corev1.Service { Port: chDefaultClientPortNumber, }, }, - Selector: c.getSelectorCommonObject(), + Selector: c.labeler.getSelectorCommonObject(), Type: "LoadBalancer", }, } + + return c.reconcile.Service(service) } -func (c *Creator) createServiceObjectForStatefulSet(replica *chiv1.ChiReplica) *corev1.Service { +// reconcileConfigMapObjectsChi returns a list of corev1.ConfigMap objects +func (c *Creator) reconcileConfigMapsChi() error { + c.chConfigSectionsGenerator.CreateConfigsUsers() + c.chConfigSectionsGenerator.CreateConfigsCommon() + + // ConfigMap common for all resources in CHI + // contains several sections, mapped as separated chopConfig files, + // such as remote servers, zookeeper setup, etc + configMapCommon := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: CreateConfigMapCommonName(c.chi), + Namespace: c.chi.Namespace, + Labels: c.labeler.getLabelsCommonObject(), + }, + // Data contains several sections which are to be several xml chopConfig files + Data: c.chConfigSectionsGenerator.commonConfigSections, + } + if err := c.reconcile.ConfigMap(configMapCommon); err != nil { + return err + } + + // ConfigMap common for all users resources in CHI + configMapUsers := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: CreateConfigMapCommonUsersName(c.chi), + Namespace: c.chi.Namespace, + Labels: c.labeler.getLabelsCommonObject(), + }, + // Data contains several sections which are to be several xml 
chopConfig files + Data: c.chConfigSectionsGenerator.commonUsersConfigSections, + } + if err := c.reconcile.ConfigMap(configMapUsers); err != nil { + return err + } + + return nil +} + +func (c *Creator) reconcileReplicas() error { + replicaProcessor := func(replica *chiv1.ChiReplica) error { + // Add replica's Service + service := c.createService(replica) + if err := c.reconcile.Service(service); err != nil { + return err + } + + // Add replica's ConfigMap + configMap := c.createConfigMap(replica) + if err := c.reconcile.ConfigMap(configMap); err != nil { + return err + } + + // Add replica's StatefulSet + statefulSet := c.createStatefulSet(replica) + if err := c.reconcile.StatefulSet(statefulSet, replica); err != nil { + return err + } + + return nil + } + + return c.chi.WalkReplicasTillError(replicaProcessor) +} + +func (c *Creator) createService(replica *chiv1.ChiReplica) *corev1.Service { serviceName := CreateStatefulSetServiceName(replica) statefulSetName := CreateStatefulSetName(replica) - glog.V(1).Infof("createServiceObjectForStatefulSet() for service %s %s", serviceName, statefulSetName) + glog.V(1).Infof("createService(%s):%s", serviceName, statefulSetName) return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: replica.Address.Namespace, - Labels: c.getLabelsReplica(replica, false), + Labels: c.labeler.getLabelsReplica(replica, false), }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ @@ -276,32 +203,25 @@ func (c *Creator) createServiceObjectForStatefulSet(replica *chiv1.ChiReplica) * Port: chDefaultInterServerPortNumber, }, }, - Selector: c.getSelectorReplica(replica), + Selector: c.labeler.getSelectorReplica(replica), ClusterIP: templateDefaultsServiceClusterIP, Type: "ClusterIP", }, } } -// createStatefulSetObjects returns a list of apps.StatefulSet objects -func (c *Creator) createStatefulSetObjects() StatefulSetList { - statefulSetList := make(StatefulSetList, 0) - - // Create list of apps.StatefulSet objects - 
// StatefulSet is created for each replica.Deployment - - replicaProcessor := func(replica *chiv1.ChiReplica) error { - glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s", CreateStatefulSetName(replica)) - // Append new StatefulSet to the list of stateful sets - statefulSetList = append(statefulSetList, c.createStatefulSetObject(replica)) - return nil +func (c *Creator) createConfigMap(replica *chiv1.ChiReplica) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: CreateConfigMapPodName(replica), + Namespace: replica.Address.Namespace, + Labels: c.labeler.getLabelsReplica(replica, false), + }, + Data: c.chConfigSectionsGenerator.CreateConfigsPod(replica), } - c.chi.WalkReplicas(replicaProcessor) - - return statefulSetList } -func (c *Creator) createStatefulSetObject(replica *chiv1.ChiReplica) *apps.StatefulSet { +func (c *Creator) createStatefulSet(replica *chiv1.ChiReplica) *apps.StatefulSet { statefulSetName := CreateStatefulSetName(replica) serviceName := CreateStatefulSetServiceName(replica) @@ -312,13 +232,13 @@ func (c *Creator) createStatefulSetObject(replica *chiv1.ChiReplica) *apps.State ObjectMeta: metav1.ObjectMeta{ Name: statefulSetName, Namespace: replica.Address.Namespace, - Labels: c.getLabelsReplica(replica, true), + Labels: c.labeler.getLabelsReplica(replica, true), }, Spec: apps.StatefulSetSpec{ Replicas: &replicasNum, ServiceName: serviceName, Selector: &metav1.LabelSelector{ - MatchLabels: c.getSelectorReplica(replica), + MatchLabels: c.labeler.getSelectorReplica(replica), }, // IMPORTANT // VolumeClaimTemplates are to be setup later @@ -347,7 +267,7 @@ func (c *Creator) setupStatefulSetPodTemplate( // All the rest fields would be filled later statefulSetObject.Spec.Template = corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: c.getLabelsReplica(replica, true), + Labels: c.labeler.getLabelsReplica(replica, true), }, } diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index 
9d2454f17..fd4ae8fe1 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -21,25 +21,37 @@ import ( "k8s.io/apimachinery/pkg/labels" ) -func (c *Creator) getLabelsCommonObject() map[string]string { +type Labeler struct { + version string + chi *chi.ClickHouseInstallation +} + +func NewLabeler(version string, chi *chi.ClickHouseInstallation) *Labeler { + return &Labeler{ + version: version, + chi: chi, + } +} + +func (l *Labeler) getLabelsCommonObject() map[string]string { return map[string]string{ LabelApp: LabelAppValue, - LabelChop: c.appVersion, - LabelChi: nameSectionChi(c.chi), + LabelChop: l.version, + LabelChi: nameSectionChi(l.chi), } } -func (c *Creator) getSelectorCommonObject() map[string]string { +func (l *Labeler) getSelectorCommonObject() map[string]string { return map[string]string{ LabelApp: LabelAppValue, - LabelChi: nameSectionChi(c.chi), + LabelChi: nameSectionChi(l.chi), } } -func (c *Creator) getLabelsReplica(replica *chi.ChiReplica, zk bool) map[string]string { +func (l *Labeler) getLabelsReplica(replica *chi.ChiReplica, zk bool) map[string]string { labels := map[string]string{ LabelApp: LabelAppValue, - LabelChop: c.appVersion, + LabelChop: l.version, LabelChi: nameSectionChi(replica), LabelCluster: nameSectionCluster(replica), LabelShard: nameSectionShard(replica), @@ -52,7 +64,7 @@ func (c *Creator) getLabelsReplica(replica *chi.ChiReplica, zk bool) map[string] return labels } -func (c *Creator) getSelectorReplica(replica *chi.ChiReplica) map[string]string { +func (l *Labeler) getSelectorReplica(replica *chi.ChiReplica) map[string]string { return map[string]string{ LabelApp: LabelAppValue, // skip chop diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index 9f22dda01..bda93faf0 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -55,7 +55,7 @@ func (n *Normalizer) DoChi(chi *chiv1.ClickHouseInstallation) (*chiv1.ClickHouse // Walk over ChiSpec datatype fields n.doDefaults(&n.chi.Spec.Defaults) 
n.doConfiguration(&n.chi.Spec.Configuration) - // ChiSpec.Templates + n.doTemplates(&n.chi.Spec.Templates) endpoint := CreateChiServiceFQDN(chi) pods := make([]string, 0) @@ -86,6 +86,24 @@ func (n *Normalizer) doConfiguration(conf *chiv1.ChiConfiguration) { n.doClusters() } +// doTemplates normalizes .spec.templates +func (n *Normalizer) doTemplates(templates *chiv1.ChiTemplates) { + for i := range templates.VolumeClaimTemplates { + vcTemplate := &templates.VolumeClaimTemplates[i] + n.doVolumeClaimTemplate(vcTemplate) + } +} + +// doVolumeClaimTemplate normalizes .spec.templates.volumeClaimTemplates +func (n *Normalizer) doVolumeClaimTemplate(template *chiv1.ChiVolumeClaimTemplate) { + // Check name + // Check PVCReclaimPolicy + if !template.PVCReclaimPolicy.IsValid() { + template.PVCReclaimPolicy = chiv1.PVCReclaimPolicyDelete + } + // Check Spec +} + // doClusters normalizes clusters func (n *Normalizer) doClusters() { diff --git a/pkg/model/types.go b/pkg/model/types.go index 7c4543d62..0a10d65fa 100644 --- a/pkg/model/types.go +++ b/pkg/model/types.go @@ -16,6 +16,7 @@ package model import ( "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/config" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" ) @@ -32,9 +33,11 @@ type ServiceList []*corev1.Service type configSections struct { // commonConfigSections maps section name to section XML chopConfig commonConfigSections map[string]string - // commonUsersConfigSections maps section name to section XML chopConfig commonUsersConfigSections map[string]string + + chConfigGenerator *ClickHouseConfigGenerator + chopConfig *config.Config } // volumeClaimTemplatesIndex maps volume claim template name - which From 813d0cf387a5145333a379a6b8fda3ebeafd9188 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 4 Jun 2019 12:59:47 +0300 Subject: [PATCH 03/31] env: clarify .yaml manifest build scripts --- 
manifests/dev/cat-clickhouse-operator-yaml.sh | 45 +++++++++++++++++-- ...build-clickhouse-operator-install-yaml.sh} | 3 ++ 2 files changed, 45 insertions(+), 3 deletions(-) rename manifests/operator/{build-clickhouse-operator-yaml.sh => build-clickhouse-operator-install-yaml.sh} (69%) diff --git a/manifests/dev/cat-clickhouse-operator-yaml.sh b/manifests/dev/cat-clickhouse-operator-yaml.sh index 1f9e09a88..b3c3b919b 100755 --- a/manifests/dev/cat-clickhouse-operator-yaml.sh +++ b/manifests/dev/cat-clickhouse-operator-yaml.sh @@ -7,20 +7,59 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" MANIFEST_ROOT=$(realpath ${CUR_DIR}/..) PROJECT_ROOT=$(realpath ${CUR_DIR}/../..) -# clickhouse-operator details +########################################## +## +## clickhouse-operator .yaml configuration +## +########################################## + +# Namespace to install operator CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-kube-system}" + +# Operator's docker image CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:latest}" + +# Local path to operator's config file to be injected into .yaml CHOPERATOR_CONFIG_FILE="${PROJECT_ROOT}/config/config.yaml" + +# Local path to folder with ClickHouse's .xml configuration files which will be injected into .yaml +# as content of /etc/clickhouse-server/conf.d folder CHOPERATOR_CONFD_FOLDER="${PROJECT_ROOT}/config/conf.d" + +# Local path to folder with ClickHouse's .xml configuration files which will be injected into .yaml +# as content of /etc/clickhouse-server/config.d folder CHOPERATOR_CONFIGD_FOLDER="${PROJECT_ROOT}/config/config.d" -CHOPERATOR_TEMPLATESD_FOLDER="${PROJECT_ROOT}/config/templates.d" + +# Local path to folder with ClickHouse's .xml configuration files which will be injected into .yaml +# as content of /etc/clickhouse-server/users.d folder CHOPERATOR_USERSD_FOLDER="${PROJECT_ROOT}/config/users.d" -# .yaml manifest sections to be rendered +# Local path to folder with operator's 
.yaml template files which will be injected into .yaml +# as content of /etc/clickhouse-server/templates.d folder +CHOPERATOR_TEMPLATESD_FOLDER="${PROJECT_ROOT}/config/templates.d" + + +## +## .yaml manifest sections to be rendered +## + +# Render operator's CRD MANIFEST_PRINT_CRD="${MANIFEST_PRINT_CRD:-yes}" + +# Render operator's RBAC and other parts needed during operator's install procedure MANIFEST_PRINT_RBAC="${MANIFEST_PRINT_RBAC:-yes}" + +# Render operator's Deployment section. May be not required in case of dev localhost run MANIFEST_PRINT_DEPLOYMENT="${MANIFEST_PRINT_DEPLOYMENT:-yes}" + +################################## +## +## Render .yaml manifest +## +################################## + + # Render CRD section if [[ "${MANIFEST_PRINT_CRD}" == "yes" ]]; then cat ${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml | \ diff --git a/manifests/operator/build-clickhouse-operator-yaml.sh b/manifests/operator/build-clickhouse-operator-install-yaml.sh similarity index 69% rename from manifests/operator/build-clickhouse-operator-yaml.sh rename to manifests/operator/build-clickhouse-operator-install-yaml.sh index b38dd5490..2990e4a52 100755 --- a/manifests/operator/build-clickhouse-operator-yaml.sh +++ b/manifests/operator/build-clickhouse-operator-install-yaml.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Full list of available vars check in ${MANIFEST_ROOT}/dev/cat-clickhouse-operator-yaml.sh file + +# Here we just build production all-sections-included .yaml manifest with namespace and image parameters CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-kube-system}" CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:latest}" From 51f8aae65ee3802742da7601b1473bc3341d7ec5 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 5 Jun 2019 00:36:00 +0300 Subject: [PATCH 04/31] dev: PVC Reclaim Policy implementation --- dev/binary_build.sh | 2 +- dev/image_build_universal.sh | 2 +- ...1shard-1repl-simple-persistent-volume.yaml | 1 + 
...ouse-operator-template-01-section-crd.yaml | 5 + .../operator/clickhouse-operator-install.yaml | 5 + .../clickhouse.altinity.com/v1/type_chi.go | 31 +++ pkg/apis/clickhouse.altinity.com/v1/types.go | 23 +- .../v1/zz_generated.deepcopy.go | 55 +++- pkg/controller/chi/controller.go | 6 + pkg/controller/chi/creators.go | 11 +- pkg/controller/chi/deleters.go | 10 +- pkg/model/ch_config_sections.go | 12 + pkg/model/creator.go | 240 ++++++++---------- pkg/model/deleter.go | 39 +++ pkg/model/normalizer.go | 41 ++- pkg/model/types.go | 51 ---- 16 files changed, 328 insertions(+), 206 deletions(-) create mode 100644 pkg/model/deleter.go delete mode 100644 pkg/model/types.go diff --git a/dev/binary_build.sh b/dev/binary_build.sh index 7c2722f81..9fbd1bffa 100755 --- a/dev/binary_build.sh +++ b/dev/binary_build.sh @@ -12,7 +12,7 @@ VERSION=$(cd ${SRC_ROOT}; cat release) GIT_SHA=$(cd ${CUR_DIR}; git rev-parse --short HEAD) # Build clickhouse-operator install .yaml manifest -${SRC_ROOT}/manifests/operator/build-clickhouse-operator-yaml.sh +${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh #CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ${CUR_DIR}/clickhouse-operator ${SRC_ROOT}/cmd/clickhouse-operator CGO_ENABLED=0 go build \ diff --git a/dev/image_build_universal.sh b/dev/image_build_universal.sh index 79a4b44bb..7f0bcccea 100755 --- a/dev/image_build_universal.sh +++ b/dev/image_build_universal.sh @@ -15,7 +15,7 @@ DOCKERFILE_DIR="${SRC_ROOT}" DOCKERFILE="${DOCKERFILE_DIR}/Dockerfile" # Build clickhouse-operator install .yaml manifest -${SRC_ROOT}/manifests/operator/build-clickhouse-operator-yaml.sh +${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh # Build image with Docker if [[ "${MINIKUBE}" == "yes" ]]; then diff --git a/docs/examples/02-simple-layout-01-1shard-1repl-simple-persistent-volume.yaml b/docs/examples/02-simple-layout-01-1shard-1repl-simple-persistent-volume.yaml index b275b8784..613258c96 100644 --- 
a/docs/examples/02-simple-layout-01-1shard-1repl-simple-persistent-volume.yaml +++ b/docs/examples/02-simple-layout-01-1shard-1repl-simple-persistent-volume.yaml @@ -15,6 +15,7 @@ spec: templates: volumeClaimTemplates: - name: volumeclaim-template +# reclaimPolicy: Retain spec: accessModes: - ReadWriteOnce diff --git a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml index a0706ce53..f5c75dd60 100644 --- a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml +++ b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml @@ -176,6 +176,11 @@ spec: properties: name: type: string + reclaimPolicy: + type: string + enum: + - Retain + - Delete spec: # TODO specify PersistentVolumeClaimSpec type: object diff --git a/manifests/operator/clickhouse-operator-install.yaml b/manifests/operator/clickhouse-operator-install.yaml index d9d1912f1..505e64d71 100644 --- a/manifests/operator/clickhouse-operator-install.yaml +++ b/manifests/operator/clickhouse-operator-install.yaml @@ -176,6 +176,11 @@ spec: properties: name: type: string + reclaimPolicy: + type: string + enum: + - Retain + - Delete spec: # TODO specify PersistentVolumeClaimSpec type: object diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index 9de4ca996..e817ddfec 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -90,6 +90,25 @@ func (chi *ClickHouseInstallation) FillAddressInfo() int { return replicasCount } +func (chi *ClickHouseInstallation) FillChiPointer() { + + replicaProcessor := func( + chi *ClickHouseInstallation, + clusterIndex int, + cluster *ChiCluster, + shardIndex int, + shard *ChiShard, + replicaIndex int, + replica *ChiReplica, + ) error { + cluster.Chi = chi + shard.Chi = chi + replica.Chi = chi + return nil + } + chi.WalkReplicasFullPath(replicaProcessor) +} + func (chi 
*ClickHouseInstallation) WalkClustersFullPath( f func(chi *ClickHouseInstallation, clusterIndex int, cluster *ChiCluster) error, ) []error { @@ -268,3 +287,15 @@ func (chi *ClickHouseInstallation) ReplicasCount() int { }) return count } + +// GetVolumeClaimTemplate gets VolumeClaimTemplate by name +func (chi *ClickHouseInstallation) GetVolumeClaimTemplate(name string) (*ChiVolumeClaimTemplate, bool) { + volumeClaimTemplate, ok := chi.Spec.Templates.VolumeClaimTemplatesIndex[name] + return volumeClaimTemplate, ok +} + +// GetPodTemplate gets PodTemplate by name +func (chi *ClickHouseInstallation) GetPodTemplate(name string) (*ChiPodTemplate, bool) { + podTemplate, ok := chi.Spec.Templates.PodTemplatesIndex[name] + return podTemplate, ok +} diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 173f48471..6ac017cbb 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -78,7 +78,9 @@ type ChiCluster struct { Layout ChiLayout `json:"layout"` Templates ChiTemplateNames `json:"templates,omitempty"` - Address ChiClusterAddress `json:"address"` + // Internal data + Address ChiClusterAddress `json:"address"` + Chi *ClickHouseInstallation `json:"-"` } // ChiClusterAddress defines address of a cluster within ClickHouseInstallation @@ -109,7 +111,9 @@ type ChiShard struct { ReplicasCount int `json:"replicasCount,omitempty"` Replicas []ChiReplica `json:"replicas,omitempty"` - Address ChiShardAddress `json:"address"` + // Internal data + Address ChiShardAddress `json:"address"` + Chi *ClickHouseInstallation `json:"-"` } // ChiShardAddress defines address of a shard within ClickHouseInstallation @@ -128,8 +132,10 @@ type ChiReplica struct { Port int32 `json:"port,omitempty"` Templates ChiTemplateNames `json:"templates,omitempty"` - Address ChiReplicaAddress `json:"address"` - Config ChiReplicaConfig `json:"config"` + // Internal data + Address ChiReplicaAddress 
`json:"address"` + Config ChiReplicaConfig `json:"config"` + Chi *ClickHouseInstallation `json:"-"` } // ChiReplicaAddress defines address of a replica within ClickHouseInstallation @@ -152,10 +158,13 @@ type ChiReplicaConfig struct { // ChiTemplates defines templates section of .spec type ChiTemplates struct { - // TODO refactor into [string]ChiPodTemplate - PodTemplates []ChiPodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates"` - // TODO refactor into [string]ChiVolumeClaimTemplate + // Templates + PodTemplates []ChiPodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates"` VolumeClaimTemplates []ChiVolumeClaimTemplate `json:"volumeClaimTemplates,omitempty" yaml:"volumeClaimTemplates"` + + // Index maps template name to template itself + PodTemplatesIndex map[string]*ChiPodTemplate + VolumeClaimTemplatesIndex map[string]*ChiVolumeClaimTemplate } // ChiPodTemplate defines full Pod Template, directly used by StatefulSet diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go index 2a13f320c..54731523e 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go @@ -30,6 +30,15 @@ func (in *ChiCluster) DeepCopyInto(out *ChiCluster) { in.Layout.DeepCopyInto(&out.Layout) out.Templates = in.Templates out.Address = in.Address + if in.Chi != nil { + in, out := &in.Chi, &out.Chi + if *in == nil { + *out = nil + } else { + *out = new(ClickHouseInstallation) + (*in).DeepCopyInto(*out) + } + } return } @@ -207,6 +216,15 @@ func (in *ChiReplica) DeepCopyInto(out *ChiReplica) { out.Templates = in.Templates out.Address = in.Address out.Config = in.Config + if in.Chi != nil { + in, out := &in.Chi, &out.Chi + if *in == nil { + *out = nil + } else { + *out = new(ClickHouseInstallation) + (*in).DeepCopyInto(*out) + } + } return } @@ -259,9 +277,20 @@ func (in *ChiShard) DeepCopyInto(out *ChiShard) { if 
in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = make([]ChiReplica, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } out.Address = in.Address + if in.Chi != nil { + in, out := &in.Chi, &out.Chi + if *in == nil { + *out = nil + } else { + *out = new(ClickHouseInstallation) + (*in).DeepCopyInto(*out) + } + } return } @@ -364,6 +393,30 @@ func (in *ChiTemplates) DeepCopyInto(out *ChiTemplates) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PodTemplatesIndex != nil { + in, out := &in.PodTemplatesIndex, &out.PodTemplatesIndex + *out = make(map[string]*ChiPodTemplate, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = new(ChiPodTemplate) + val.DeepCopyInto((*out)[key]) + } + } + } + if in.VolumeClaimTemplatesIndex != nil { + in, out := &in.VolumeClaimTemplatesIndex, &out.VolumeClaimTemplatesIndex + *out = make(map[string]*ChiVolumeClaimTemplate, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = new(ChiVolumeClaimTemplate) + val.DeepCopyInto((*out)[key]) + } + } + } return } diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index d1bc57310..0b857c588 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -559,6 +559,12 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error { } func (c *Controller) onDeleteChi(chi *chop.ClickHouseInstallation) error { + chi, err := c.normalizer.CreateTemplatedChi(chi) + if err != nil { + glog.V(1).Infof("ClickHouseInstallation (%q): unable to normalize: %q", chi.Name, err) + return err + } + c.eventChi(chi, eventTypeNormal, eventActionDelete, eventReasonDeleteStarted, fmt.Sprintf("onDeleteChi(%s/%s) started", chi.Namespace, chi.Name)) c.deleteChi(chi) c.eventChi(chi, eventTypeNormal, eventActionDelete, eventReasonDeleteCompleted, fmt.Sprintf("onDeleteChi(%s/%s) completed", 
chi.Namespace, chi.Name)) diff --git a/pkg/controller/chi/creators.go b/pkg/controller/chi/creators.go index 373590c98..604e0236f 100644 --- a/pkg/controller/chi/creators.go +++ b/pkg/controller/chi/creators.go @@ -30,16 +30,16 @@ import ( // reconcileChi reconciles ClickHouseInstallation func (c *Controller) reconcile(chi *chop.ClickHouseInstallation) error { - creator := chopmodel.NewCreator( + reconciler := chopmodel.NewReconciler( chi, c.chopConfig, c.version, &chopmodel.ReconcileFuncs{ - ConfigMap: c.ReconcileConfigMap, - Service: c.ReconcileService, - StatefulSet: c.ReconcileStatefulSet, + ReconcileConfigMap: c.ReconcileConfigMap, + ReconcileService: c.ReconcileService, + ReconcileStatefulSet: c.ReconcileStatefulSet, }) - return creator.Reconcile() + return reconciler.Reconcile() } // reconcileConfigMap reconciles core.ConfigMap @@ -116,6 +116,7 @@ func (c *Controller) ReconcileStatefulSet(newStatefulSet *apps.StatefulSet, repl func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, replica *chop.ChiReplica) error { if statefulSet, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil { + // Error call Create() return err } else if err := c.waitStatefulSetGeneration(statefulSet.Namespace, statefulSet.Name, statefulSet.Generation); err == nil { // Target generation reached, StatefulSet created successfully diff --git a/pkg/controller/chi/deleters.go b/pkg/controller/chi/deleters.go index 047aae392..8a0e9f85d 100644 --- a/pkg/controller/chi/deleters.go +++ b/pkg/controller/chi/deleters.go @@ -161,15 +161,19 @@ func (c *Controller) statefulSetDelete(replica *chop.ChiReplica) error { // persistentVolumeClaimDelete deletes PersistentVolumeClaim func (c *Controller) persistentVolumeClaimDelete(replica *chop.ChiReplica) error { + name := "volumeclaim-template-" + chopmodel.CreatePodName(replica) namespace := replica.Address.Namespace - glog.V(1).Infof("persistentVolumeClaimDelete(%s/%s)", namespace, name) + if 
!chopmodel.ReplicaCanDeletePVC(replica) { + glog.V(1).Infof("KEPT PersistentVolumeClaim %s/%s", namespace, name) + return nil + } if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(name, newDeleteOptions()); err == nil { - glog.V(1).Infof("PersistentVolumeClaim %s/%s deleted", namespace, name) + glog.V(1).Infof("OK delete PersistentVolumeClaim %s/%s", namespace, name) } else { - glog.V(1).Infof("PersistentVolumeClaim %s/%s FAILED TO DELETE %v", namespace, name, err) + glog.V(1).Infof("FAIL delete PersistentVolumeClaim %s/%s %v", namespace, name, err) } return nil diff --git a/pkg/model/ch_config_sections.go b/pkg/model/ch_config_sections.go index a4b9f834c..2507afe95 100644 --- a/pkg/model/ch_config_sections.go +++ b/pkg/model/ch_config_sections.go @@ -20,6 +20,18 @@ import ( "github.com/altinity/clickhouse-operator/pkg/util" ) +type configSections struct { + // commonConfigSections maps section name to section XML chopConfig + commonConfigSections map[string]string + // commonUsersConfigSections maps section name to section XML chopConfig + commonUsersConfigSections map[string]string + + // ClickHouse config generator + chConfigGenerator *ClickHouseConfigGenerator + // clickhouse-operator configuration + chopConfig *config.Config +} + func NewConfigSections(chConfigGenerator *ClickHouseConfigGenerator, chopConfig *config.Config) *configSections { return &configSections{ commonConfigSections: make(map[string]string), diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 18983dde7..3bdc1faac 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -26,72 +26,70 @@ import ( "github.com/golang/glog" ) -// Creator is the base struct to create k8s objects -type Creator struct { +// Reconciler is the base struct to create k8s objects +type Reconciler struct { appVersion string chi *chiv1.ClickHouseInstallation chopConfig *config.Config chConfigGenerator *ClickHouseConfigGenerator chConfigSectionsGenerator *configSections labeler 
*Labeler - - podTemplatesIndex podTemplatesIndex - volumeClaimTemplatesIndex volumeClaimTemplatesIndex - reconcile *ReconcileFuncs + funcs *ReconcileFuncs } type ReconcileFuncs struct { - ConfigMap func(configMap *corev1.ConfigMap) error - Service func(service *corev1.Service) error - StatefulSet func(newStatefulSet *apps.StatefulSet, replica *chiv1.ChiReplica) error + ReconcileConfigMap func(configMap *corev1.ConfigMap) error + ReconcileService func(service *corev1.Service) error + ReconcileStatefulSet func(newStatefulSet *apps.StatefulSet, replica *chiv1.ChiReplica) error } -// NewCreator creates new creator -func NewCreator( +// NewReconciler creates new creator +func NewReconciler( chi *chiv1.ClickHouseInstallation, chopConfig *config.Config, appVersion string, - reconcile *ReconcileFuncs, -) *Creator { - creator := &Creator{ + funcs *ReconcileFuncs, +) *Reconciler { + reconciler := &Reconciler{ chi: chi, chopConfig: chopConfig, appVersion: appVersion, chConfigGenerator: NewClickHouseConfigGenerator(chi), labeler: NewLabeler(appVersion, chi), - reconcile: reconcile, + funcs: funcs, } - creator.chConfigSectionsGenerator = NewConfigSections(creator.chConfigGenerator, creator.chopConfig) - creator.createPodTemplatesIndex() - creator.createVolumeClaimTemplatesIndex() + reconciler.chConfigSectionsGenerator = NewConfigSections(reconciler.chConfigGenerator, reconciler.chopConfig) - return creator + return reconciler } -// ChiCreateObjects returns a map of the k8s objects created based on ClickHouseInstallation Object properties -func (c *Creator) Reconcile() error { - if err := c.reconcileServiceChi(CreateChiServiceName(c.chi)); err != nil { +// Reconcile runs reconcile process +func (r *Reconciler) Reconcile() error { + if err := r.reconcileServiceChi(r.chi); err != nil { return err } - if err := c.reconcileConfigMapsChi(); err != nil { + if err := r.reconcileConfigMapsChi(); err != nil { return err } - if err := c.reconcileReplicas(); err != nil { + if err := 
r.reconcileReplicas(); err != nil { return err } return nil } -func (c *Creator) reconcileServiceChi(serviceName string) error { - glog.V(1).Infof("reconcileServiceObjectChi() for service %s", serviceName) +// reconcileServiceChi reconciles global Services belonging to CHI +func (r *Reconciler) reconcileServiceChi(chi *chiv1.ClickHouseInstallation) error { + + serviceName := CreateChiServiceName(chi) + glog.V(1).Infof("reconcileServiceObjectChi() for service %s", serviceName) service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, - Namespace: c.chi.Namespace, - Labels: c.labeler.getLabelsCommonObject(), + Namespace: r.chi.Namespace, + Labels: r.labeler.getLabelsCommonObject(), }, Spec: corev1.ServiceSpec{ // ClusterIP: templateDefaultsServiceClusterIP, @@ -105,79 +103,81 @@ func (c *Creator) reconcileServiceChi(serviceName string) error { Port: chDefaultClientPortNumber, }, }, - Selector: c.labeler.getSelectorCommonObject(), + Selector: r.labeler.getSelectorCommonObject(), Type: "LoadBalancer", }, } - return c.reconcile.Service(service) + return r.funcs.ReconcileService(service) } -// reconcileConfigMapObjectsChi returns a list of corev1.ConfigMap objects -func (c *Creator) reconcileConfigMapsChi() error { - c.chConfigSectionsGenerator.CreateConfigsUsers() - c.chConfigSectionsGenerator.CreateConfigsCommon() +// reconcileConfigMapsChi reconciles global ConfigMaps belonging to CHI +func (r *Reconciler) reconcileConfigMapsChi() error { + r.chConfigSectionsGenerator.CreateConfigsUsers() + r.chConfigSectionsGenerator.CreateConfigsCommon() // ConfigMap common for all resources in CHI // contains several sections, mapped as separated chopConfig files, // such as remote servers, zookeeper setup, etc configMapCommon := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: CreateConfigMapCommonName(c.chi), - Namespace: c.chi.Namespace, - Labels: c.labeler.getLabelsCommonObject(), + Name: CreateConfigMapCommonName(r.chi), + Namespace: 
r.chi.Namespace, + Labels: r.labeler.getLabelsCommonObject(), }, // Data contains several sections which are to be several xml chopConfig files - Data: c.chConfigSectionsGenerator.commonConfigSections, + Data: r.chConfigSectionsGenerator.commonConfigSections, } - if err := c.reconcile.ConfigMap(configMapCommon); err != nil { + if err := r.funcs.ReconcileConfigMap(configMapCommon); err != nil { return err } // ConfigMap common for all users resources in CHI configMapUsers := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: CreateConfigMapCommonUsersName(c.chi), - Namespace: c.chi.Namespace, - Labels: c.labeler.getLabelsCommonObject(), + Name: CreateConfigMapCommonUsersName(r.chi), + Namespace: r.chi.Namespace, + Labels: r.labeler.getLabelsCommonObject(), }, // Data contains several sections which are to be several xml chopConfig files - Data: c.chConfigSectionsGenerator.commonUsersConfigSections, + Data: r.chConfigSectionsGenerator.commonUsersConfigSections, } - if err := c.reconcile.ConfigMap(configMapUsers); err != nil { + if err := r.funcs.ReconcileConfigMap(configMapUsers); err != nil { return err } return nil } -func (c *Creator) reconcileReplicas() error { +// reconcileReplicas reconciles all replicas +func (r *Reconciler) reconcileReplicas() error { replicaProcessor := func(replica *chiv1.ChiReplica) error { // Add replica's Service - service := c.createService(replica) - if err := c.reconcile.Service(service); err != nil { + service := r.createService(replica) + if err := r.funcs.ReconcileService(service); err != nil { return err } // Add replica's ConfigMap - configMap := c.createConfigMap(replica) - if err := c.reconcile.ConfigMap(configMap); err != nil { + configMap := r.createConfigMap(replica) + if err := r.funcs.ReconcileConfigMap(configMap); err != nil { return err } // Add replica's StatefulSet - statefulSet := c.createStatefulSet(replica) - if err := c.reconcile.StatefulSet(statefulSet, replica); err != nil { + statefulSet := 
r.createStatefulSet(replica) + if err := r.funcs.ReconcileStatefulSet(statefulSet, replica); err != nil { return err } return nil } - return c.chi.WalkReplicasTillError(replicaProcessor) + return r.chi.WalkReplicasTillError(replicaProcessor) } -func (c *Creator) createService(replica *chiv1.ChiReplica) *corev1.Service { +// createService creates new corev1.Service +func (r *Reconciler) createService(replica *chiv1.ChiReplica) *corev1.Service { serviceName := CreateStatefulSetServiceName(replica) statefulSetName := CreateStatefulSetName(replica) @@ -186,7 +186,7 @@ func (c *Creator) createService(replica *chiv1.ChiReplica) *corev1.Service { ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: replica.Address.Namespace, - Labels: c.labeler.getLabelsReplica(replica, false), + Labels: r.labeler.getLabelsReplica(replica, false), }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ @@ -203,25 +203,27 @@ func (c *Creator) createService(replica *chiv1.ChiReplica) *corev1.Service { Port: chDefaultInterServerPortNumber, }, }, - Selector: c.labeler.getSelectorReplica(replica), + Selector: r.labeler.getSelectorReplica(replica), ClusterIP: templateDefaultsServiceClusterIP, Type: "ClusterIP", }, } } -func (c *Creator) createConfigMap(replica *chiv1.ChiReplica) *corev1.ConfigMap { +// createConfigMap creates new corev1.ConfigMap +func (r *Reconciler) createConfigMap(replica *chiv1.ChiReplica) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: CreateConfigMapPodName(replica), Namespace: replica.Address.Namespace, - Labels: c.labeler.getLabelsReplica(replica, false), + Labels: r.labeler.getLabelsReplica(replica, false), }, - Data: c.chConfigSectionsGenerator.CreateConfigsPod(replica), + Data: r.chConfigSectionsGenerator.CreateConfigsPod(replica), } } -func (c *Creator) createStatefulSet(replica *chiv1.ChiReplica) *apps.StatefulSet { +// createStatefulSet creates new apps.StatefulSet +func (r *Reconciler) createStatefulSet(replica 
*chiv1.ChiReplica) *apps.StatefulSet { statefulSetName := CreateStatefulSetName(replica) serviceName := CreateStatefulSetServiceName(replica) @@ -232,13 +234,13 @@ func (c *Creator) createStatefulSet(replica *chiv1.ChiReplica) *apps.StatefulSet ObjectMeta: metav1.ObjectMeta{ Name: statefulSetName, Namespace: replica.Address.Namespace, - Labels: c.labeler.getLabelsReplica(replica, true), + Labels: r.labeler.getLabelsReplica(replica, true), }, Spec: apps.StatefulSetSpec{ Replicas: &replicasNum, ServiceName: serviceName, Selector: &metav1.LabelSelector{ - MatchLabels: c.labeler.getSelectorReplica(replica), + MatchLabels: r.labeler.getSelectorReplica(replica), }, // IMPORTANT // VolumeClaimTemplates are to be setup later @@ -250,16 +252,14 @@ func (c *Creator) createStatefulSet(replica *chiv1.ChiReplica) *apps.StatefulSet }, } - c.setupStatefulSetPodTemplate(statefulSet, replica) - c.setupStatefulSetVolumeClaimTemplates(statefulSet, replica) + r.setupStatefulSetPodTemplate(statefulSet, replica) + r.setupStatefulSetVolumeClaimTemplates(statefulSet, replica) return statefulSet } -func (c *Creator) setupStatefulSetPodTemplate( - statefulSetObject *apps.StatefulSet, - replica *chiv1.ChiReplica, -) { +// setupStatefulSetPodTemplate performs PodTemplate setup of StatefulSet +func (r *Reconciler) setupStatefulSetPodTemplate(statefulSetObject *apps.StatefulSet, replica *chiv1.ChiReplica) { statefulSetName := CreateStatefulSetName(replica) podTemplateName := replica.Templates.PodTemplate @@ -267,12 +267,12 @@ func (c *Creator) setupStatefulSetPodTemplate( // All the rest fields would be filled later statefulSetObject.Spec.Template = corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: c.labeler.getLabelsReplica(replica, true), + Labels: r.labeler.getLabelsReplica(replica, true), }, } // Specify pod templates - either explicitly defined or default - if podTemplate, ok := c.getPodTemplate(podTemplateName); ok { + if podTemplate, ok := 
r.chi.GetPodTemplate(podTemplateName); ok { // Replica references known PodTemplate copyPodTemplateFrom(statefulSetObject, podTemplate) glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - template: %s", statefulSetName, podTemplateName) @@ -282,24 +282,21 @@ func (c *Creator) setupStatefulSetPodTemplate( glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - default template", statefulSetName) } - c.setupConfigMapVolumes(statefulSetObject, replica) + r.setupConfigMapVolumes(statefulSetObject, replica) } // setupConfigMapVolumes adds to each container in the Pod VolumeMount objects with -func (c *Creator) setupConfigMapVolumes( - statefulSetObject *apps.StatefulSet, - replica *chiv1.ChiReplica, -) { +func (r *Reconciler) setupConfigMapVolumes(statefulSetObject *apps.StatefulSet, replica *chiv1.ChiReplica) { configMapMacrosName := CreateConfigMapPodName(replica) - configMapCommonName := CreateConfigMapCommonName(c.chi) - configMapCommonUsersName := CreateConfigMapCommonUsersName(c.chi) + configMapCommonName := CreateConfigMapCommonName(r.chi) + configMapCommonUsersName := CreateConfigMapCommonUsersName(r.chi) // Add all ConfigMap objects as Volume objects of type ConfigMap statefulSetObject.Spec.Template.Spec.Volumes = append( statefulSetObject.Spec.Template.Spec.Volumes, - createVolumeObjectConfigMap(configMapCommonName), - createVolumeObjectConfigMap(configMapCommonUsersName), - createVolumeObjectConfigMap(configMapMacrosName), + createVolumeForConfigMap(configMapCommonName), + createVolumeForConfigMap(configMapCommonUsersName), + createVolumeForConfigMap(configMapMacrosName), ) // And reference these Volumes in each Container via VolumeMount @@ -310,29 +307,30 @@ func (c *Creator) setupConfigMapVolumes( // Append to each Container current VolumeMount's to VolumeMount's declared in template container.VolumeMounts = append( container.VolumeMounts, - createVolumeMountObject(configMapCommonName, dirPathConfigd), - 
createVolumeMountObject(configMapCommonUsersName, dirPathUsersd), - createVolumeMountObject(configMapMacrosName, dirPathConfd), + createVolumeMount(configMapCommonName, dirPathConfigd), + createVolumeMount(configMapCommonUsersName, dirPathUsersd), + createVolumeMount(configMapMacrosName, dirPathConfd), ) } } -func (c *Creator) setupStatefulSetVolumeClaimTemplates( - statefulSetObject *apps.StatefulSet, +// setupStatefulSetVolumeClaimTemplates performs VolumeClaimTemplate setup for Containers in PodTemplate of a StatefulSet +func (r *Reconciler) setupStatefulSetVolumeClaimTemplates( + statefulSet *apps.StatefulSet, replica *chiv1.ChiReplica, ) { // Append VolumeClaimTemplates, that are referenced in Containers' VolumeMount object(s) // to StatefulSet's Spec.VolumeClaimTemplates slice, so these statefulSetName := CreateStatefulSetName(replica) - for i := range statefulSetObject.Spec.Template.Spec.Containers { + for i := range statefulSet.Spec.Template.Spec.Containers { // Convenience wrapper - container := &statefulSetObject.Spec.Template.Spec.Containers[i] + container := &statefulSet.Spec.Template.Spec.Containers[i] for j := range container.VolumeMounts { // Convenience wrapper volumeMount := &container.VolumeMounts[j] - if volumeClaimTemplate, ok := c.getVolumeClaimTemplate(volumeMount.Name); ok { + if volumeClaimTemplate, ok := r.chi.GetVolumeClaimTemplate(volumeMount.Name); ok { // Found VolumeClaimTemplate to mount by VolumeMount - appendVolumeClaimTemplateFrom(statefulSetObject, volumeClaimTemplate) + appendVolumeClaimTemplateFrom(statefulSet, volumeClaimTemplate) } } } @@ -341,7 +339,7 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates( // // We want to mount this default VolumeClaimTemplate into /var/lib/clickhouse in case: // 1. This default VolumeClaimTemplate is not already mounted with any VolumeMount - // 2. And /var/lib/clickhouse is not already mounted with any VolumeMount + // 2. 
And /var/lib/clickhouse is not already mounted with any VolumeMount defaultVolumeClaimTemplateName := replica.Templates.VolumeClaimTemplate @@ -350,15 +348,16 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates( return } - if _, ok := c.getVolumeClaimTemplate(defaultVolumeClaimTemplateName); !ok { - // Incorrect .templates.VolumeClaimTemplate specified + if _, ok := r.chi.GetVolumeClaimTemplate(defaultVolumeClaimTemplateName); !ok { + // Incorrect/unknown .templates.VolumeClaimTemplate specified return } // 1. Check explicit usage - whether this default VolumeClaimTemplate is already listed in VolumeMount - for i := range statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts { + clickHouseContainer := getClickHouseContainer(statefulSet) + for i := range clickHouseContainer.VolumeMounts { // Convenience wrapper - volumeMount := &statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts[i] + volumeMount := &clickHouseContainer.VolumeMounts[i] if volumeMount.Name == defaultVolumeClaimTemplateName { // This .templates.VolumeClaimTemplate is already used in VolumeMount glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - VC template 1: %s", statefulSetName, volumeMount.Name) @@ -371,9 +370,9 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates( // However, mount point /var/lib/clickhouse may be used already explicitly. Need to check // 2. 
Check whether /var/lib/clickhouse is already mounted - for i := range statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts { + for i := range clickHouseContainer.VolumeMounts { // Convenience wrapper - volumeMount := &statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts[i] + volumeMount := &clickHouseContainer.VolumeMounts[i] if volumeMount.MountPath == dirPathClickHouseData { // /var/lib/clickhouse is already mounted glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - VC template 2: /var/lib/clickhouse already mounted", statefulSetName) @@ -383,13 +382,13 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates( // This default volumeClaimTemplate is not used explicitly by name and /var/lib/clickhouse is not mounted also. // Let's mount this default VolumeClaimTemplate into /var/lib/clickhouse - if template, ok := c.getVolumeClaimTemplate(defaultVolumeClaimTemplateName); ok { + if template, ok := r.chi.GetVolumeClaimTemplate(defaultVolumeClaimTemplateName); ok { // Add VolumeClaimTemplate to StatefulSet - appendVolumeClaimTemplateFrom(statefulSetObject, template) + appendVolumeClaimTemplateFrom(statefulSet, template) // Add VolumeMount to ClickHouse container to /var/lib/clickhouse point - statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts = append( - statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts, - createVolumeMountObject(replica.Templates.VolumeClaimTemplate, dirPathClickHouseData), + clickHouseContainer.VolumeMounts = append( + clickHouseContainer.VolumeMounts, + createVolumeMount(replica.Templates.VolumeClaimTemplate, dirPathClickHouseData), ) } @@ -458,8 +457,8 @@ func createDefaultPodTemplate(name string) *chiv1.ChiPodTemplate { } } -// createVolumeObjectConfigMap returns corev1.Volume object with defined name -func createVolumeObjectConfigMap(name string) corev1.Volume { +// createVolumeForConfigMap 
returns corev1.Volume object with defined name +func createVolumeForConfigMap(name string) corev1.Volume { return corev1.Volume{ Name: name, VolumeSource: corev1.VolumeSource{ @@ -472,42 +471,15 @@ func createVolumeObjectConfigMap(name string) corev1.Volume { } } -// createVolumeMountObject returns corev1.VolumeMount object with name and mount path -func createVolumeMountObject(name, mountPath string) corev1.VolumeMount { +// createVolumeMount returns corev1.VolumeMount object with name and mount path +func createVolumeMount(name, mountPath string) corev1.VolumeMount { return corev1.VolumeMount{ Name: name, MountPath: mountPath, } } -// createVolumeClaimTemplatesIndex creates a map of volumeClaimTemplatesIndexData used as a reference storage for VolumeClaimTemplates -func (c *Creator) createVolumeClaimTemplatesIndex() { - c.volumeClaimTemplatesIndex = make(volumeClaimTemplatesIndex) - for i := range c.chi.Spec.Templates.VolumeClaimTemplates { - // Convenience wrapper - volumeClaimTemplate := &c.chi.Spec.Templates.VolumeClaimTemplates[i] - c.volumeClaimTemplatesIndex[volumeClaimTemplate.Name] = volumeClaimTemplate - } -} - -// getVolumeClaimTemplate gets VolumeClaimTemplate by name -func (c *Creator) getVolumeClaimTemplate(name string) (*chiv1.ChiVolumeClaimTemplate, bool) { - volumeClaimTemplate, ok := c.volumeClaimTemplatesIndex[name] - return volumeClaimTemplate, ok -} - -// createPodTemplatesIndex creates a map of podTemplatesIndexData used as a reference storage for PodTemplates -func (c *Creator) createPodTemplatesIndex() { - c.podTemplatesIndex = make(podTemplatesIndex) - for i := range c.chi.Spec.Templates.PodTemplates { - // Convenience wrapper - podTemplate := &c.chi.Spec.Templates.PodTemplates[i] - c.podTemplatesIndex[podTemplate.Name] = podTemplate - } -} - -// getPodTemplate gets PodTemplate by name -func (c *Creator) getPodTemplate(name string) (*chiv1.ChiPodTemplate, bool) { - podTemplate, ok := c.podTemplatesIndex[name] - return podTemplate, ok +// 
getClickHouseContainer finds Container with ClickHouse among all containers of Pod specified in StatefulSet +func getClickHouseContainer(statefulSet *apps.StatefulSet) *corev1.Container { + return &statefulSet.Spec.Template.Spec.Containers[ClickHouseContainerIndex] } diff --git a/pkg/model/deleter.go b/pkg/model/deleter.go new file mode 100644 index 000000000..d163fdc76 --- /dev/null +++ b/pkg/model/deleter.go @@ -0,0 +1,39 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" +) + +func ReplicaCanDeletePVC(replica *chiv1.ChiReplica) bool { + templateName := replica.Templates.VolumeClaimTemplate + template, ok := replica.Chi.GetVolumeClaimTemplate(templateName) + if !ok { + // Unknown template name, however, this is strange + return true + } + + switch template.PVCReclaimPolicy { + case chiv1.PVCReclaimPolicyRetain: + return false + case chiv1.PVCReclaimPolicyDelete: + return true + default: + // Unknown PVCReclaimPolicy + return true + } + +} diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index bda93faf0..f07c0cb21 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -18,6 +18,7 @@ import ( chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopconfig "github.com/altinity/clickhouse-operator/pkg/config" "github.com/altinity/clickhouse-operator/pkg/util" + "regexp" "strconv" "strings" @@ -57,15 +58,20 @@ func (n *Normalizer) DoChi(chi *chiv1.ClickHouseInstallation) (*chiv1.ClickHouse n.doConfiguration(&n.chi.Spec.Configuration) n.doTemplates(&n.chi.Spec.Templates) - endpoint := CreateChiServiceFQDN(chi) + n.doStatus() + + return n.chi, nil +} + +// doStatus prepares .status section +func (n *Normalizer) doStatus() { + endpoint := CreateChiServiceFQDN(n.chi) pods := make([]string, 0) n.chi.WalkReplicas(func(replica *chiv1.ChiReplica) error { pods = append(pods, CreatePodName(replica)) return nil }) n.chi.StatusFill(endpoint, pods) - - return n.chi, nil } // doDefaults normalizes .spec.defaults @@ -88,12 +94,27 @@ func (n *Normalizer) doConfiguration(conf *chiv1.ChiConfiguration) { // doTemplates normalizes .spec.templates func (n *Normalizer) doTemplates(templates *chiv1.ChiTemplates) { + for i := range templates.PodTemplates { + podTemplate := &templates.PodTemplates[i] + n.doPodTemplate(podTemplate) + } + for i := range templates.VolumeClaimTemplates { 
vcTemplate := &templates.VolumeClaimTemplates[i] n.doVolumeClaimTemplate(vcTemplate) } } +// doPodTemplate normalizes .spec.templates.podTemplates +func (n *Normalizer) doPodTemplate(template *chiv1.ChiPodTemplate) { + // Ensure map is in place + if n.chi.Spec.Templates.PodTemplatesIndex == nil { + n.chi.Spec.Templates.PodTemplatesIndex = make(map[string]*chiv1.ChiPodTemplate) + } + + n.chi.Spec.Templates.PodTemplatesIndex[template.Name] = template +} + // doVolumeClaimTemplate normalizes .spec.templates.volumeClaimTemplates func (n *Normalizer) doVolumeClaimTemplate(template *chiv1.ChiVolumeClaimTemplate) { // Check name @@ -102,6 +123,12 @@ func (n *Normalizer) doVolumeClaimTemplate(template *chiv1.ChiVolumeClaimTemplat template.PVCReclaimPolicy = chiv1.PVCReclaimPolicyDelete } // Check Spec + + // Ensure map is in place + if n.chi.Spec.Templates.VolumeClaimTemplatesIndex == nil { + n.chi.Spec.Templates.VolumeClaimTemplatesIndex = make(map[string]*chiv1.ChiVolumeClaimTemplate) + } + n.chi.Spec.Templates.VolumeClaimTemplatesIndex[template.Name] = template } // doClusters normalizes clusters @@ -121,6 +148,7 @@ func (n *Normalizer) doClusters() { return n.doCluster(cluster) }) n.chi.FillAddressInfo() + n.chi.FillChiPointer() n.chi.WalkReplicas(func(replica *chiv1.ChiReplica) error { replica.Config.ZkFingerprint = fingerprint(n.chi.Spec.Configuration.Zookeeper) return nil @@ -147,6 +175,13 @@ func (n *Normalizer) doConfigurationUsers(users *map[string]interface{}) { for path := range *users { // Split 'admin/password' tags := strings.Split(path, "/") + + // Basic sanity check - need to have at least "username/something" pair + if len(tags) < 2 { + // Skip incorrect entry + continue + } + username := tags[0] usernameMap[username] = true } diff --git a/pkg/model/types.go b/pkg/model/types.go deleted file mode 100644 index 0a10d65fa..000000000 --- a/pkg/model/types.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019 Altinity Ltd and/or its affiliates. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" - "github.com/altinity/clickhouse-operator/pkg/config" - apps "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" -) - -// ConfigMapList defines a list of the ConfigMap objects -type ConfigMapList []*corev1.ConfigMap - -// StatefulSetList defines a list of the StatefulSet objects -type StatefulSetList []*apps.StatefulSet - -// ServiceList defines a list of the Service objects -type ServiceList []*corev1.Service - -type configSections struct { - // commonConfigSections maps section name to section XML chopConfig - commonConfigSections map[string]string - // commonUsersConfigSections maps section name to section XML chopConfig - commonUsersConfigSections map[string]string - - chConfigGenerator *ClickHouseConfigGenerator - chopConfig *config.Config -} - -// volumeClaimTemplatesIndex maps volume claim template name - which -// is .spec.templates.volumeClaimTemplates.name to VolumeClaimTemplate itself -// Used to provide dictionary/index for templates -type volumeClaimTemplatesIndex map[string]*v1.ChiVolumeClaimTemplate - -// podTemplatesIndex maps pod template name - which -// is .spec.templates.podTemplates.name to PodTemplate itself -// Used to provide dictionary/index for templates -type podTemplatesIndex map[string]*v1.ChiPodTemplate From 20dbe4e97497d8eb7815f396d09d9b3614f07f5d Mon 
Sep 17 00:00:00 2001 From: Artem Silenkov Date: Wed, 5 Jun 2019 21:56:24 +0300 Subject: [PATCH 05/31] DEMO files for clickhouse --- ...0-zones-aws-02-pod-per-host-baremetal(old) | 62 +++++++++++++++ ...2-pod-per-host-baremetal-localstorage.yaml | 72 ++++++++++++++++++ ...0-zones-aws-02-pod-per-host-baremetal.yaml | 75 +++++++++++++++++++ .../operator/clickhouse-operator-install.yaml | 52 ++++++------- 4 files changed, 235 insertions(+), 26 deletions(-) create mode 100755 docs/examples/100-zones-aws-02-pod-per-host-baremetal(old) create mode 100755 docs/examples/100-zones-aws-02-pod-per-host-baremetal-localstorage.yaml create mode 100755 docs/examples/100-zones-aws-02-pod-per-host-baremetal.yaml diff --git a/docs/examples/100-zones-aws-02-pod-per-host-baremetal(old) b/docs/examples/100-zones-aws-02-pod-per-host-baremetal(old) new file mode 100755 index 000000000..0f9ff769c --- /dev/null +++ b/docs/examples/100-zones-aws-02-pod-per-host-baremetal(old) @@ -0,0 +1,62 @@ +# +# AWS-specific labels, applicable in 'nodeAffinity' statements +# +# beta.kubernetes.io/arch=amd64 +# beta.kubernetes.io/instance-type=t2.medium +# beta.kubernetes.io/os=linux +# +# failure-domain.beta.kubernetes.io/region=us-east-1 +# failure-domain.beta.kubernetes.io/zone=us-east-1a +# +# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal +# kubernetes.io/role=node +# node-role.kubernetes.io/node= +# +# kops.k8s.io/instancegroup=nodes2 +# +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: "zones-pod-host" +spec: + defaults: + templates: + podTemplate: clickhouse-per-host-baremetal + configuration: + clusters: + - name: zoned-cluster + layout: + shardsCount: 3 + templates: + podTemplate: pod-template-with-volume + volumeClaimTemplate: storage-vc-template + templates: + podTemplates: + # Specify Pod Templates with affinity + + - name: clickhouse-per-host-in-baremetal + spec: + # Specify Pod anti-affinity to Pods with the same label "/app" on the same 
"hostname" + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "clickhouse.altinity.com/app" + operator: In + values: + - "chop" + topologyKey: "kubernetes.io/hostname" + containers: + - name: clickhouse-pod + image: yandex/clickhouse-server:19.3.7 + volumeMounts: + # Specify reference to volume on local filesystem + - name: local-path + mountPath: /var/lib/clickhouse + ports: + - name: http + containerPort: 8123 + - name: client + containerPort: 9000 + - name: interserver + containerPort: 9009 diff --git a/docs/examples/100-zones-aws-02-pod-per-host-baremetal-localstorage.yaml b/docs/examples/100-zones-aws-02-pod-per-host-baremetal-localstorage.yaml new file mode 100755 index 000000000..0d24f187b --- /dev/null +++ b/docs/examples/100-zones-aws-02-pod-per-host-baremetal-localstorage.yaml @@ -0,0 +1,72 @@ +# +# AWS-specific labels, applicable in 'nodeAffinity' statements +# +# beta.kubernetes.io/arch=amd64 +# beta.kubernetes.io/instance-type=t2.medium +# beta.kubernetes.io/os=linux +# +# failure-domain.beta.kubernetes.io/region=us-east-1 +# failure-domain.beta.kubernetes.io/zone=us-east-1a +# +# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal +# kubernetes.io/role=node +# node-role.kubernetes.io/node= +# +# kops.k8s.io/instancegroup=nodes2 +# +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: "ch-localstorage" +spec: + defaults: + templates: + podTemplate: clickhouse-per-host-in-baremetal-localstorage + configuration: + clusters: + - name: ch-localstorage + layout: + shardsCount: 2 + templates: + podTemplates: + # Specify Pod Templates with affinity + - name: clickhouse-per-host-in-baremetal-localstorage + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "clickhouse" + operator: In + values: + - "allow" + # Specify Pod anti-affinity to Pods with the same label "/app" on 
the same "hostname" + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "clickhouse.altinity.com/app" + operator: In + values: + - "chop" + topologyKey: "kubernetes.io/hostname" + volumes: + # Specify volume as path on local filesystem as a directory which will be created, if need be + - name: local-path + hostPath: + path: /mnt/data/clickhouse-test + type: DirectoryOrCreate + containers: + - name: clickhouse-pod + image: yandex/clickhouse-server:19.3.7 + ports: + - name: http + containerPort: 8123 + - name: client + containerPort: 9000 + - name: interserver + containerPort: 9009 + volumeMounts: + - name: local-path + mountPath: /var/lib/clickhouse diff --git a/docs/examples/100-zones-aws-02-pod-per-host-baremetal.yaml b/docs/examples/100-zones-aws-02-pod-per-host-baremetal.yaml new file mode 100755 index 000000000..20ca775f4 --- /dev/null +++ b/docs/examples/100-zones-aws-02-pod-per-host-baremetal.yaml @@ -0,0 +1,75 @@ +# +# AWS-specific labels, applicable in 'nodeAffinity' statements +# +# beta.kubernetes.io/arch=amd64 +# beta.kubernetes.io/instance-type=t2.medium +# beta.kubernetes.io/os=linux +# +# failure-domain.beta.kubernetes.io/region=us-east-1 +# failure-domain.beta.kubernetes.io/zone=us-east-1a +# +# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal +# kubernetes.io/role=node +# node-role.kubernetes.io/node= +# +# kops.k8s.io/instancegroup=nodes2 +# +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: "zones-pod-host" +spec: + defaults: + templates: + podTemplate: clickhouse-per-host-in-baremetal + volumeClaimTemplate: storage-vc-template + configuration: + clusters: + - name: zoned-cluster + layout: + shardsCount: 2 + templates: + podTemplates: + # Specify Pod Templates with affinity + - name: clickhouse-per-host-in-baremetal + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - 
matchExpressions: + - key: "clickhouse" + operator: In + values: + - "allow" + # Specify Pod anti-affinity to Pods with the same label "/app" on the same "hostname" + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "clickhouse.altinity.com/app" + operator: In + values: + - "chop" + topologyKey: "kubernetes.io/hostname" + containers: + - name: clickhouse-pod + image: yandex/clickhouse-server:19.3.7 + ports: + - name: http + containerPort: 8123 + - name: client + containerPort: 9000 + - name: interserver + containerPort: 9009 + volumeMounts: + - name: storage-vc-template + mountPath: /var/lib/clickhouse + volumeClaimTemplates: + - name: storage-vc-template + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi diff --git a/manifests/operator/clickhouse-operator-install.yaml b/manifests/operator/clickhouse-operator-install.yaml index d9d1912f1..8b3481596 100644 --- a/manifests/operator/clickhouse-operator-install.yaml +++ b/manifests/operator/clickhouse-operator-install.yaml @@ -1,7 +1,7 @@ # Possible Template Parameters: # -# kube-system -# altinity/clickhouse-operator:latest +# clickhouse-dev-mfs +# sunsingerus/clickhouse-operator:dev # # Setup CustomResourceDefinition # CustomResourceDefinition is namespace-less and must have unique name @@ -195,8 +195,8 @@ spec: --- # Possible Template Parameters: # -# kube-system -# altinity/clickhouse-operator:latest +# clickhouse-dev-mfs +# sunsingerus/clickhouse-operator:dev # # Setup ServiceAccount # ServiceAccount would be created in kubectl-specified namespace @@ -204,7 +204,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: clickhouse-operator - namespace: kube-system + namespace: clickhouse-dev-mfs --- # Setup ClusterRoleBinding between ClusterRole and ServiceAccount. 
# ClusterRoleBinding is namespace-less and must have unique name @@ -219,7 +219,7 @@ roleRef: subjects: - kind: ServiceAccount name: clickhouse-operator - namespace: kube-system + namespace: clickhouse-dev-mfs --- # Setup ClusterIP Service to provide monitoring metrics for Prometheus # Service would be created in kubectl-specified namespace @@ -230,7 +230,7 @@ kind: Service apiVersion: v1 metadata: name: clickhouse-operator-metrics - namespace: kube-system + namespace: clickhouse-dev-mfs labels: app: clickhouse-operator spec: @@ -242,15 +242,15 @@ spec: --- # Possible Template Parameters: # -# kube-system -# altinity/clickhouse-operator:latest +# clickhouse-dev-mfs +# sunsingerus/clickhouse-operator:dev # etc-clickhouse-operator-files # apiVersion: v1 kind: ConfigMap metadata: name: etc-clickhouse-operator-files - namespace: kube-system + namespace: clickhouse-dev-mfs data: config.yaml: | # Namespaces where clickhouse-operator listens for events. @@ -341,28 +341,28 @@ data: --- # Possible Template Parameters: # -# kube-system -# altinity/clickhouse-operator:latest +# clickhouse-dev-mfs +# sunsingerus/clickhouse-operator:dev # etc-clickhouse-operator-confd-files # apiVersion: v1 kind: ConfigMap metadata: name: etc-clickhouse-operator-confd-files - namespace: kube-system + namespace: clickhouse-dev-mfs data: --- # Possible Template Parameters: # -# kube-system -# altinity/clickhouse-operator:latest +# clickhouse-dev-mfs +# sunsingerus/clickhouse-operator:dev # etc-clickhouse-operator-configd-files # apiVersion: v1 kind: ConfigMap metadata: name: etc-clickhouse-operator-configd-files - namespace: kube-system + namespace: clickhouse-dev-mfs data: 01-clickhouse-operator-listen.xml: | @@ -382,28 +382,28 @@ data: --- # Possible Template Parameters: # -# kube-system -# altinity/clickhouse-operator:latest +# clickhouse-dev-mfs +# sunsingerus/clickhouse-operator:dev # etc-clickhouse-operator-templatesd-files # apiVersion: v1 kind: ConfigMap metadata: name: 
etc-clickhouse-operator-templatesd-files - namespace: kube-system + namespace: clickhouse-dev-mfs data: --- # Possible Template Parameters: # -# kube-system -# altinity/clickhouse-operator:latest +# clickhouse-dev-mfs +# sunsingerus/clickhouse-operator:dev # etc-clickhouse-operator-usersd-files # apiVersion: v1 kind: ConfigMap metadata: name: etc-clickhouse-operator-usersd-files - namespace: kube-system + namespace: clickhouse-dev-mfs data: 01-clickhouse-operator-user.xml: | @@ -424,8 +424,8 @@ data: --- # Possible Template Parameters: # -# kube-system -# altinity/clickhouse-operator:latest +# clickhouse-dev-mfs +# sunsingerus/clickhouse-operator:dev # # Setup Deployment for clickhouse-operator # Deployment would be created in kubectl-specified namespace @@ -433,7 +433,7 @@ kind: Deployment apiVersion: apps/v1 metadata: name: clickhouse-operator - namespace: kube-system + namespace: clickhouse-dev-mfs labels: app: clickhouse-operator spec: @@ -465,7 +465,7 @@ spec: name: etc-clickhouse-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:latest + image: sunsingerus/clickhouse-operator:dev imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder From bc9a66730870c42420e86db1e6c70927e4bfa94b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 6 Jun 2019 23:15:30 +0300 Subject: [PATCH 06/31] dev: Affinity shortcuts in PodTemplate --- ...ed-01-aws-pods-in-availability-zones.yaml} | 0 ...0-zones-advanced-02-aws-pod-per-host.yaml} | 0 ...-local-storage-02-advanced-host-path.yaml} | 0 ...ouse-operator-template-01-section-crd.yaml | 15 ++ .../operator/clickhouse-operator-install.yaml | 67 +++--- pkg/apis/clickhouse.altinity.com/v1/types.go | 11 +- .../v1/zz_generated.deepcopy.go | 22 ++ pkg/model/const.go | 5 + pkg/model/normalizer.go | 197 ++++++++++++++++++ 9 files changed, 289 insertions(+), 28 deletions(-) rename docs/examples/{10-zones-aws-01-pods-in-availability-zones.yaml => 
10-zones-advanced-01-aws-pods-in-availability-zones.yaml} (100%) rename docs/examples/{10-zones-aws-02-pod-per-host.yaml => 10-zones-advanced-02-aws-pod-per-host.yaml} (100%) rename docs/examples/{11-local-storage-01-host-path.yaml => 11-local-storage-02-advanced-host-path.yaml} (100%) diff --git a/docs/examples/10-zones-aws-01-pods-in-availability-zones.yaml b/docs/examples/10-zones-advanced-01-aws-pods-in-availability-zones.yaml similarity index 100% rename from docs/examples/10-zones-aws-01-pods-in-availability-zones.yaml rename to docs/examples/10-zones-advanced-01-aws-pods-in-availability-zones.yaml diff --git a/docs/examples/10-zones-aws-02-pod-per-host.yaml b/docs/examples/10-zones-advanced-02-aws-pod-per-host.yaml similarity index 100% rename from docs/examples/10-zones-aws-02-pod-per-host.yaml rename to docs/examples/10-zones-advanced-02-aws-pod-per-host.yaml diff --git a/docs/examples/11-local-storage-01-host-path.yaml b/docs/examples/11-local-storage-02-advanced-host-path.yaml similarity index 100% rename from docs/examples/11-local-storage-01-host-path.yaml rename to docs/examples/11-local-storage-02-advanced-host-path.yaml diff --git a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml index f5c75dd60..0fc6efa92 100644 --- a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml +++ b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml @@ -194,6 +194,21 @@ spec: properies: name: type: string + zone: + type: object + required: + - values + properties: + key: + type: string + values: + type: array + items: + type: string + distribution: + type: string + enum: + - OnePerHost spec: # TODO specify PodSpec type: object diff --git a/manifests/operator/clickhouse-operator-install.yaml b/manifests/operator/clickhouse-operator-install.yaml index e5b469c9e..162b2459f 100644 --- a/manifests/operator/clickhouse-operator-install.yaml +++ 
b/manifests/operator/clickhouse-operator-install.yaml @@ -1,7 +1,7 @@ # Possible Template Parameters: # -# clickhouse-dev-mfs -# sunsingerus/clickhouse-operator:dev +# kube-system +# altinity/clickhouse-operator:latest # # Setup CustomResourceDefinition # CustomResourceDefinition is namespace-less and must have unique name @@ -194,14 +194,29 @@ spec: properies: name: type: string + zone: + type: object + required: + - values + properties: + key: + type: string + values: + type: array + items: + type: string + distribution: + type: string + enum: + - OnePerHost spec: # TODO specify PodSpec type: object --- # Possible Template Parameters: # -# clickhouse-dev-mfs -# sunsingerus/clickhouse-operator:dev +# kube-system +# altinity/clickhouse-operator:latest # # Setup ServiceAccount # ServiceAccount would be created in kubectl-specified namespace @@ -209,7 +224,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: clickhouse-operator - namespace: clickhouse-dev-mfs + namespace: kube-system --- # Setup ClusterRoleBinding between ClusterRole and ServiceAccount. 
# ClusterRoleBinding is namespace-less and must have unique name @@ -224,7 +239,7 @@ roleRef: subjects: - kind: ServiceAccount name: clickhouse-operator - namespace: clickhouse-dev-mfs + namespace: kube-system --- # Setup ClusterIP Service to provide monitoring metrics for Prometheus # Service would be created in kubectl-specified namespace @@ -235,7 +250,7 @@ kind: Service apiVersion: v1 metadata: name: clickhouse-operator-metrics - namespace: clickhouse-dev-mfs + namespace: kube-system labels: app: clickhouse-operator spec: @@ -247,15 +262,15 @@ spec: --- # Possible Template Parameters: # -# clickhouse-dev-mfs -# sunsingerus/clickhouse-operator:dev +# kube-system +# altinity/clickhouse-operator:latest # etc-clickhouse-operator-files # apiVersion: v1 kind: ConfigMap metadata: name: etc-clickhouse-operator-files - namespace: clickhouse-dev-mfs + namespace: kube-system data: config.yaml: | # Namespaces where clickhouse-operator listens for events. @@ -346,28 +361,28 @@ data: --- # Possible Template Parameters: # -# clickhouse-dev-mfs -# sunsingerus/clickhouse-operator:dev +# kube-system +# altinity/clickhouse-operator:latest # etc-clickhouse-operator-confd-files # apiVersion: v1 kind: ConfigMap metadata: name: etc-clickhouse-operator-confd-files - namespace: clickhouse-dev-mfs + namespace: kube-system data: --- # Possible Template Parameters: # -# clickhouse-dev-mfs -# sunsingerus/clickhouse-operator:dev +# kube-system +# altinity/clickhouse-operator:latest # etc-clickhouse-operator-configd-files # apiVersion: v1 kind: ConfigMap metadata: name: etc-clickhouse-operator-configd-files - namespace: clickhouse-dev-mfs + namespace: kube-system data: 01-clickhouse-operator-listen.xml: | @@ -387,28 +402,28 @@ data: --- # Possible Template Parameters: # -# clickhouse-dev-mfs -# sunsingerus/clickhouse-operator:dev +# kube-system +# altinity/clickhouse-operator:latest # etc-clickhouse-operator-templatesd-files # apiVersion: v1 kind: ConfigMap metadata: name: 
etc-clickhouse-operator-templatesd-files - namespace: clickhouse-dev-mfs + namespace: kube-system data: --- # Possible Template Parameters: # -# clickhouse-dev-mfs -# sunsingerus/clickhouse-operator:dev +# kube-system +# altinity/clickhouse-operator:latest # etc-clickhouse-operator-usersd-files # apiVersion: v1 kind: ConfigMap metadata: name: etc-clickhouse-operator-usersd-files - namespace: clickhouse-dev-mfs + namespace: kube-system data: 01-clickhouse-operator-user.xml: | @@ -429,8 +444,8 @@ data: --- # Possible Template Parameters: # -# clickhouse-dev-mfs -# sunsingerus/clickhouse-operator:dev +# kube-system +# altinity/clickhouse-operator:latest # # Setup Deployment for clickhouse-operator # Deployment would be created in kubectl-specified namespace @@ -438,7 +453,7 @@ kind: Deployment apiVersion: apps/v1 metadata: name: clickhouse-operator - namespace: clickhouse-dev-mfs + namespace: kube-system labels: app: clickhouse-operator spec: @@ -470,7 +485,7 @@ spec: name: etc-clickhouse-operator-usersd-files containers: - name: clickhouse-operator - image: sunsingerus/clickhouse-operator:dev + image: altinity/clickhouse-operator:latest imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 6ac017cbb..27e57879d 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -169,8 +169,15 @@ type ChiTemplates struct { // ChiPodTemplate defines full Pod Template, directly used by StatefulSet type ChiPodTemplate struct { - Name string `json:"name" yaml:"name"` - Spec corev1.PodSpec `json:"spec" yaml:"spec"` + Name string `json:"name" yaml:"name"` + Zone ChiPodTemplateZone `json:"zone" yaml:"zone"` + Distribution string `json:"distribution" yaml:"distribution"` + Spec corev1.PodSpec `json:"spec" yaml:"spec"` +} + +type ChiPodTemplateZone struct { + Key string `json:"key" yaml:"key"` + Values
[]string `json:"values" yaml:"values"` } // ChiVolumeClaimTemplate defines PersistentVolumeClaim Template, directly used by StatefulSet diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go index 54731523e..0c0ff06de 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go @@ -196,6 +196,7 @@ func (in *ChiLayout) DeepCopy() *ChiLayout { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ChiPodTemplate) DeepCopyInto(out *ChiPodTemplate) { *out = *in + in.Zone.DeepCopyInto(&out.Zone) in.Spec.DeepCopyInto(&out.Spec) return } @@ -210,6 +211,27 @@ func (in *ChiPodTemplate) DeepCopy() *ChiPodTemplate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChiPodTemplateZone) DeepCopyInto(out *ChiPodTemplateZone) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiPodTemplateZone. +func (in *ChiPodTemplateZone) DeepCopy() *ChiPodTemplateZone { + if in == nil { + return nil + } + out := new(ChiPodTemplateZone) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ChiReplica) DeepCopyInto(out *ChiReplica) { *out = *in diff --git a/pkg/model/const.go b/pkg/model/const.go index 68ce40ae6..c03f10ec2 100644 --- a/pkg/model/const.go +++ b/pkg/model/const.go @@ -163,3 +163,8 @@ const ( // Default value for ClusterIP service templateDefaultsServiceClusterIP = "None" ) + +const ( + podDistributionOnePerHost = "OnePerHost" + podDistributionUnspecified = "Unspecified" +) diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index f07c0cb21..3d2ecd6e8 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -18,6 +18,8 @@ import ( chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopconfig "github.com/altinity/clickhouse-operator/pkg/config" "github.com/altinity/clickhouse-operator/pkg/util" + "k8s.io/api/core/v1" + v12 "k8s.io/apimachinery/pkg/apis/meta/v1" "regexp" "strconv" @@ -107,6 +109,31 @@ func (n *Normalizer) doTemplates(templates *chiv1.ChiTemplates) { // doPodTemplate normalizes .spec.templates.podTemplates func (n *Normalizer) doPodTemplate(template *chiv1.ChiPodTemplate) { + // Name + + // Zone + if len(template.Zone.Values) == 0 { + // In case no values specified - no key is reasonable + template.Zone.Key = "" + } else if template.Zone.Key == "" { + // We have values specified, but no key + // Use default zone key in this case + template.Zone.Key = "failure-domain.beta.kubernetes.io/zone" + } else { + // We have both key and value(s) specified explicitly + } + + // Distribution + if template.Distribution == podDistributionOnePerHost { + // Known distribution, all is fine + } else { + template.Distribution = podDistributionUnspecified + } + + // Spec + template.Spec.Affinity = n.mergeAffinity(template.Spec.Affinity, n.buildAffinity(template)) + + // Introduce PodTemplate into Index // Ensure map is in place if n.chi.Spec.Templates.PodTemplatesIndex == nil { n.chi.Spec.Templates.PodTemplatesIndex = make(map[string]*chiv1.ChiPodTemplate) @@ -115,6 +142,176 @@ 
func (n *Normalizer) doPodTemplate(template *chiv1.ChiPodTemplate) { n.chi.Spec.Templates.PodTemplatesIndex[template.Name] = template } +func (n *Normalizer) buildAffinity(template *chiv1.ChiPodTemplate) *v1.Affinity { + nodeAffinity := n.buildNodeAffinity(template) + podAntiAffinity := n.buildPodAntiAffinity(template) + + if nodeAffinity == nil && podAntiAffinity == nil { + return nil + } else { + return &v1.Affinity{ + NodeAffinity: nodeAffinity, + PodAffinity: nil, + PodAntiAffinity: podAntiAffinity, + } + } +} + +func (n *Normalizer) mergeAffinity(dst *v1.Affinity, src *v1.Affinity) *v1.Affinity { + if src == nil { + // Nothing to merge from + return dst + } + + if dst == nil { + // No receiver, allocate new one + dst = &v1.Affinity{ + NodeAffinity: n.mergeNodeAffinity(nil, src.NodeAffinity), + PodAffinity: src.PodAffinity, + PodAntiAffinity: n.mergePodAntiAffinity(nil, src.PodAntiAffinity), + } + } + + return dst +} + +func (n *Normalizer) buildNodeAffinity(template *chiv1.ChiPodTemplate) *v1.NodeAffinity { + if template.Zone.Key == "" { + return nil + } else { + return &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + // A list of node selector requirements by node's labels. + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: template.Zone.Key, + Operator: v1.NodeSelectorOpIn, + Values: template.Zone.Values, + }, + }, + // A list of node selector requirements by node's fields. 
+ //MatchFields: []v1.NodeSelectorRequirement{ + // v1.NodeSelectorRequirement{}, + //}, + }, + }, + }, + + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{}, + } + } +} + +func (n *Normalizer) mergeNodeAffinity(dst *v1.NodeAffinity, src *v1.NodeAffinity) *v1.NodeAffinity { + if src == nil { + // Nothing to merge from + return dst + } + + // Check NodeSelectors are available + if src.RequiredDuringSchedulingIgnoredDuringExecution == nil { + return dst + } + if len(src.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { + return dst + } + + if dst == nil { + // No receiver, allocate new one + dst = &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{}, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{}, + } + } + + // Copy NodeSelectors + for i := range src.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { + dst.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append( + dst.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + src.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[i], + ) + } + + // Copy PreferredSchedulingTerm + for i := range src.PreferredDuringSchedulingIgnoredDuringExecution { + dst.PreferredDuringSchedulingIgnoredDuringExecution = append( + dst.PreferredDuringSchedulingIgnoredDuringExecution, + src.PreferredDuringSchedulingIgnoredDuringExecution[i], + ) + } + + return dst +} + +func (n *Normalizer) buildPodAntiAffinity(template *chiv1.ChiPodTemplate) *v1.PodAntiAffinity { + if template.Distribution == podDistributionOnePerHost { + return &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &v12.LabelSelector{ + // A list of node selector requirements by node's labels. 
+ MatchExpressions: []v12.LabelSelectorRequirement{ + { + Key: LabelApp, + Operator: v12.LabelSelectorOpIn, + Values: []string{ + LabelAppValue, + }, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + + PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{}, + } + } else { + return nil + } +} + +func (n *Normalizer) mergePodAntiAffinity(dst *v1.PodAntiAffinity, src *v1.PodAntiAffinity) *v1.PodAntiAffinity { + if src == nil { + // Nothing to merge from + return dst + } + + if len(src.RequiredDuringSchedulingIgnoredDuringExecution) == 0 { + return dst + } + + if dst == nil { + // No receiver, allocate new one + dst = &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{}, + PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{}, + } + } + + // Copy PodAffinityTerm + for i := range src.RequiredDuringSchedulingIgnoredDuringExecution { + dst.RequiredDuringSchedulingIgnoredDuringExecution = append( + dst.RequiredDuringSchedulingIgnoredDuringExecution, + src.RequiredDuringSchedulingIgnoredDuringExecution[i], + ) + } + + // Copy WeightedPodAffinityTerm + for i := range src.PreferredDuringSchedulingIgnoredDuringExecution { + dst.PreferredDuringSchedulingIgnoredDuringExecution = append( + dst.PreferredDuringSchedulingIgnoredDuringExecution, + src.PreferredDuringSchedulingIgnoredDuringExecution[i], + ) + } + + return dst +} + // doVolumeClaimTemplate normalizes .spec.templates.volumeClaimTemplates func (n *Normalizer) doVolumeClaimTemplate(template *chiv1.ChiVolumeClaimTemplate) { // Check name From e0fcc8f180d6fcb0056fe549fa3f1b60154d39b4 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 6 Jun 2019 23:17:00 +0300 Subject: [PATCH 07/31] env: new examples for affinity shortcuts and examples rearrangement --- ...ple-01-aws-pods-in-availability-zones.yaml | 75 +++++++++++++++++++ ...0-zones-01-simple-02-aws-pod-per-host.yaml | 49 ++++++++++++ 
...ed-01-aws-pods-in-availability-zones.yaml} | 0 ...ones-02-advanced-02-aws-pod-per-host.yaml} | 1 - .../11-local-storage-01-simple-host-path.yaml | 61 +++++++++++++++ 5 files changed, 185 insertions(+), 1 deletion(-) create mode 100644 docs/examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml create mode 100644 docs/examples/10-zones-01-simple-02-aws-pod-per-host.yaml rename docs/examples/{10-zones-advanced-01-aws-pods-in-availability-zones.yaml => 10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml} (100%) rename docs/examples/{10-zones-advanced-02-aws-pod-per-host.yaml => 10-zones-02-advanced-02-aws-pod-per-host.yaml} (99%) create mode 100644 docs/examples/11-local-storage-01-simple-host-path.yaml diff --git a/docs/examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml b/docs/examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml new file mode 100644 index 000000000..75215ece7 --- /dev/null +++ b/docs/examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml @@ -0,0 +1,75 @@ +# +# AWS-specific labels, applicable in 'nodeAffinity' statements +# +# beta.kubernetes.io/arch=amd64 +# beta.kubernetes.io/instance-type=t2.medium +# beta.kubernetes.io/os=linux +# +# failure-domain.beta.kubernetes.io/region=us-east-1 +# failure-domain.beta.kubernetes.io/zone=us-east-1a +# +# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal +# kubernetes.io/role=node +# node-role.kubernetes.io/node= +# +# kops.k8s.io/instancegroup=nodes2 +# +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: "aws-zones" +spec: + configuration: + clusters: + - name: zoned-cluster + layout: + shards: + - replicas: + - templates: + podTemplate: clickhouse-in-zone-us-east-1a + - templates: + podTemplate: clickhouse-in-zone-us-east-1a + - templates: + podTemplate: clickhouse-in-zone-us-east-1a + - templates: + podTemplate: clickhouse-in-zone-us-east-1b + - templates: + podTemplate: clickhouse-in-zone-us-east-1b + 
- templates: + podTemplate: clickhouse-in-zone-us-east-1b + + templates: + podTemplates: + # Specify Pod Templates with affinity + + - name: clickhouse-in-zone-us-east-1a + zone: + values: + - "us-east-1a" + spec: + containers: + - name: clickhouse-pod + image: yandex/clickhouse-server:19.3.7 + ports: + - name: http + containerPort: 8123 + - name: client + containerPort: 9000 + - name: interserver + containerPort: 9009 + + - name: clickhouse-in-zone-us-east-1b + zone: + values: + - "us-east-1b" + spec: + containers: + - name: clickhouse-pod + image: yandex/clickhouse-server:19.3.7 + ports: + - name: http + containerPort: 8123 + - name: client + containerPort: 9000 + - name: interserver + containerPort: 9009 diff --git a/docs/examples/10-zones-01-simple-02-aws-pod-per-host.yaml b/docs/examples/10-zones-01-simple-02-aws-pod-per-host.yaml new file mode 100644 index 000000000..c91f45389 --- /dev/null +++ b/docs/examples/10-zones-01-simple-02-aws-pod-per-host.yaml @@ -0,0 +1,49 @@ +# +# AWS-specific labels, applicable in 'nodeAffinity' statements +# +# beta.kubernetes.io/arch=amd64 +# beta.kubernetes.io/instance-type=t2.medium +# beta.kubernetes.io/os=linux +# +# failure-domain.beta.kubernetes.io/region=us-east-1 +# failure-domain.beta.kubernetes.io/zone=us-east-1a +# +# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal +# kubernetes.io/role=node +# node-role.kubernetes.io/node= +# +# kops.k8s.io/instancegroup=nodes2 +# +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: "zones-pod-host" +spec: + defaults: + templates: + podTemplate: clickhouse-per-host-in-zone-us-east-1a + configuration: + clusters: + - name: zoned-cluster + layout: + shardsCount: 3 + + templates: + podTemplates: + # Specify Pod Templates with affinity + - name: clickhouse-per-host-in-zone-us-east-1a + zone: + values: + - "us-east-1a" + distribution: "OnePerHost" + spec: + containers: + - name: clickhouse-pod + image: yandex/clickhouse-server:19.3.7 + ports: + 
- name: http + containerPort: 8123 + - name: client + containerPort: 9000 + - name: interserver + containerPort: 9009 diff --git a/docs/examples/10-zones-advanced-01-aws-pods-in-availability-zones.yaml b/docs/examples/10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml similarity index 100% rename from docs/examples/10-zones-advanced-01-aws-pods-in-availability-zones.yaml rename to docs/examples/10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml diff --git a/docs/examples/10-zones-advanced-02-aws-pod-per-host.yaml b/docs/examples/10-zones-02-advanced-02-aws-pod-per-host.yaml similarity index 99% rename from docs/examples/10-zones-advanced-02-aws-pod-per-host.yaml rename to docs/examples/10-zones-02-advanced-02-aws-pod-per-host.yaml index 3d6d94b2c..dd84ba182 100644 --- a/docs/examples/10-zones-advanced-02-aws-pod-per-host.yaml +++ b/docs/examples/10-zones-02-advanced-02-aws-pod-per-host.yaml @@ -31,7 +31,6 @@ spec: templates: podTemplates: # Specify Pod Templates with affinity - - name: clickhouse-per-host-in-zone-us-east-1a spec: affinity: diff --git a/docs/examples/11-local-storage-01-simple-host-path.yaml b/docs/examples/11-local-storage-01-simple-host-path.yaml new file mode 100644 index 000000000..9265d8152 --- /dev/null +++ b/docs/examples/11-local-storage-01-simple-host-path.yaml @@ -0,0 +1,61 @@ +# +# AWS-specific labels, applicable in 'nodeAffinity' statements +# +# beta.kubernetes.io/arch=amd64 +# beta.kubernetes.io/instance-type=t2.medium +# beta.kubernetes.io/os=linux +# +# failure-domain.beta.kubernetes.io/region=us-east-1 +# failure-domain.beta.kubernetes.io/zone=us-east-1a +# +# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal +# kubernetes.io/role=node +# node-role.kubernetes.io/node= +# +# kops.k8s.io/instancegroup=nodes2 +# +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: "volume-hostpath" +spec: + defaults: + templates: + podTemplate: clickhouse-per-host-on-servers-with-ssd + 
configuration: + clusters: + - name: local-storage + layout: + shardsCount: 3 + + templates: + podTemplates: + # Specify Pod Templates with affinity + + - name: clickhouse-per-host-on-servers-with-ssd + zone: + key: "disktype" + values: + - "ssd" + distribution: "OnePerHost" + spec: + volumes: + # Specify volume as path on local filesystem as a directory which will be created, if need be + - name: local-path + hostPath: + path: /mnt/podvolume + type: DirectoryOrCreate + containers: + - name: clickhouse-pod + image: yandex/clickhouse-server:19.3.7 + volumeMounts: + # Specify reference to volume on local filesystem + - name: local-path + mountPath: /var/lib/clickhouse + ports: + - name: http + containerPort: 8123 + - name: client + containerPort: 9000 + - name: interserver + containerPort: 9009 From a336f7324935e12bbb3e5b8671bd070bb092e681 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 7 Jun 2019 17:55:47 +0300 Subject: [PATCH 08/31] dev: Introduce Service Template --- ...ouse-operator-template-01-section-crd.yaml | 57 ++++++--- .../operator/clickhouse-operator-install.yaml | 57 ++++++--- .../clickhouse.altinity.com/v1/type_chi.go | 10 +- pkg/apis/clickhouse.altinity.com/v1/types.go | 11 +- .../v1/zz_generated.deepcopy.go | 36 ++++++ pkg/model/creator.go | 118 +++++++++++------- pkg/model/normalizer.go | 18 +++ pkg/util/map.go | 11 ++ 8 files changed, 232 insertions(+), 86 deletions(-) diff --git a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml index 0fc6efa92..928a4ada9 100644 --- a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml +++ b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml @@ -60,6 +60,8 @@ spec: type: string volumeClaimTemplate: type: string + serviceTemplate: + type: string configuration: type: object properties: @@ -109,6 +111,8 @@ spec: type: string volumeClaimTemplate: type: string + serviceTemplate: + type: string layout: type: 
object properties: @@ -145,6 +149,8 @@ spec: type: string volumeClaimTemplate: type: string + serviceTemplate: + type: string replicas: type: array items: @@ -163,27 +169,11 @@ spec: type: string volumeClaimTemplate: type: string + serviceTemplate: + type: string templates: type: object properties: - volumeClaimTemplates: - type: array - items: - type: object - required: - - name - - spec - properties: - name: - type: string - reclaimPolicy: - type: string - enum: - - Retain - - Delete - spec: - # TODO specify PersistentVolumeClaimSpec - type: object podTemplates: type: array items: @@ -212,3 +202,34 @@ spec: spec: # TODO specify PodSpec type: object + volumeClaimTemplates: + type: array + items: + type: object + required: + - name + - spec + properties: + name: + type: string + reclaimPolicy: + type: string + enum: + - Retain + - Delete + spec: + # TODO specify PersistentVolumeClaimSpec + type: object + serviceTemplates: + type: array + items: + type: object + required: + - name + - spec + properties: + name: + type: string + spec: + # TODO specify ServiceSpec + type: object diff --git a/manifests/operator/clickhouse-operator-install.yaml b/manifests/operator/clickhouse-operator-install.yaml index 162b2459f..ebbfb52a3 100644 --- a/manifests/operator/clickhouse-operator-install.yaml +++ b/manifests/operator/clickhouse-operator-install.yaml @@ -60,6 +60,8 @@ spec: type: string volumeClaimTemplate: type: string + serviceTemplate: + type: string configuration: type: object properties: @@ -109,6 +111,8 @@ spec: type: string volumeClaimTemplate: type: string + serviceTemplate: + type: string layout: type: object properties: @@ -145,6 +149,8 @@ spec: type: string volumeClaimTemplate: type: string + serviceTemplate: + type: string replicas: type: array items: @@ -163,27 +169,11 @@ spec: type: string volumeClaimTemplate: type: string + serviceTemplate: + type: string templates: type: object properties: - volumeClaimTemplates: - type: array - items: - type: object - 
required: - - name - - spec - properties: - name: - type: string - reclaimPolicy: - type: string - enum: - - Retain - - Delete - spec: - # TODO specify PersistentVolumeClaimSpec - type: object podTemplates: type: array items: @@ -212,6 +202,37 @@ spec: spec: # TODO specify PodSpec type: object + volumeClaimTemplates: + type: array + items: + type: object + required: + - name + - spec + properties: + name: + type: string + reclaimPolicy: + type: string + enum: + - Retain + - Delete + spec: + # TODO specify PersistentVolumeClaimSpec + type: object + serviceTemplates: + type: array + items: + type: object + required: + - name + - spec + properties: + name: + type: string + spec: + # TODO specify ServiceSpec + type: object --- # Possible Template Parameters: # diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index e817ddfec..0a88e455e 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -288,14 +288,20 @@ func (chi *ClickHouseInstallation) ReplicasCount() int { return count } -// GetVolumeClaimTemplate gets VolumeClaimTemplate by name +// GetVolumeClaimTemplate gets ChiVolumeClaimTemplate by name func (chi *ClickHouseInstallation) GetVolumeClaimTemplate(name string) (*ChiVolumeClaimTemplate, bool) { volumeClaimTemplate, ok := chi.Spec.Templates.VolumeClaimTemplatesIndex[name] return volumeClaimTemplate, ok } -// GetPodTemplate gets PodTemplate by name +// GetPodTemplate gets ChiPodTemplate by name func (chi *ClickHouseInstallation) GetPodTemplate(name string) (*ChiPodTemplate, bool) { podTemplate, ok := chi.Spec.Templates.PodTemplatesIndex[name] return podTemplate, ok } + +// GetServiceTemplate gets ChiServiceTemplate by name +func (chi *ClickHouseInstallation) GetServiceTemplate(name string) (*ChiServiceTemplate, bool) { + podTemplate, ok := chi.Spec.Templates.ServiceTemplatesIndex[name] + return podTemplate, ok +} diff --git 
a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 27e57879d..8ecb54461 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -57,8 +57,9 @@ type ChiDefaults struct { // ChiTemplateNames defines references to .spec.templates to be used on current level of cluster type ChiTemplateNames struct { - PodTemplate string `json:"podTemplate,omitempty" yaml:"podTemplate"` + PodTemplate string `json:"podTemplate,omitempty" yaml:"podTemplate"` VolumeClaimTemplate string `json:"volumeClaimTemplate,omitempty" yaml:"volumeClaimTemplate"` + ServiceTemplate string `json:"serviceTemplate" yaml:"serviceTemplate"` } // ChiConfiguration defines configuration section of .spec @@ -161,10 +162,12 @@ type ChiTemplates struct { // Templates PodTemplates []ChiPodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates"` VolumeClaimTemplates []ChiVolumeClaimTemplate `json:"volumeClaimTemplates,omitempty" yaml:"volumeClaimTemplates"` + ServiceTemplates []ChiServiceTemplate `json:"serviceTemplates" yaml:"serviceTemplates"` // Index maps template name to template itself PodTemplatesIndex map[string]*ChiPodTemplate VolumeClaimTemplatesIndex map[string]*ChiVolumeClaimTemplate + ServiceTemplatesIndex map[string]*ChiServiceTemplate } // ChiPodTemplate defines full Pod Template, directly used by StatefulSet @@ -205,6 +208,12 @@ func (v PVCReclaimPolicy) IsValid() bool { return false } +type ChiServiceTemplate struct { + Name string `json:"name" yaml:"name"` + GenerateName string `json:"generateName" yaml:"generateName"` + Spec corev1.ServiceSpec `json:"spec" yaml:"spec"` +} + // ChiDistributedDDL defines distributedDDL section of .spec.defaults type ChiDistributedDDL struct { Profile string `json:"profile,omitempty" yaml:"profile"` diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go index 0c0ff06de..93c184c46 
100644 --- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go @@ -292,6 +292,23 @@ func (in *ChiReplicaConfig) DeepCopy() *ChiReplicaConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChiServiceTemplate) DeepCopyInto(out *ChiServiceTemplate) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiServiceTemplate. +func (in *ChiServiceTemplate) DeepCopy() *ChiServiceTemplate { + if in == nil { + return nil + } + out := new(ChiServiceTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ChiShard) DeepCopyInto(out *ChiShard) { *out = *in @@ -415,6 +432,13 @@ func (in *ChiTemplates) DeepCopyInto(out *ChiTemplates) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ServiceTemplates != nil { + in, out := &in.ServiceTemplates, &out.ServiceTemplates + *out = make([]ChiServiceTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.PodTemplatesIndex != nil { in, out := &in.PodTemplatesIndex, &out.PodTemplatesIndex *out = make(map[string]*ChiPodTemplate, len(*in)) @@ -439,6 +463,18 @@ func (in *ChiTemplates) DeepCopyInto(out *ChiTemplates) { } } } + if in.ServiceTemplatesIndex != nil { + in, out := &in.ServiceTemplatesIndex, &out.ServiceTemplatesIndex + *out = make(map[string]*ChiServiceTemplate, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = new(ChiServiceTemplate) + val.DeepCopyInto((*out)[key]) + } + } + } return } diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 3bdc1faac..4410f4c46 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -17,6 +17,7 @@ package model 
import ( chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/config" + "github.com/altinity/clickhouse-operator/pkg/util" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -65,54 +66,32 @@ func NewReconciler( // Reconcile runs reconcile process func (r *Reconciler) Reconcile() error { - if err := r.reconcileServiceChi(r.chi); err != nil { + + // Reconcile CHI + if err := r.reconcileChiService(r.chi); err != nil { return err } - if err := r.reconcileConfigMapsChi(); err != nil { + if err := r.reconcileChiConfigMaps(); err != nil { return err } + // Reconcile Clusters if err := r.reconcileReplicas(); err != nil { return err } + return nil } -// reconcileServiceChi reconciles global Services belonging to CHI -func (r *Reconciler) reconcileServiceChi(chi *chiv1.ClickHouseInstallation) error { - - serviceName := CreateChiServiceName(chi) - - glog.V(1).Infof("reconcileServiceObjectChi() for service %s", serviceName) - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: r.chi.Namespace, - Labels: r.labeler.getLabelsCommonObject(), - }, - Spec: corev1.ServiceSpec{ - // ClusterIP: templateDefaultsServiceClusterIP, - Ports: []corev1.ServicePort{ - { - Name: chDefaultHTTPPortName, - Port: chDefaultHTTPPortNumber, - }, - { - Name: chDefaultClientPortName, - Port: chDefaultClientPortNumber, - }, - }, - Selector: r.labeler.getSelectorCommonObject(), - Type: "LoadBalancer", - }, - } - +// reconcileChiService reconciles global Services belonging to CHI +func (r *Reconciler) reconcileChiService(chi *chiv1.ClickHouseInstallation) error { + service := r.createChiService(chi) return r.funcs.ReconcileService(service) } -// reconcileConfigMapsChi reconciles global ConfigMaps belonging to CHI -func (r *Reconciler) reconcileConfigMapsChi() error { +// reconcileChiConfigMaps reconciles global ConfigMaps belonging to CHI +func (r *Reconciler) 
reconcileChiConfigMaps() error { r.chConfigSectionsGenerator.CreateConfigsUsers() r.chConfigSectionsGenerator.CreateConfigsCommon() @@ -176,19 +155,19 @@ func (r *Reconciler) reconcileReplicas() error { return r.chi.WalkReplicasTillError(replicaProcessor) } -// createService creates new corev1.Service -func (r *Reconciler) createService(replica *chiv1.ChiReplica) *corev1.Service { - serviceName := CreateStatefulSetServiceName(replica) - statefulSetName := CreateStatefulSetName(replica) +// createChiService creates new corev1.Service +func (r *Reconciler) createChiService(chi *chiv1.ClickHouseInstallation) *corev1.Service { + serviceName := CreateChiServiceName(chi) - glog.V(1).Infof("createService(%s):%s", serviceName, statefulSetName) + glog.V(1).Infof("createChiService(%s/%s)", chi.Namespace, serviceName) return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, - Namespace: replica.Address.Namespace, - Labels: r.labeler.getLabelsReplica(replica, false), + Namespace: r.chi.Namespace, + Labels: r.labeler.getLabelsCommonObject(), }, Spec: corev1.ServiceSpec{ + // ClusterIP: templateDefaultsServiceClusterIP, Ports: []corev1.ServicePort{ { Name: chDefaultHTTPPortName, @@ -198,18 +177,63 @@ func (r *Reconciler) createService(replica *chiv1.ChiReplica) *corev1.Service { Name: chDefaultClientPortName, Port: chDefaultClientPortNumber, }, - { - Name: chDefaultInterServerPortName, - Port: chDefaultInterServerPortNumber, - }, }, - Selector: r.labeler.getSelectorReplica(replica), - ClusterIP: templateDefaultsServiceClusterIP, - Type: "ClusterIP", + Selector: r.labeler.getSelectorCommonObject(), + Type: "LoadBalancer", }, } } +// createService creates new corev1.Service +func (r *Reconciler) createService(replica *chiv1.ChiReplica) *corev1.Service { + serviceName := CreateStatefulSetServiceName(replica) + statefulSetName := CreateStatefulSetName(replica) + + glog.V(1).Infof("createService(%s/%s) for Set %s", replica.Address.Namespace, serviceName, 
statefulSetName) + if template, ok := replica.Chi.GetServiceTemplate(replica.Templates.ServiceTemplate); ok { + // .templates.ServiceTemplate specified + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: replica.Address.Namespace, + Labels: r.labeler.getLabelsReplica(replica, false), + }, + Spec: *template.Spec.DeepCopy(), + } + service.Spec.Selector = util.MergeStringMaps(service.Spec.Selector, r.labeler.getSelectorReplica(replica)) + + return service + } else { + // Incorrect/unknown .templates.ServiceTemplate specified + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: replica.Address.Namespace, + Labels: r.labeler.getLabelsReplica(replica, false), + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: chDefaultHTTPPortName, + Port: chDefaultHTTPPortNumber, + }, + { + Name: chDefaultClientPortName, + Port: chDefaultClientPortNumber, + }, + { + Name: chDefaultInterServerPortName, + Port: chDefaultInterServerPortNumber, + }, + }, + Selector: r.labeler.getSelectorReplica(replica), + ClusterIP: templateDefaultsServiceClusterIP, + Type: "ClusterIP", + }, + } + } +} + // createConfigMap creates new corev1.ConfigMap func (r *Reconciler) createConfigMap(replica *chiv1.ChiReplica) *corev1.ConfigMap { return &corev1.ConfigMap{ diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index 3d2ecd6e8..b0f87c1d9 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -105,6 +105,11 @@ func (n *Normalizer) doTemplates(templates *chiv1.ChiTemplates) { vcTemplate := &templates.VolumeClaimTemplates[i] n.doVolumeClaimTemplate(vcTemplate) } + + for i := range templates.ServiceTemplates { + serviceTemplate := &templates.ServiceTemplates[i] + n.doServiceTemplate(serviceTemplate) + } } // doPodTemplate normalizes .spec.templates.podTemplates @@ -328,6 +333,19 @@ func (n *Normalizer) doVolumeClaimTemplate(template *chiv1.ChiVolumeClaimTemplat 
n.chi.Spec.Templates.VolumeClaimTemplatesIndex[template.Name] = template } +// doServiceTemplate normalizes .spec.templates.serviceTemplates +func (n *Normalizer) doServiceTemplate(template *chiv1.ChiServiceTemplate) { + // Check name + // Check GenerateName + // Check Spec + + // Ensure map is in place + if n.chi.Spec.Templates.ServiceTemplatesIndex == nil { + n.chi.Spec.Templates.ServiceTemplatesIndex = make(map[string]*chiv1.ChiServiceTemplate) + } + n.chi.Spec.Templates.ServiceTemplatesIndex[template.Name] = template +} + // doClusters normalizes clusters func (n *Normalizer) doClusters() { diff --git a/pkg/util/map.go b/pkg/util/map.go index ea52d87d4..74e83a8ec 100644 --- a/pkg/util/map.go +++ b/pkg/util/map.go @@ -26,3 +26,14 @@ func IncludeNonEmpty(dst map[string]string, key, src string) { return } + +// MergeStringMaps inserts (and overwrites) data into dst map object from src +func MergeStringMaps(dst, src map[string]string) map[string]string { + if dst == nil { + dst = make(map[string]string) + } + for key := range src { + dst[key] = src[key] + } + return dst +} From f1f5444d8cef431802cd560827d043f419b0b4ec Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 7 Jun 2019 21:16:14 +0300 Subject: [PATCH 09/31] dev: exctract model reconciler and continue with Service Template --- .../clickhouse.altinity.com/v1/type_chi.go | 65 +++++-- pkg/apis/clickhouse.altinity.com/v1/types.go | 24 +-- pkg/model/creator.go | 147 +-------------- pkg/model/reconciler.go | 170 ++++++++++++++++++ 4 files changed, 246 insertions(+), 160 deletions(-) create mode 100644 pkg/model/reconciler.go diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index 0a88e455e..37ae0ed11 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -246,6 +246,39 @@ func (chi *ClickHouseInstallation) WalkReplicasTillError( return nil } +func (chi *ClickHouseInstallation) 
WalkClusterTillError( + fChi func(chi *ClickHouseInstallation) error, + fCluster func(cluster *ChiCluster) error, + fShard func(shard *ChiShard) error, + fReplica func(replica *ChiReplica) error, +) error { + + if err := fChi(chi); err != nil { + return err + } + + for clusterIndex := range chi.Spec.Configuration.Clusters { + cluster := &chi.Spec.Configuration.Clusters[clusterIndex] + if err := fCluster(cluster); err != nil { + return err + } + for shardIndex := range cluster.Layout.Shards { + shard := &cluster.Layout.Shards[shardIndex] + if err := fShard(shard); err != nil { + return err + } + for replicaIndex := range shard.Replicas { + replica := &shard.Replicas[replicaIndex] + if err := fReplica(replica); err != nil { + return err + } + } + } + } + + return nil +} + func (chi *ClickHouseInstallation) MergeFrom(from *ClickHouseInstallation) { if from == nil { return @@ -288,20 +321,32 @@ func (chi *ClickHouseInstallation) ReplicasCount() int { return count } -// GetVolumeClaimTemplate gets ChiVolumeClaimTemplate by name -func (chi *ClickHouseInstallation) GetVolumeClaimTemplate(name string) (*ChiVolumeClaimTemplate, bool) { - volumeClaimTemplate, ok := chi.Spec.Templates.VolumeClaimTemplatesIndex[name] - return volumeClaimTemplate, ok -} - // GetPodTemplate gets ChiPodTemplate by name func (chi *ClickHouseInstallation) GetPodTemplate(name string) (*ChiPodTemplate, bool) { - podTemplate, ok := chi.Spec.Templates.PodTemplatesIndex[name] - return podTemplate, ok + if chi.Spec.Templates.PodTemplatesIndex == nil { + return nil, false + } else { + template, ok := chi.Spec.Templates.PodTemplatesIndex[name] + return template, ok + } +} + +// GetVolumeClaimTemplate gets ChiVolumeClaimTemplate by name +func (chi *ClickHouseInstallation) GetVolumeClaimTemplate(name string) (*ChiVolumeClaimTemplate, bool) { + if chi.Spec.Templates.VolumeClaimTemplatesIndex == nil { + return nil, false + } else { + template, ok := chi.Spec.Templates.VolumeClaimTemplatesIndex[name] + return 
template, ok + } } // GetServiceTemplate gets ChiServiceTemplate by name func (chi *ClickHouseInstallation) GetServiceTemplate(name string) (*ChiServiceTemplate, bool) { - podTemplate, ok := chi.Spec.Templates.ServiceTemplatesIndex[name] - return podTemplate, ok + if chi.Spec.Templates.ServiceTemplatesIndex == nil { + return nil, false + } else { + template, ok := chi.Spec.Templates.ServiceTemplatesIndex[name] + return template, ok + } } diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 8ecb54461..41a9a9da0 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -52,14 +52,14 @@ type ChiStatus struct { type ChiDefaults struct { ReplicasUseFQDN string `json:"replicasUseFQDN,omitempty" yaml:"replicasUseFQDN"` DistributedDDL ChiDistributedDDL `json:"distributedDDL,omitempty" yaml:"distributedDDL"` - Templates ChiTemplateNames `json:"templates" yaml:"templates"` + Templates ChiTemplateNames `json:"templates,omitempty" yaml:"templates"` } // ChiTemplateNames defines references to .spec.templates to be used on current level of cluster type ChiTemplateNames struct { PodTemplate string `json:"podTemplate,omitempty" yaml:"podTemplate"` VolumeClaimTemplate string `json:"volumeClaimTemplate,omitempty" yaml:"volumeClaimTemplate"` - ServiceTemplate string `json:"serviceTemplate" yaml:"serviceTemplate"` + ServiceTemplate string `json:"serviceTemplate,omitempty" yaml:"serviceTemplate"` } // ChiConfiguration defines configuration section of .spec @@ -160,9 +160,9 @@ type ChiReplicaConfig struct { // ChiTemplates defines templates section of .spec type ChiTemplates struct { // Templates - PodTemplates []ChiPodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates"` + PodTemplates []ChiPodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates"` VolumeClaimTemplates []ChiVolumeClaimTemplate `json:"volumeClaimTemplates,omitempty" yaml:"volumeClaimTemplates"` - 
ServiceTemplates []ChiServiceTemplate `json:"serviceTemplates" yaml:"serviceTemplates"` + ServiceTemplates []ChiServiceTemplate `json:"serviceTemplates,omitempty" yaml:"serviceTemplates"` // Index maps template name to template itself PodTemplatesIndex map[string]*ChiPodTemplate @@ -172,10 +172,10 @@ type ChiTemplates struct { // ChiPodTemplate defines full Pod Template, directly used by StatefulSet type ChiPodTemplate struct { - Name string `json:"name" yaml:"name"` - Zone ChiPodTemplateZone `json:"zone" yaml:"zone""` + Name string `json:"name" yaml:"name"` + Zone ChiPodTemplateZone `json:"zone" yaml:"zone""` Distribution string `json:"distribution" yaml:"distribution"` - Spec corev1.PodSpec `json:"spec" yaml:"spec"` + Spec corev1.PodSpec `json:"spec" yaml:"spec"` } type ChiPodTemplateZone struct { @@ -185,9 +185,9 @@ type ChiPodTemplateZone struct { // ChiVolumeClaimTemplate defines PersistentVolumeClaim Template, directly used by StatefulSet type ChiVolumeClaimTemplate struct { - Name string `json:"name" yaml:"name"` - PVCReclaimPolicy PVCReclaimPolicy `json:"reclaimPolicy"` - Spec corev1.PersistentVolumeClaimSpec `json:"spec" yaml:"spec"` + Name string `json:"name" yaml:"name"` + PVCReclaimPolicy PVCReclaimPolicy `json:"reclaimPolicy" yaml:"reclaimPolicy"` + Spec corev1.PersistentVolumeClaimSpec `json:"spec" yaml:"spec"` } type PVCReclaimPolicy string @@ -209,9 +209,9 @@ func (v PVCReclaimPolicy) IsValid() bool { } type ChiServiceTemplate struct { - Name string `json:"name" yaml:"name"` + Name string `json:"name" yaml:"name"` GenerateName string `json:"generateName" yaml:"generateName"` - Spec corev1.ServiceSpec `json:"spec" yaml:"spec"` + Spec corev1.ServiceSpec `json:"spec" yaml:"spec"` } // ChiDistributedDDL defines distributedDDL section of .spec.defaults diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 4410f4c46..968fcba34 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -16,7 +16,6 @@ package model import ( chiv1 
"github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" - "github.com/altinity/clickhouse-operator/pkg/config" "github.com/altinity/clickhouse-operator/pkg/util" apps "k8s.io/api/apps/v1" @@ -27,139 +26,11 @@ import ( "github.com/golang/glog" ) -// Reconciler is the base struct to create k8s objects -type Reconciler struct { - appVersion string - chi *chiv1.ClickHouseInstallation - chopConfig *config.Config - chConfigGenerator *ClickHouseConfigGenerator - chConfigSectionsGenerator *configSections - labeler *Labeler - funcs *ReconcileFuncs -} - -type ReconcileFuncs struct { - ReconcileConfigMap func(configMap *corev1.ConfigMap) error - ReconcileService func(service *corev1.Service) error - ReconcileStatefulSet func(newStatefulSet *apps.StatefulSet, replica *chiv1.ChiReplica) error -} - -// NewReconciler creates new creator -func NewReconciler( - chi *chiv1.ClickHouseInstallation, - chopConfig *config.Config, - appVersion string, - funcs *ReconcileFuncs, -) *Reconciler { - reconciler := &Reconciler{ - chi: chi, - chopConfig: chopConfig, - appVersion: appVersion, - chConfigGenerator: NewClickHouseConfigGenerator(chi), - labeler: NewLabeler(appVersion, chi), - funcs: funcs, - } - reconciler.chConfigSectionsGenerator = NewConfigSections(reconciler.chConfigGenerator, reconciler.chopConfig) - - return reconciler -} - -// Reconcile runs reconcile process -func (r *Reconciler) Reconcile() error { - - // Reconcile CHI - if err := r.reconcileChiService(r.chi); err != nil { - return err - } - - if err := r.reconcileChiConfigMaps(); err != nil { - return err - } - - // Reconcile Clusters - if err := r.reconcileReplicas(); err != nil { - return err - } - - return nil -} - -// reconcileChiService reconciles global Services belonging to CHI -func (r *Reconciler) reconcileChiService(chi *chiv1.ClickHouseInstallation) error { - service := r.createChiService(chi) - return r.funcs.ReconcileService(service) -} - -// reconcileChiConfigMaps reconciles global 
ConfigMaps belonging to CHI -func (r *Reconciler) reconcileChiConfigMaps() error { - r.chConfigSectionsGenerator.CreateConfigsUsers() - r.chConfigSectionsGenerator.CreateConfigsCommon() - - // ConfigMap common for all resources in CHI - // contains several sections, mapped as separated chopConfig files, - // such as remote servers, zookeeper setup, etc - configMapCommon := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: CreateConfigMapCommonName(r.chi), - Namespace: r.chi.Namespace, - Labels: r.labeler.getLabelsCommonObject(), - }, - // Data contains several sections which are to be several xml chopConfig files - Data: r.chConfigSectionsGenerator.commonConfigSections, - } - if err := r.funcs.ReconcileConfigMap(configMapCommon); err != nil { - return err - } - - // ConfigMap common for all users resources in CHI - configMapUsers := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: CreateConfigMapCommonUsersName(r.chi), - Namespace: r.chi.Namespace, - Labels: r.labeler.getLabelsCommonObject(), - }, - // Data contains several sections which are to be several xml chopConfig files - Data: r.chConfigSectionsGenerator.commonUsersConfigSections, - } - if err := r.funcs.ReconcileConfigMap(configMapUsers); err != nil { - return err - } - - return nil -} - -// reconcileReplicas reconciles all replicas -func (r *Reconciler) reconcileReplicas() error { - replicaProcessor := func(replica *chiv1.ChiReplica) error { - // Add replica's Service - service := r.createService(replica) - if err := r.funcs.ReconcileService(service); err != nil { - return err - } - - // Add replica's ConfigMap - configMap := r.createConfigMap(replica) - if err := r.funcs.ReconcileConfigMap(configMap); err != nil { - return err - } - - // Add replica's StatefulSet - statefulSet := r.createStatefulSet(replica) - if err := r.funcs.ReconcileStatefulSet(statefulSet, replica); err != nil { - return err - } - - return nil - } - - return r.chi.WalkReplicasTillError(replicaProcessor) -} - -// 
createChiService creates new corev1.Service -func (r *Reconciler) createChiService(chi *chiv1.ClickHouseInstallation) *corev1.Service { +// createServiceChi creates new corev1.Service +func (r *Reconciler) createServiceChi(chi *chiv1.ClickHouseInstallation) *corev1.Service { serviceName := CreateChiServiceName(chi) - glog.V(1).Infof("createChiService(%s/%s)", chi.Namespace, serviceName) + glog.V(1).Infof("createServiceChi(%s/%s)", chi.Namespace, serviceName) return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, @@ -184,13 +55,13 @@ func (r *Reconciler) createChiService(chi *chiv1.ClickHouseInstallation) *corev1 } } -// createService creates new corev1.Service -func (r *Reconciler) createService(replica *chiv1.ChiReplica) *corev1.Service { +// createServiceReplica creates new corev1.Service +func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Service { serviceName := CreateStatefulSetServiceName(replica) statefulSetName := CreateStatefulSetName(replica) - glog.V(1).Infof("createService(%s/%s) for Set %s", replica.Address.Namespace, serviceName, statefulSetName) - if template, ok := replica.Chi.GetServiceTemplate(replica.Templates.ServiceTemplate); ok { + glog.V(1).Infof("createServiceReplica(%s/%s) for Set %s", replica.Address.Namespace, serviceName, statefulSetName) + if template, ok := r.chi.GetServiceTemplate(replica.Templates.ServiceTemplate); ok { // .templates.ServiceTemplate specified service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -234,8 +105,8 @@ func (r *Reconciler) createService(replica *chiv1.ChiReplica) *corev1.Service { } } -// createConfigMap creates new corev1.ConfigMap -func (r *Reconciler) createConfigMap(replica *chiv1.ChiReplica) *corev1.ConfigMap { +// createConfigMapReplica creates new corev1.ConfigMap +func (r *Reconciler) createConfigMapReplica(replica *chiv1.ChiReplica) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: CreateConfigMapPodName(replica), 
diff --git a/pkg/model/reconciler.go b/pkg/model/reconciler.go new file mode 100644 index 000000000..4fb416fd2 --- /dev/null +++ b/pkg/model/reconciler.go @@ -0,0 +1,170 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/config" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Reconciler is the base struct to create k8s objects +type Reconciler struct { + appVersion string + chi *chiv1.ClickHouseInstallation + chopConfig *config.Config + chConfigGenerator *ClickHouseConfigGenerator + chConfigSectionsGenerator *configSections + labeler *Labeler + funcs *ReconcileFuncs +} + +type ReconcileFuncs struct { + ReconcileConfigMap func(configMap *corev1.ConfigMap) error + ReconcileService func(service *corev1.Service) error + ReconcileStatefulSet func(newStatefulSet *apps.StatefulSet, replica *chiv1.ChiReplica) error +} + +// NewReconciler creates new creator +func NewReconciler( + chi *chiv1.ClickHouseInstallation, + chopConfig *config.Config, + appVersion string, + funcs *ReconcileFuncs, +) *Reconciler { + reconciler := &Reconciler{ + chi: chi, + chopConfig: chopConfig, + appVersion: appVersion, + chConfigGenerator: NewClickHouseConfigGenerator(chi), + labeler: 
NewLabeler(appVersion, chi), + funcs: funcs, + } + reconciler.chConfigSectionsGenerator = NewConfigSections(reconciler.chConfigGenerator, reconciler.chopConfig) + + return reconciler +} + +// Reconcile runs reconcile process +func (r *Reconciler) Reconcile() error { + return r.chi.WalkClusterTillError( + r.reconcileChi, + r.reconcileCluster, + r.reconcileShard, + r.reconcileReplica, + ) +} + +// reconcileChi reconciles CHI global objects +func (r *Reconciler) reconcileChi(chi *chiv1.ClickHouseInstallation) error { + if err := r.reconcileChiService(r.chi); err != nil { + return err + } + + if err := r.reconcileChiConfigMaps(); err != nil { + return err + } + + return nil +} + +// reconcileCluster reconciles Cluster, excluding nested shards +func (r *Reconciler) reconcileCluster(cluster *chiv1.ChiCluster) error { + // Add replica's Service + //service := r.createServiceReplica(replica) + //if err := r.funcs.ReconcileService(service); err != nil { + // return err + //} + return nil +} + +// reconcileShard reconciles Shard, excluding nested replicas +func (r *Reconciler) reconcileShard(shard *chiv1.ChiShard) error { + // Add replica's Service + //service := r.createServiceReplica(replica) + //if err := r.funcs.ReconcileService(service); err != nil { + // return err + //} + return nil +} + +// reconcileReplica reconciles Replica +func (r *Reconciler) reconcileReplica(replica *chiv1.ChiReplica) error { + // Add replica's Service + service := r.createServiceReplica(replica) + if err := r.funcs.ReconcileService(service); err != nil { + return err + } + + // Add replica's ConfigMap + configMap := r.createConfigMapReplica(replica) + if err := r.funcs.ReconcileConfigMap(configMap); err != nil { + return err + } + + // Add replica's StatefulSet + statefulSet := r.createStatefulSet(replica) + if err := r.funcs.ReconcileStatefulSet(statefulSet, replica); err != nil { + return err + } + + return nil +} + +// reconcileChiService reconciles global Services belonging to CHI +func (r 
*Reconciler) reconcileChiService(chi *chiv1.ClickHouseInstallation) error { + service := r.createServiceChi(chi) + return r.funcs.ReconcileService(service) +} + +// reconcileChiConfigMaps reconciles global ConfigMaps belonging to CHI +func (r *Reconciler) reconcileChiConfigMaps() error { + r.chConfigSectionsGenerator.CreateConfigsUsers() + r.chConfigSectionsGenerator.CreateConfigsCommon() + + // ConfigMap common for all resources in CHI + // contains several sections, mapped as separated chopConfig files, + // such as remote servers, zookeeper setup, etc + configMapCommon := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: CreateConfigMapCommonName(r.chi), + Namespace: r.chi.Namespace, + Labels: r.labeler.getLabelsCommonObject(), + }, + // Data contains several sections which are to be several xml chopConfig files + Data: r.chConfigSectionsGenerator.commonConfigSections, + } + if err := r.funcs.ReconcileConfigMap(configMapCommon); err != nil { + return err + } + + // ConfigMap common for all users resources in CHI + configMapUsers := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: CreateConfigMapCommonUsersName(r.chi), + Namespace: r.chi.Namespace, + Labels: r.labeler.getLabelsCommonObject(), + }, + // Data contains several sections which are to be several xml chopConfig files + Data: r.chConfigSectionsGenerator.commonUsersConfigSections, + } + if err := r.funcs.ReconcileConfigMap(configMapUsers); err != nil { + return err + } + + return nil +} From 66ea28ea8d8a9d151073d408d151b4c929bd7a15 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 8 Jun 2019 22:43:50 +0300 Subject: [PATCH 10/31] dev: namer use parametrised templates: {chi}, {cluster}, etc --- pkg/model/ch_config.go | 4 +- pkg/model/const.go | 55 -------------- pkg/model/labeler.go | 20 ++--- pkg/model/namer.go | 166 +++++++++++++++++++++++++++-------------- 4 files changed, 121 insertions(+), 124 deletions(-) diff --git a/pkg/model/ch_config.go b/pkg/model/ch_config.go 
index 78db502c1..da76cc2f2 100644 --- a/pkg/model/ch_config.go +++ b/pkg/model/ch_config.go @@ -23,6 +23,8 @@ import ( ) const ( + distributedDDLPathPattern = "/clickhouse/%s/task_queue/ddl" + // Special auto-generated clusters. Each of these clusters lay over all replicas in CHI // 1. Cluster with one shard and all replicas. Used to duplicate data over all replicas. // 2. Cluster with all shards (1 replica). Used to gather/scatter data over all replicas. @@ -329,7 +331,7 @@ func (c *ClickHouseConfigGenerator) getRemoteServersReplicaHostname(replica *chi // In case .Spec.Defaults.ReplicasUseFQDN is set replicas would use FQDN pod hostname, // otherwise hostname+service name (unique within namespace) would be used // .my-dev-namespace.svc.cluster.local - return CreatePodHostname(replica) + "." + CreateNamespaceDomainName(replica.Address.Namespace) + return CreatePodFQDN(replica) } else { return CreatePodHostname(replica) } diff --git a/pkg/model/const.go b/pkg/model/const.go index c03f10ec2..264f6334f 100644 --- a/pkg/model/const.go +++ b/pkg/model/const.go @@ -86,61 +86,6 @@ const ( dirPathClickHouseData = "/var/lib/clickhouse" ) -const ( - // NAME READY AGE CONTAINERS IMAGES - // statefulset.apps/ss-1eb454-1 0/1 2s ss-1eb454-1 yandex/clickhouse-server:latest - statefulSetNamePattern = "chi-%s-%s-%s-%s" - - // NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR - // service/svc-1eb454-1 ClusterIP None 9000/TCP,9009/TCP,8123/TCP 2s clickhouse.altinity.com/app=ss-1eb454-1 - // service/svc-1eb454-2 ClusterIP None 9000/TCP,9009/TCP,8123/TCP 2s clickhouse.altinity.com/app=ss-1eb454-2 - // In this pattern "%s" is substituted with fullDeploymentIDPattern-generated value - // Ex.: svc-1eb454-2 - statefulSetServiceNamePattern = "chi-%s-%s-%s-%s" - - // namespaceDomainPattern presents Domain Name pattern of a namespace - // In this pattern "%s" is substituted namespace name's value - // Ex.: my-dev-namespace.svc.cluster.local - namespaceDomainPattern = 
"%s.svc.cluster.local" - - // NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR - // service/clickhouse-replcluster ClusterIP None 9000/TCP,9009/TCP,8123/TCP 1h - // In this pattern "%s" is substituted with clickhouse installation name - 'replcluster' in this case - // Ex.: test - chiServiceNamePattern = "clickhouse-%s" - - // ServiceName.domain.name - chiServiceFQDNPattern = "%s" + "." + namespaceDomainPattern - - // podFQDNPattern consists of 3 parts: - // 1. nameless service of of stateful set - // 2. namespace name - // Hostname.domain.name - podFQDNPattern = "%s" + "." + namespaceDomainPattern - - // podNamePattern is a name of a Pod as ServiceName-0 - podNamePattern = "%s-0" - - // NAME DATA AGE - // chi-example-01-common-configd 2 2s - // chi-example-01-common-usersd 0 2s - // chi-example-01-deploy-confd-4a8ff63336-0 1 1s - - // configMapCommonNamePattern is a template of common settings for the CHI ConfigMap - // Ex.: chi-example02-common-configd for chi named as 'example02' - configMapCommonNamePattern = "chi-%s-common-configd" - - // configMapCommonUsersNamePattern is a template of common users settings for the CHI ConfigMap - // Ex.: chi-example02-common-usersd for chi named as 'example02' - configMapCommonUsersNamePattern = "chi-%s-common-usersd" - - // configMapDeploymentNamePattern is a template of macros ConfigMap - // Ex.: chi-example02-deploy-confd-33260f1800-2 for chi named as 'example02' - configMapDeploymentNamePattern = "chi-%s-deploy-confd-%s-%s-%s" - - distributedDDLPathPattern = "/clickhouse/%s/task_queue/ddl" -) - const ( // Default docker image to be used defaultClickHouseDockerImage = "yandex/clickhouse-server:latest" diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index fd4ae8fe1..c0b8397cf 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -37,14 +37,14 @@ func (l *Labeler) getLabelsCommonObject() map[string]string { return map[string]string{ LabelApp: LabelAppValue, LabelChop: l.version, - LabelChi: 
nameSectionChi(l.chi), + LabelChi: getNamePartChiName(l.chi), } } func (l *Labeler) getSelectorCommonObject() map[string]string { return map[string]string{ LabelApp: LabelAppValue, - LabelChi: nameSectionChi(l.chi), + LabelChi: getNamePartChiName(l.chi), } } @@ -52,10 +52,10 @@ func (l *Labeler) getLabelsReplica(replica *chi.ChiReplica, zk bool) map[string] labels := map[string]string{ LabelApp: LabelAppValue, LabelChop: l.version, - LabelChi: nameSectionChi(replica), - LabelCluster: nameSectionCluster(replica), - LabelShard: nameSectionShard(replica), - LabelReplica: nameSectionReplica(replica), + LabelChi: getNamePartChiName(replica), + LabelCluster: getNamePartClusterName(replica), + LabelShard: getNamePartShardName(replica), + LabelReplica: getNamePartReplicaName(replica), LabelStatefulSet: CreateStatefulSetName(replica), } if zk { @@ -68,10 +68,10 @@ func (l *Labeler) getSelectorReplica(replica *chi.ChiReplica) map[string]string return map[string]string{ LabelApp: LabelAppValue, // skip chop - LabelChi: nameSectionChi(replica), - LabelCluster: nameSectionCluster(replica), - LabelShard: nameSectionShard(replica), - LabelReplica: nameSectionReplica(replica), + LabelChi: getNamePartChiName(replica), + LabelCluster: getNamePartClusterName(replica), + LabelShard: getNamePartShardName(replica), + LabelReplica: getNamePartReplicaName(replica), // skip StatefulSet // skip Zookeeper } diff --git a/pkg/model/namer.go b/pkg/model/namer.go index f26461b48..d1d30b659 100644 --- a/pkg/model/namer.go +++ b/pkg/model/namer.go @@ -19,84 +19,152 @@ import ( chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/util" apps "k8s.io/api/apps/v1" + "strconv" + "strings" ) -func createChiNameID(name string) string { - //return util.CreateStringID(name, 6) - return util.StringHead(name, 15) +const ( + namePartChiMaxLen = 15 + namePartClusterMaxLen = 15 + namePartShardMaxLen = 15 + namePartReplicaMaxLen = 15 +) + 
+const ( + // chiServiceNamePattern is a template of CHI Service name + chiServiceNamePattern = "clickhouse-{chi}" + + // statefulSetNamePattern is a template of replica's StatefulSet's name + statefulSetNamePattern = "chi-{chi}-{cluster}-{shard}-{replica}" + + // statefulSetServiceNamePattern is a template of replica's StatefulSet's Service name + statefulSetServiceNamePattern = "chi-{chi}-{cluster}-{shard}-{replica}" + + // configMapCommonNamePattern is a template of common settings for the CHI ConfigMap + configMapCommonNamePattern = "chi-{chi}-common-configd" + + // configMapCommonUsersNamePattern is a template of common users settings for the CHI ConfigMap + configMapCommonUsersNamePattern = "chi-{chi}-common-usersd" + + // configMapDeploymentNamePattern is a template of macros ConfigMap + configMapDeploymentNamePattern = "chi-{chi}-deploy-confd-{cluster}-{shard}-{replica}" + + // namespaceDomainPattern presents Domain Name pattern of a namespace + // In this pattern "%s" is substituted namespace name's value + // Ex.: my-dev-namespace.svc.cluster.local + namespaceDomainPattern = "%s.svc.cluster.local" + + // ServiceName.domain.name + chiServiceFQDNPattern = "%s" + "." + namespaceDomainPattern + + // podFQDNPattern consists of 3 parts: + // 1. nameless service of of stateful set + // 2. namespace name + // Hostname.domain.name + podFQDNPattern = "%s" + "." 
+ namespaceDomainPattern + + // podNamePattern is a name of a Pod as ServiceName-0 + podNamePattern = "%s-0" +) + +func namePartChiName(name string) string { + return util.StringHead(name, namePartChiMaxLen) +} + +func namePartChiNameID(name string) string { + return util.CreateStringID(name, namePartChiMaxLen) +} + +func namePartClusterName(name string) string { + return util.StringHead(name, namePartClusterMaxLen) +} + +func namePartClusterNameID(name string) string { + return util.CreateStringID(name, namePartClusterMaxLen) +} + +func namePartShardName(name string) string { + return util.StringHead(name, namePartShardMaxLen) } -func createClusterNameID(name string) string { - //return util.CreateStringID(name, 4) - return util.StringHead(name, 15) +func namePartShardNameID(name string) string { + return util.CreateStringID(name, namePartShardMaxLen) } -func createShardNameID(name string) string { - return util.StringHead(name, 8) +func namePartReplicaName(name string) string { + return util.StringHead(name, namePartReplicaMaxLen) } -func createReplicaNameID(name string) string { - return util.StringHead(name, 8) +func namePartReplicaNameID(name string) string { + return util.CreateStringID(name, namePartReplicaMaxLen) } -func nameSectionChi(obj interface{}) string { +func getNamePartChiName(obj interface{}) string { switch obj.(type) { case *chop.ChiReplica: replica := obj.(*chop.ChiReplica) - return createChiNameID(replica.Address.ChiName) + return namePartChiName(replica.Address.ChiName) case *chop.ClickHouseInstallation: chi := obj.(*chop.ClickHouseInstallation) - return createChiNameID(chi.Name) + return namePartChiName(chi.Name) } return "ERROR" } -func nameSectionCluster(replica *chop.ChiReplica) string { - return createClusterNameID(replica.Address.ClusterName) +func getNamePartClusterName(replica *chop.ChiReplica) string { + return namePartClusterName(replica.Address.ClusterName) } -func nameSectionShard(replica *chop.ChiReplica) string { - return 
createShardNameID(replica.Address.ShardName) +func getNamePartShardName(replica *chop.ChiReplica) string { + return namePartShardName(replica.Address.ShardName) } -func nameSectionReplica(replica *chop.ChiReplica) string { - return createReplicaNameID(replica.Address.ReplicaName) +func getNamePartReplicaName(replica *chop.ChiReplica) string { + return namePartReplicaName(replica.Address.ReplicaName) +} + +func newReplacerReplica(replica *chop.ChiReplica) *strings.Replacer { + return strings.NewReplacer( + "{chi}", namePartChiName(replica.Address.ChiName), + "{chiID}", namePartChiNameID(replica.Address.ChiName), + "{cluster}", namePartClusterName(replica.Address.ClusterName), + "{clusterID}", namePartClusterNameID(replica.Address.ClusterName), + "{clusterIndex}", strconv.Itoa(replica.Address.ClusterIndex), + "{shard}", namePartShardName(replica.Address.ShardName), + "{shardID}", namePartShardNameID(replica.Address.ShardName), + "{shardIndex}", strconv.Itoa(replica.Address.ShardIndex), + "{replica}", namePartReplicaName(replica.Address.ReplicaName), + "{replicaID}", namePartReplicaNameID(replica.Address.ReplicaName), + "{replicaIndex}", strconv.Itoa(replica.Address.ReplicaIndex), + ) +} + +func newReplacerChi(chi *chop.ClickHouseInstallation) *strings.Replacer { + return strings.NewReplacer( + "{chi}", namePartChiName(chi.Name), + "{chiID}", namePartChiNameID(chi.Name), + ) } // CreateConfigMapPodName returns a name for a ConfigMap for replica's pod func CreateConfigMapPodName(replica *chop.ChiReplica) string { - return fmt.Sprintf( - configMapDeploymentNamePattern, - nameSectionChi(replica), - nameSectionCluster(replica), - nameSectionShard(replica), - nameSectionReplica(replica), - ) + return newReplacerReplica(replica).Replace(configMapDeploymentNamePattern) } // CreateConfigMapCommonName returns a name for a ConfigMap for replica's common chopConfig func CreateConfigMapCommonName(chi *chop.ClickHouseInstallation) string { - return fmt.Sprintf( - 
configMapCommonNamePattern, - nameSectionChi(chi), - ) + return newReplacerChi(chi).Replace(configMapCommonNamePattern) } // CreateConfigMapCommonUsersName returns a name for a ConfigMap for replica's common chopConfig func CreateConfigMapCommonUsersName(chi *chop.ClickHouseInstallation) string { - return fmt.Sprintf( - configMapCommonUsersNamePattern, - nameSectionChi(chi), - ) + return newReplacerChi(chi).Replace(configMapCommonUsersNamePattern) } // CreateChiServiceName creates a name of a Installation Service resource func CreateChiServiceName(chi *chop.ClickHouseInstallation) string { - return fmt.Sprintf( - chiServiceNamePattern, - chi.Name, - ) + return newReplacerChi(chi).Replace(chiServiceNamePattern) } // CreateChiServiceName creates a name of a Installation Service resource @@ -110,24 +178,12 @@ func CreateChiServiceFQDN(chi *chop.ClickHouseInstallation) string { // CreateStatefulSetName creates a name of a StatefulSet for replica func CreateStatefulSetName(replica *chop.ChiReplica) string { - return fmt.Sprintf( - statefulSetNamePattern, - nameSectionChi(replica), - nameSectionCluster(replica), - nameSectionShard(replica), - nameSectionReplica(replica), - ) + return newReplacerReplica(replica).Replace(statefulSetNamePattern) } // CreateStatefulSetServiceName returns a name of a StatefulSet-related Service for replica func CreateStatefulSetServiceName(replica *chop.ChiReplica) string { - return fmt.Sprintf( - statefulSetServiceNamePattern, - nameSectionChi(replica), - nameSectionCluster(replica), - nameSectionShard(replica), - nameSectionReplica(replica), - ) + return newReplacerReplica(replica).Replace(statefulSetServiceNamePattern) } // CreatePodHostname returns a name of a Pod resource for a replica @@ -136,12 +192,6 @@ func CreatePodHostname(replica *chop.ChiReplica) string { return CreateStatefulSetServiceName(replica) } -// CreateNamespaceDomainName creates domain name of a namespace -// .my-dev-namespace.svc.cluster.local -func 
CreateNamespaceDomainName(chiNamespace string) string { - return fmt.Sprintf(namespaceDomainPattern, chiNamespace) -} - // CreatePodFQDN creates a fully qualified domain name of a pod // ss-1eb454-2-0.my-dev-domain.svc.cluster.local func CreatePodFQDN(replica *chop.ChiReplica) string { From 301072c253ddc357d88e770539124acb5144505e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 8 Jun 2019 22:48:07 +0300 Subject: [PATCH 11/31] dev: unify FQDN pattern naming --- pkg/model/namer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/model/namer.go b/pkg/model/namer.go index d1d30b659..d92f99294 100644 --- a/pkg/model/namer.go +++ b/pkg/model/namer.go @@ -55,7 +55,7 @@ const ( namespaceDomainPattern = "%s.svc.cluster.local" // ServiceName.domain.name - chiServiceFQDNPattern = "%s" + "." + namespaceDomainPattern + serviceFQDNPattern = "%s" + "." + namespaceDomainPattern // podFQDNPattern consists of 3 parts: // 1. nameless service of of stateful set @@ -170,7 +170,7 @@ func CreateChiServiceName(chi *chop.ClickHouseInstallation) string { // CreateChiServiceName creates a name of a Installation Service resource func CreateChiServiceFQDN(chi *chop.ClickHouseInstallation) string { return fmt.Sprintf( - chiServiceFQDNPattern, + serviceFQDNPattern, CreateChiServiceName(chi), chi.Namespace, ) From 6287a4095abcb12c39baba7c1ca570b930053954 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 9 Jun 2019 12:12:37 +0300 Subject: [PATCH 12/31] dev: extract templates Merge function into separate entity --- .../clickhouse.altinity.com/v1/type_chi.go | 2 +- .../v1/type_cluster.go | 7 +-- .../v1/type_defaults.go | 9 +--- .../v1/type_replica.go | 7 +-- .../clickhouse.altinity.com/v1/type_shard.go | 7 +-- .../v1/type_templates.go | 45 +++++++++++++++---- pkg/apis/clickhouse.altinity.com/v1/types.go | 14 ++++++ pkg/controller/chi/controller.go | 4 +- pkg/model/normalizer.go | 6 +-- 9 files changed, 61 insertions(+), 40 deletions(-) diff --git 
a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index 37ae0ed11..d3e354006 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -35,7 +35,7 @@ func (chi *ClickHouseInstallation) StatusFill(endpoint string, pods []string) { chi.Status.Endpoint = endpoint } -func (chi *ClickHouseInstallation) IsFilled() bool { +func (chi *ClickHouseInstallation) IsNormalized() bool { filled := true clusters := 0 chi.WalkClusters(func(cluster *ChiCluster) error { diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go index 019755c6b..afe17da67 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go @@ -15,12 +15,7 @@ package v1 func (cluster *ChiCluster) InheritTemplates(chi *ClickHouseInstallation) { - if cluster.Templates.PodTemplate == "" { - cluster.Templates.PodTemplate = chi.Spec.Defaults.Templates.PodTemplate - } - if cluster.Templates.VolumeClaimTemplate == "" { - cluster.Templates.VolumeClaimTemplate = chi.Spec.Defaults.Templates.VolumeClaimTemplate - } + (&cluster.Templates).MergeFrom(&chi.Spec.Defaults.Templates) } func (cluster *ChiCluster) WalkShards( diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go b/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go index 3a9a86610..0da3aeec9 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go @@ -23,12 +23,5 @@ func (defaults *ChiDefaults) MergeFrom(from *ChiDefaults) { defaults.ReplicasUseFQDN = from.ReplicasUseFQDN } (&defaults.DistributedDDL).MergeFrom(&from.DistributedDDL) - - if defaults.Templates.PodTemplate == "" { - defaults.Templates.PodTemplate = from.Templates.PodTemplate - } - - if defaults.Templates.VolumeClaimTemplate == "" { - defaults.Templates.VolumeClaimTemplate = 
from.Templates.VolumeClaimTemplate - } + (&defaults.Templates).MergeFrom(&from.Templates) } diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go index dad7d5f92..3ab211da2 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go @@ -15,10 +15,5 @@ package v1 func (replica *ChiReplica) InheritTemplates(shard *ChiShard) { - if replica.Templates.PodTemplate == "" { - replica.Templates.PodTemplate = shard.Templates.PodTemplate - } - if replica.Templates.VolumeClaimTemplate == "" { - replica.Templates.VolumeClaimTemplate = shard.Templates.VolumeClaimTemplate - } + (&replica.Templates).MergeFrom(&shard.Templates) } diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go index 8787100c0..e2d7d49b0 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go @@ -15,12 +15,7 @@ package v1 func (shard *ChiShard) InheritTemplates(cluster *ChiCluster) { - if shard.Templates.PodTemplate == "" { - shard.Templates.PodTemplate = cluster.Templates.PodTemplate - } - if shard.Templates.VolumeClaimTemplate == "" { - shard.Templates.VolumeClaimTemplate = cluster.Templates.VolumeClaimTemplate - } + (&shard.Templates).MergeFrom(&cluster.Templates) } func (shard *ChiShard) WalkReplicas( diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go index 25bda5be7..e9d1efb29 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go @@ -27,13 +27,13 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates) { } // Loop over all 'from' templates and copy it in case no such template in receiver for fromIndex := range from.PodTemplates { - fromPodTemplate := &from.PodTemplates[fromIndex] + fromTemplate := 
&from.PodTemplates[fromIndex] // Try to find equal entry among local templates in receiver equalFound := false for toIndex := range templates.PodTemplates { - toPodTemplate := &templates.PodTemplates[toIndex] - if toPodTemplate.Name == fromPodTemplate.Name { + toTemplate := &templates.PodTemplates[toIndex] + if toTemplate.Name == fromTemplate.Name { // Receiver already have such a template equalFound = true break @@ -43,7 +43,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates) { if !equalFound { // Receiver has not such template // Append template from `from` - templates.PodTemplates = append(templates.PodTemplates, *fromPodTemplate.DeepCopy()) + templates.PodTemplates = append(templates.PodTemplates, *fromTemplate.DeepCopy()) } } } @@ -56,13 +56,13 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates) { } // Loop over all 'from' templates and copy it in case no such template in receiver for fromIndex := range from.VolumeClaimTemplates { - fromVolumeClaimTemplate := &from.VolumeClaimTemplates[fromIndex] + fromTemplate := &from.VolumeClaimTemplates[fromIndex] // Try to find equal entry among local templates in receiver equalFound := false for toIndex := range templates.VolumeClaimTemplates { - toVolumeClaimTemplate := &templates.VolumeClaimTemplates[toIndex] - if toVolumeClaimTemplate.Name == fromVolumeClaimTemplate.Name { + toTemplate := &templates.VolumeClaimTemplates[toIndex] + if toTemplate.Name == fromTemplate.Name { // Received already have such a node equalFound = true break @@ -72,7 +72,36 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates) { if !equalFound { // Receiver has not such template // Append Node from `from` - templates.VolumeClaimTemplates = append(templates.VolumeClaimTemplates, *fromVolumeClaimTemplate.DeepCopy()) + templates.VolumeClaimTemplates = append(templates.VolumeClaimTemplates, *fromTemplate.DeepCopy()) + } + } + } + + if len(from.ServiceTemplates) > 0 { + // We have templates to copy from + // 
Append ServiceTemplates from `from` to receiver + if templates.ServiceTemplates == nil { + templates.ServiceTemplates = make([]ChiServiceTemplate, 0) + } + // Loop over all 'from' templates and copy it in case no such template in receiver + for fromIndex := range from.ServiceTemplates { + fromTemplate := &from.ServiceTemplates[fromIndex] + + // Try to find equal entry among local templates in receiver + equalFound := false + for toIndex := range templates.ServiceTemplates { + toTemplate := &templates.ServiceTemplates[toIndex] + if toTemplate.Name == fromTemplate.Name { + // Received already have such a node + equalFound = true + break + } + } + + if !equalFound { + // Receiver has not such template + // Append Node from `from` + templates.ServiceTemplates = append(templates.ServiceTemplates, *fromTemplate.DeepCopy()) } } } diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 41a9a9da0..9d7c2775c 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -62,6 +62,18 @@ type ChiTemplateNames struct { ServiceTemplate string `json:"serviceTemplate,omitempty" yaml:"serviceTemplate"` } +func (templates *ChiTemplateNames) MergeFrom(from *ChiTemplateNames) { + if templates.PodTemplate == "" { + templates.PodTemplate = from.PodTemplate + } + if templates.VolumeClaimTemplate == "" { + templates.VolumeClaimTemplate = from.VolumeClaimTemplate + } + if templates.ServiceTemplate == "" { + templates.ServiceTemplate = from.ServiceTemplate + } +} + // ChiConfiguration defines configuration section of .spec type ChiConfiguration struct { Zookeeper ChiZookeeperConfig `json:"zookeeper,omitempty" yaml:"zookeeper"` @@ -98,6 +110,7 @@ type ChiLayout struct { Type string `json:"type"` ShardsCount int `json:"shardsCount,omitempty"` ReplicasCount int `json:"replicasCount,omitempty"` + // TODO refactor into map[string]ChiShard Shards []ChiShard `json:"shards,omitempty"` } @@ -110,6 
+123,7 @@ type ChiShard struct { InternalReplication string `json:"internalReplication,omitempty"` Templates ChiTemplateNames `json:"templates,omitempty"` ReplicasCount int `json:"replicasCount,omitempty"` + // TODO refactor into map[string]ChiReplica Replicas []ChiReplica `json:"replicas,omitempty"` // Internal data diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 0b857c588..65b5ea0af 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -493,11 +493,11 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error { return nil } - if !old.IsFilled() { + if !old.IsNormalized() { old, _ = c.normalizer.CreateTemplatedChi(old) } - if !new.IsFilled() { + if !new.IsNormalized() { new, _ = c.normalizer.CreateTemplatedChi(new) } diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go index b0f87c1d9..d9c9c7a52 100644 --- a/pkg/model/normalizer.go +++ b/pkg/model/normalizer.go @@ -445,7 +445,7 @@ func (n *Normalizer) doConfigurationSettings(settings *map[string]interface{}) { // doCluster normalizes cluster and returns deployments usage counters for this cluster func (n *Normalizer) doCluster(cluster *chiv1.ChiCluster) error { - // Inherit PodTemplate from .spec.defaults + // Use PodTemplate from .spec.defaults cluster.InheritTemplates(n.chi) // Convenience wrapper @@ -506,7 +506,7 @@ func (n *Normalizer) doShardReplicasCount(shard *chiv1.ChiShard, layoutReplicasC // We have Replicas specified as slice - ok, this means exact ReplicasCount is known shard.ReplicasCount = len(shard.Replicas) } else { - // Inherit ReplicasCount from layout + // MergeFrom ReplicasCount from layout shard.ReplicasCount = layoutReplicasCount } } @@ -578,7 +578,7 @@ func (n *Normalizer) doShardReplicas(shard *chiv1.ChiShard) { // Normalize a replica n.doReplicaName(replica, replicaIndex) n.doReplicaPort(replica) - // Inherit PodTemplate from shard + // Use PodTemplate from shard 
replica.InheritTemplates(shard) } } From 482ea855e385488dd83bee969280be2851e4d106 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 9 Jun 2019 12:53:42 +0300 Subject: [PATCH 13/31] dev: extract all label-related activities from getter into labeler --- pkg/apis/clickhouse.altinity.com/v1/types.go | 10 ++--- pkg/controller/chi/controller.go | 1 + pkg/controller/chi/creators.go | 4 +- pkg/controller/chi/getters.go | 46 +++----------------- pkg/model/creator.go | 1 + pkg/model/labeler.go | 20 +++++++++ 6 files changed, 36 insertions(+), 46 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 9d7c2775c..4b36f96a4 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -107,11 +107,11 @@ type ChiClusterAddress struct { // ChiLayout defines layout section of .spec.configuration.clusters type ChiLayout struct { // DEPRECATED - to be removed soon - Type string `json:"type"` - ShardsCount int `json:"shardsCount,omitempty"` - ReplicasCount int `json:"replicasCount,omitempty"` + Type string `json:"type"` + ShardsCount int `json:"shardsCount,omitempty"` + ReplicasCount int `json:"replicasCount,omitempty"` // TODO refactor into map[string]ChiShard - Shards []ChiShard `json:"shards,omitempty"` + Shards []ChiShard `json:"shards,omitempty"` } // ChiShard defines item of a shard section of .spec.configuration.clusters[n].shards @@ -124,7 +124,7 @@ type ChiShard struct { Templates ChiTemplateNames `json:"templates,omitempty"` ReplicasCount int `json:"replicasCount,omitempty"` // TODO refactor into map[string]ChiReplica - Replicas []ChiReplica `json:"replicas,omitempty"` + Replicas []ChiReplica `json:"replicas,omitempty"` // Internal data Address ChiShardAddress `json:"address"` diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 65b5ea0af..52f11a87c 100644 --- a/pkg/controller/chi/controller.go +++ 
b/pkg/controller/chi/controller.go @@ -512,6 +512,7 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error { c.eventChi(old, eventTypeNormal, eventActionUpdate, eventReasonUpdateStarted, fmt.Sprintf("onUpdateChi(%s/%s):", old.Namespace, old.Name)) // Deal with removed items + // TODO refactor to map[string]object handling, instead of slice for path := range diff.Removed { switch diff.Removed[path].(type) { case chop.ChiCluster: diff --git a/pkg/controller/chi/creators.go b/pkg/controller/chi/creators.go index 604e0236f..434f2e9f7 100644 --- a/pkg/controller/chi/creators.go +++ b/pkg/controller/chi/creators.go @@ -272,7 +272,7 @@ func (c *Controller) shouldContinueOnCreateFailed() error { } // Do not continue update - return errors.New(fmt.Sprintf("Create stopped due to previous errors")) + return fmt.Errorf("create stopped due to previous errors") } // shouldContinueOnUpdateFailed return nil in case 'continue' or error in case 'do not continue' @@ -286,7 +286,7 @@ func (c *Controller) shouldContinueOnUpdateFailed() error { } // Do not continue update - return errors.New(fmt.Sprintf("Update stopped due to previous errors")) + return fmt.Errorf("update stopped due to previous errors") } // hasStatefulSetReachedGeneration returns whether has StatefulSet reached the expected generation after upgrade or not diff --git a/pkg/controller/chi/getters.go b/pkg/controller/chi/getters.go index a16f23e82..631a3d2bc 100644 --- a/pkg/controller/chi/getters.go +++ b/pkg/controller/chi/getters.go @@ -123,26 +123,10 @@ func (c *Controller) getStatefulSet(obj *meta.ObjectMeta) (*apps.StatefulSet, er return nil, err } -// TODO move labels into models modules func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ClickHouseInstallation, error) { - // Parse Labels - // Labels: map[string]string{ - // labelChop: AppVersion, - // LabelChi: replica.Address.ChiName, - // LabelCluster: replica.Address.ClusterName, - // LabelClusterIndex: 
strconv.Itoa(replica.Address.ClusterIndex), - // LabelReplicaIndex: strconv.Itoa(replica.Address.ReplicaIndex), - // }, - - // ObjectMeta must have some labels - if len(objectMeta.Labels) == 0 { - return nil, fmt.Errorf("ObjectMeta %s does not have labels", objectMeta.Name) - } - - // ObjectMeta must have LabelChi: chi.Name label - chiName, ok := objectMeta.Labels[chopmodel.LabelChi] - if !ok { - return nil, fmt.Errorf("ObjectMeta %s does not generated by CHI", objectMeta.Name) + chiName, err := chopmodel.GetChiNameFromObjectMeta(objectMeta) + if err != nil { + return nil, fmt.Errorf("ObjectMeta %s does not generated by CHI %v", objectMeta.Name, err) } chi, err := c.chiLister.ClickHouseInstallations(objectMeta.Namespace).Get(chiName) @@ -158,26 +142,10 @@ func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chiv return chi, nil } -// TODO move labels into models modules func (c *Controller) createClusterFromObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ChiCluster, error) { - // Parse Labels - // Labels: map[string]string{ - // labelChop: AppVersion, - // LabelChi: replica.Address.ChiName, - // LabelCluster: replica.Address.ClusterName, - // LabelClusterIndex: strconv.Itoa(replica.Address.ClusterIndex), - // LabelReplicaIndex: strconv.Itoa(replica.Address.ReplicaIndex), - // }, - - // ObjectMeta must have some labels - if len(objectMeta.Labels) == 0 { - return nil, fmt.Errorf("ObjectMeta %s does not have labels", objectMeta.Name) - } - - // ObjectMeta must have LabelCluster - clusterName, ok := objectMeta.Labels[chopmodel.LabelCluster] - if !ok { - return nil, fmt.Errorf("ObjectMeta %s does not generated by CHI", objectMeta.Name) + clusterName, err := chopmodel.GetClusterNameFromObjectMeta(objectMeta) + if err != nil { + return nil, fmt.Errorf("ObjectMeta %s does not generated by CHI %v", objectMeta.Name, err) } chi, err := c.createChiFromObjectMeta(objectMeta) @@ -187,7 +155,7 @@ func (c *Controller) createClusterFromObjectMeta(objectMeta 
*meta.ObjectMeta) (* cluster := chi.FindCluster(clusterName) if cluster == nil { - return nil, fmt.Errorf("Can't find cluster %s in CHI %s", clusterName, chi.Name) + return nil, fmt.Errorf("can't find cluster %s in CHI %s", clusterName, chi.Name) } return cluster, nil diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 968fcba34..d47102abd 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -76,6 +76,7 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser return service } else { // Incorrect/unknown .templates.ServiceTemplate specified + // Create default Service return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index c0b8397cf..f8f0a04d6 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -118,3 +118,23 @@ func IsChopGeneratedObject(objectMeta *meta.ObjectMeta) bool { return ok } + +func GetChiNameFromObjectMeta(meta *meta.ObjectMeta) (string, error) { + // ObjectMeta must have LabelChi: chi.Name label + name, ok := meta.Labels[LabelChi] + if ok { + return name, nil + } else { + return "", fmt.Errorf("can not find %s label in meta", LabelChi) + } +} + +func GetClusterNameFromObjectMeta(meta *meta.ObjectMeta) (string, error) { + // ObjectMeta must have LabelCluster + name, ok := meta.Labels[LabelCluster] + if ok { + return name, nil + } else { + return "", fmt.Errorf("can not find %s label in meta", LabelChi) + } +} From a0a4630803cd0ac202b307298865c731e7fe6fde Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 9 Jun 2019 15:21:49 +0300 Subject: [PATCH 14/31] dev: templatize CHI Service --- .../99-clickhouseinstallation-max.yaml | 46 ++++++++++--- ...ouse-operator-template-01-section-crd.yaml | 9 +++ .../operator/clickhouse-operator-install.yaml | 9 +++ .../clickhouse.altinity.com/v1/type_chi.go | 9 ++- .../v1/type_replica.go | 18 ++++++ pkg/apis/clickhouse.altinity.com/v1/types.go | 1 + 
.../v1/zz_generated.deepcopy.go | 1 + pkg/model/creator.go | 64 ++++++++++++------- 8 files changed, 124 insertions(+), 33 deletions(-) diff --git a/docs/examples/99-clickhouseinstallation-max.yaml b/docs/examples/99-clickhouseinstallation-max.yaml index 1d198739d..fbbe9ea41 100644 --- a/docs/examples/99-clickhouseinstallation-max.yaml +++ b/docs/examples/99-clickhouseinstallation-max.yaml @@ -6,7 +6,7 @@ metadata: spec: defaults: - replicasUseFQDN: 0 # 0 - by default, 1 - enabled + replicasUseFQDN: "no" distributedDDL: profile: default templates: @@ -14,13 +14,15 @@ spec: volumeClaimTemplate: default-volume-claim configuration: + templates: + serviceTemplate: chi-service-template zookeeper: nodes: - - host: zk-statefulset-0.zk-service.default.svc.cluster.local + - host: zookeeper-0.zookeepers.zoo3ns.svc.cluster.local port: 2181 - - host: zk-statefulset-1.zk-service.default.svc.cluster.local + - host: zookeeper-1.zookeepers.zoo3ns.svc.cluster.local port: 2181 - - host: zk-statefulset-2.zk-service.default.svc.cluster.local + - host: zookeeper-2.zookeepers.zoo3ns.svc.cluster.local port: 2181 users: readonly/profile: readonly @@ -69,7 +71,7 @@ spec: clusters: - - name: sharded-replicated + - name: all-counts templates: podTemplate: clickhouse-v18.16.1 volumeClaimTemplate: default-volume-claim @@ -77,7 +79,7 @@ spec: shardsCount: 3 replicasCount: 2 - - name: sharded-non-replicated + - name: shards-only templates: podTemplate: clickhouse-v18.16.1 volumeClaimTemplate: default-volume-claim @@ -85,13 +87,13 @@ spec: shardsCount: 3 # replicasCount not specified, assumed = 1, by default - - name: replicated + - name: replicas-only templates: podTemplate: clickhouse-v18.16.1 volumeClaimTemplate: default-volume-claim layout: # shardsCount not specified, assumed = 1, by default - replicasCount: 4 + replicasCount: 3 - name: customized templates: @@ -121,14 +123,42 @@ spec: templates: podTemplate: clickhouse-v18.16.1 volumeClaimTemplate: default-volume-claim + serviceTemplate: 
replica-service-template replicas: - name: replica0 port: 9000 templates: podTemplate: clickhouse-v18.16.2 volumeClaimTemplate: default-volume-claim + serviceTemplate: replica-service-template templates: + serviceTemplates: + - name: chi-service-template + generateName: "service-{chi}" + # type ServiceSpec struct from k8s.io/core/v1 + spec: + ports: + - name: http + port: 8123 + - name: client + port: 9000 + type: LoadBalancer + + - name: replica-service-template + # type ServiceSpec struct from k8s.io/core/v1 + spec: + ports: + - name: http + port: 8123 + - name: client + port: 9000 + - name: interserver + port: 9009 + type: ClusterIP + ClusterIP: None + + volumeClaimTemplates: - name: default-volume-claim # type PersistentVolumeClaimSpec struct from k8s.io/core/v1 diff --git a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml index 928a4ada9..712bde455 100644 --- a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml +++ b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml @@ -65,6 +65,15 @@ spec: configuration: type: object properties: + templates: + type: object + properties: + podTemplate: + type: string + volumeClaimTemplate: + type: string + serviceTemplate: + type: string zookeeper: type: object properties: diff --git a/manifests/operator/clickhouse-operator-install.yaml b/manifests/operator/clickhouse-operator-install.yaml index ebbfb52a3..6f17ce9bb 100644 --- a/manifests/operator/clickhouse-operator-install.yaml +++ b/manifests/operator/clickhouse-operator-install.yaml @@ -65,6 +65,15 @@ spec: configuration: type: object properties: + templates: + type: object + properties: + podTemplate: + type: string + volumeClaimTemplate: + type: string + serviceTemplate: + type: string zookeeper: type: object properties: diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index d3e354006..3aa5fd594 100644 --- 
a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -40,7 +40,7 @@ func (chi *ClickHouseInstallation) IsNormalized() bool { clusters := 0 chi.WalkClusters(func(cluster *ChiCluster) error { clusters++ - if cluster.Address.Namespace == "" { + if cluster.Chi == nil { filled = false } return nil @@ -350,3 +350,10 @@ func (chi *ClickHouseInstallation) GetServiceTemplate(name string) (*ChiServiceT return template, ok } } + +// GetServiceTemplate gets own ChiServiceTemplate +func (chi *ClickHouseInstallation) GetOwnServiceTemplate() (*ChiServiceTemplate, bool) { + name := chi.Spec.Configuration.Templates.ServiceTemplate + template, ok := chi.GetServiceTemplate(name) + return template, ok +} diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go index 3ab211da2..0ad1a2cf9 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go @@ -17,3 +17,21 @@ package v1 func (replica *ChiReplica) InheritTemplates(shard *ChiShard) { (&replica.Templates).MergeFrom(&shard.Templates) } + +func (replica *ChiReplica) GetPodTemplate() (*ChiPodTemplate, bool) { + name := replica.Templates.ServiceTemplate + template, ok := replica.Chi.GetPodTemplate(name) + return template, ok +} + +func (replica *ChiReplica) GetVolumeClaimTemplate() (*ChiVolumeClaimTemplate, bool) { + name := replica.Templates.VolumeClaimTemplate + template, ok := replica.Chi.GetVolumeClaimTemplate(name) + return template, ok +} + +func (replica *ChiReplica) GetServiceTemplate() (*ChiServiceTemplate, bool) { + name := replica.Templates.ServiceTemplate + template, ok := replica.Chi.GetServiceTemplate(name) + return template, ok +} diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 4b36f96a4..54fd98a40 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ 
b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -76,6 +76,7 @@ func (templates *ChiTemplateNames) MergeFrom(from *ChiTemplateNames) { // ChiConfiguration defines configuration section of .spec type ChiConfiguration struct { + Templates ChiTemplateNames `json:"templates" yaml:"templates"` Zookeeper ChiZookeeperConfig `json:"zookeeper,omitempty" yaml:"zookeeper"` Users map[string]interface{} `json:"users,omitempty" yaml:"users"` Profiles map[string]interface{} `json:"profiles,omitempty" yaml:"profiles"` diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go index 93c184c46..b1e509c7d 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go @@ -71,6 +71,7 @@ func (in *ChiClusterAddress) DeepCopy() *ChiClusterAddress { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ChiConfiguration) DeepCopyInto(out *ChiConfiguration) { *out = *in + out.Templates = in.Templates in.Zookeeper.DeepCopyInto(&out.Zookeeper) if in.Users != nil { in, out := &in.Users, &out.Users diff --git a/pkg/model/creator.go b/pkg/model/creator.go index d47102abd..167d6e6ca 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -17,11 +17,11 @@ package model import ( chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/util" + "k8s.io/apimachinery/pkg/util/intstr" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "github.com/golang/glog" ) @@ -31,27 +31,44 @@ func (r *Reconciler) createServiceChi(chi *chiv1.ClickHouseInstallation) *corev1 serviceName := CreateChiServiceName(chi) glog.V(1).Infof("createServiceChi(%s/%s)", chi.Namespace, serviceName) - return &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: r.chi.Namespace, - Labels: r.labeler.getLabelsCommonObject(), - }, - Spec: corev1.ServiceSpec{ - // ClusterIP: templateDefaultsServiceClusterIP, - Ports: []corev1.ServicePort{ - { - Name: chDefaultHTTPPortName, - Port: chDefaultHTTPPortNumber, - }, - { - Name: chDefaultClientPortName, - Port: chDefaultClientPortNumber, + if template, ok := r.chi.GetOwnServiceTemplate(); ok { + // .templates.ServiceTemplate specified + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: r.chi.Namespace, + Labels: r.labeler.getLabelsCommonObject(), + }, + Spec: *template.Spec.DeepCopy(), + } + service.Spec.Selector = util.MergeStringMaps(service.Spec.Selector, r.labeler.getSelectorCommonObject()) + + return service + } else { + // Incorrect/unknown .templates.ServiceTemplate specified + // Create default Service + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: r.chi.Namespace, + 
Labels: r.labeler.getLabelsCommonObject(), + }, + Spec: corev1.ServiceSpec{ + // ClusterIP: templateDefaultsServiceClusterIP, + Ports: []corev1.ServicePort{ + { + Name: chDefaultHTTPPortName, + Port: chDefaultHTTPPortNumber, + }, + { + Name: chDefaultClientPortName, + Port: chDefaultClientPortNumber, + }, }, + Selector: r.labeler.getSelectorCommonObject(), + Type: "LoadBalancer", }, - Selector: r.labeler.getSelectorCommonObject(), - Type: "LoadBalancer", - }, + } } } @@ -61,7 +78,7 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser statefulSetName := CreateStatefulSetName(replica) glog.V(1).Infof("createServiceReplica(%s/%s) for Set %s", replica.Address.Namespace, serviceName, statefulSetName) - if template, ok := r.chi.GetServiceTemplate(replica.Templates.ServiceTemplate); ok { + if template, ok := replica.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -157,7 +174,6 @@ func (r *Reconciler) createStatefulSet(replica *chiv1.ChiReplica) *apps.Stateful // setupStatefulSetPodTemplate performs PodTemplate setup of StatefulSet func (r *Reconciler) setupStatefulSetPodTemplate(statefulSetObject *apps.StatefulSet, replica *chiv1.ChiReplica) { statefulSetName := CreateStatefulSetName(replica) - podTemplateName := replica.Templates.PodTemplate // Initial PodTemplateSpec value // All the rest fields would be filled later @@ -168,10 +184,10 @@ func (r *Reconciler) setupStatefulSetPodTemplate(statefulSetObject *apps.Statefu } // Specify pod templates - either explicitly defined or default - if podTemplate, ok := r.chi.GetPodTemplate(podTemplateName); ok { + if podTemplate, ok := replica.GetPodTemplate(); ok { // Replica references known PodTemplate copyPodTemplateFrom(statefulSetObject, podTemplate) - glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - template: %s", statefulSetName, podTemplateName) + glog.V(1).Infof("createStatefulSetObjects() for 
statefulSet %s - template used", statefulSetName) } else { // Replica references UNKNOWN PodTemplate copyPodTemplateFrom(statefulSetObject, createDefaultPodTemplate(statefulSetName)) From 26e17e3fdb4c1822db3698f81be383312d023ca6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 10 Jun 2019 17:52:43 +0300 Subject: [PATCH 15/31] env: binary build in verbose mode --- dev/binary_build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/dev/binary_build.sh b/dev/binary_build.sh index 9fbd1bffa..96bf8c9ef 100755 --- a/dev/binary_build.sh +++ b/dev/binary_build.sh @@ -16,6 +16,7 @@ ${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh #CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ${CUR_DIR}/clickhouse-operator ${SRC_ROOT}/cmd/clickhouse-operator CGO_ENABLED=0 go build \ + -v -a \ -ldflags "-X ${REPO}/pkg/version.Version=${VERSION} -X ${REPO}/pkg/version.GitSHA=${GIT_SHA}" \ -o ${OPERATOR_BIN} \ ${SRC_ROOT}/cmd/manager/main.go From f2fd2e9daede95f8a7eaa9029dc69f024d8da743 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 11 Jun 2019 18:54:40 +0300 Subject: [PATCH 16/31] dev: use Service.GeneratedName as name template --- pkg/model/namer.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/pkg/model/namer.go b/pkg/model/namer.go index d92f99294..753ac99bd 100644 --- a/pkg/model/namer.go +++ b/pkg/model/namer.go @@ -164,6 +164,15 @@ func CreateConfigMapCommonUsersName(chi *chop.ClickHouseInstallation) string { // CreateChiServiceName creates a name of a Installation Service resource func CreateChiServiceName(chi *chop.ClickHouseInstallation) string { + if template, ok := chi.GetOwnServiceTemplate(); ok { + // Service template available + if template.GenerateName != "" { + // Service template has explicitly specified service name template + return newReplacerChi(chi).Replace(template.GenerateName) + } + } + + // Create Service name based on default Service Name template return 
newReplacerChi(chi).Replace(chiServiceNamePattern) } @@ -183,6 +192,15 @@ func CreateStatefulSetName(replica *chop.ChiReplica) string { // CreateStatefulSetServiceName returns a name of a StatefulSet-related Service for replica func CreateStatefulSetServiceName(replica *chop.ChiReplica) string { + if template, ok := replica.GetServiceTemplate(); ok { + // Service template available + if template.GenerateName != "" { + // Service template has explicitly specified service name template + return newReplacerReplica(replica).Replace(template.GenerateName) + } + } + + // Create Service name based on default Service Name template return newReplacerReplica(replica).Replace(statefulSetServiceNamePattern) } From 95dea1eb047cc2e338af397806acebe7462427af Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 11 Jun 2019 19:02:57 +0300 Subject: [PATCH 17/31] env: unify operator-install-yaml catter and builder --- ...ator-yaml.sh => cat-clickhouse-operator-install-yaml.sh} | 0 manifests/dev/clickhouse-operator-delete.sh | 2 +- manifests/dev/clickhouse-operator-install.sh | 2 +- manifests/dev/dev-install.sh | 6 +++--- .../operator/build-clickhouse-operator-install-yaml.sh | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) rename manifests/dev/{cat-clickhouse-operator-yaml.sh => cat-clickhouse-operator-install-yaml.sh} (100%) diff --git a/manifests/dev/cat-clickhouse-operator-yaml.sh b/manifests/dev/cat-clickhouse-operator-install-yaml.sh similarity index 100% rename from manifests/dev/cat-clickhouse-operator-yaml.sh rename to manifests/dev/cat-clickhouse-operator-install-yaml.sh diff --git a/manifests/dev/clickhouse-operator-delete.sh b/manifests/dev/clickhouse-operator-delete.sh index f9764a285..c15e0725c 100755 --- a/manifests/dev/clickhouse-operator-delete.sh +++ b/manifests/dev/clickhouse-operator-delete.sh @@ -9,7 +9,7 @@ MANIFEST_ROOT=$(realpath ${CUR_DIR}/..) 
if [[ "${CHOPERATOR_NAMESPACE}" == "kube-system" ]]; then echo "Default k8s namespace 'kube-system' must not be deleted" echo "Delete components only" - kubectl delete --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh) + kubectl delete --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) else echo "Delete ClickHouse Operator namespace ${CHOPERATOR_NAMESPACE}" kubectl delete namespace "${CHOPERATOR_NAMESPACE}" diff --git a/manifests/dev/clickhouse-operator-install.sh b/manifests/dev/clickhouse-operator-install.sh index 76af802bc..64ba7fa70 100755 --- a/manifests/dev/clickhouse-operator-install.sh +++ b/manifests/dev/clickhouse-operator-install.sh @@ -12,4 +12,4 @@ echo "Setup ClickHouse Operator into ${CHOPERATOR_NAMESPACE} namespace" kubectl create namespace "${CHOPERATOR_NAMESPACE}" # Setup into dedicated namespace -kubectl apply --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh) +kubectl apply --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) diff --git a/manifests/dev/dev-install.sh b/manifests/dev/dev-install.sh index 62717dfd8..99a8a8b01 100755 --- a/manifests/dev/dev-install.sh +++ b/manifests/dev/dev-install.sh @@ -8,7 +8,7 @@ echo "Create ${CHOPERATOR_NAMESPACE} namespace" kubectl create namespace "${CHOPERATOR_NAMESPACE}" if [[ "${INSTALL_FROM_ALTINITY_RELEASE_DOCKERHUB}" == "yes" ]]; then - kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" 
${CUR_DIR}/cat-clickhouse-operator-yaml.sh) + kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) # Installation done exit $? @@ -17,10 +17,10 @@ else echo "CHOPERATOR_NAMESPACE=${CHOPERATOR_NAMESPACE}" echo "CHOPERATOR_IMAGE=${CHOPERATOR_IMAGE}" - kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_DEPLOYMENT="no" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh) + kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_DEPLOYMENT="no" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) if [[ "${INSTALL_FROM_DEPLOYMENT_MANIFEST}" == "yes" ]]; then # Install operator from Docker Registry (dockerhub or whatever) - kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_CRD="no" MANIFEST_PRINT_RBAC="no" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh) + kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_CRD="no" MANIFEST_PRINT_RBAC="no" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) fi fi diff --git a/manifests/operator/build-clickhouse-operator-install-yaml.sh b/manifests/operator/build-clickhouse-operator-install-yaml.sh index 2990e4a52..7763c43d7 100755 --- a/manifests/operator/build-clickhouse-operator-install-yaml.sh +++ b/manifests/operator/build-clickhouse-operator-install-yaml.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Full list of available vars check in ${MANIFEST_ROOT}/dev/cat-clickhouse-operator-yaml.sh file +# Full list of available vars check in ${MANIFEST_ROOT}/dev/cat-clickhouse-operator-install-yaml.sh file # Here we just build production all-sections-included 
.yaml manifest with namespace and image parameters CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-kube-system}" @@ -11,4 +11,4 @@ MANIFEST_ROOT=$(realpath ${CUR_DIR}/..) CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" \ CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" \ -${MANIFEST_ROOT}/dev/cat-clickhouse-operator-yaml.sh > ${CUR_DIR}/clickhouse-operator-install.yaml +${MANIFEST_ROOT}/dev/cat-clickhouse-operator-install-yaml.sh > ${CUR_DIR}/clickhouse-operator-install.yaml From 2bc3e236bfacbc48e26db2db6a93f17f265b2f1b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 12 Jun 2019 01:14:15 +0300 Subject: [PATCH 18/31] dev: add Service Templates for Cluster and Shard --- .../v1/type_cluster.go | 6 + .../clickhouse.altinity.com/v1/type_shard.go | 6 + pkg/model/creator.go | 112 +++++++++++++----- pkg/model/labeler.go | 46 ++++++- pkg/model/namer.go | 112 +++++++++++++++--- pkg/model/reconciler.go | 28 ++--- 6 files changed, 248 insertions(+), 62 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go index afe17da67..ed6ffcecb 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go @@ -18,6 +18,12 @@ func (cluster *ChiCluster) InheritTemplates(chi *ClickHouseInstallation) { (&cluster.Templates).MergeFrom(&chi.Spec.Defaults.Templates) } +func (cluster *ChiCluster) GetServiceTemplate() (*ChiServiceTemplate, bool) { + name := cluster.Templates.ServiceTemplate + template, ok := cluster.Chi.GetServiceTemplate(name) + return template, ok +} + func (cluster *ChiCluster) WalkShards( f func(shardIndex int, shard *ChiShard) error, ) []error { diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go index e2d7d49b0..546f32be4 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go @@ -18,6 +18,12 @@ func (shard *ChiShard) 
InheritTemplates(cluster *ChiCluster) { (&shard.Templates).MergeFrom(&cluster.Templates) } +func (shard *ChiShard) GetServiceTemplate() (*ChiServiceTemplate, bool) { + name := shard.Templates.ServiceTemplate + template, ok := shard.Chi.GetServiceTemplate(name) + return template, ok +} + func (shard *ChiShard) WalkReplicas( f func(replica *ChiReplica) error, ) []error { diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 167d6e6ca..aa8f15a45 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -33,17 +33,13 @@ func (r *Reconciler) createServiceChi(chi *chiv1.ClickHouseInstallation) *corev1 glog.V(1).Infof("createServiceChi(%s/%s)", chi.Namespace, serviceName) if template, ok := r.chi.GetOwnServiceTemplate(); ok { // .templates.ServiceTemplate specified - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: r.chi.Namespace, - Labels: r.labeler.getLabelsCommonObject(), - }, - Spec: *template.Spec.DeepCopy(), - } - service.Spec.Selector = util.MergeStringMaps(service.Spec.Selector, r.labeler.getSelectorCommonObject()) - - return service + return r.createServiceFromTemplate( + template, + r.chi.Namespace, + serviceName, + r.labeler.getLabelsChiScope(), + r.labeler.getSelectorChiScope(), + ) } else { // Incorrect/unknown .templates.ServiceTemplate specified // Create default Service @@ -51,7 +47,7 @@ func (r *Reconciler) createServiceChi(chi *chiv1.ClickHouseInstallation) *corev1 ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: r.chi.Namespace, - Labels: r.labeler.getLabelsCommonObject(), + Labels: r.labeler.getLabelsChiScope(), }, Spec: corev1.ServiceSpec{ // ClusterIP: templateDefaultsServiceClusterIP, @@ -65,13 +61,51 @@ func (r *Reconciler) createServiceChi(chi *chiv1.ClickHouseInstallation) *corev1 Port: chDefaultClientPortNumber, }, }, - Selector: r.labeler.getSelectorCommonObject(), + Selector: r.labeler.getSelectorChiScope(), Type: "LoadBalancer", }, } } } +// createServiceCluster +func 
(r *Reconciler) createServiceCluster(cluster *chiv1.ChiCluster) *corev1.Service { + serviceName := CreateClusterServiceName(cluster) + + glog.V(1).Infof("createServiceCluster(%s/%s)", cluster.Address.Namespace, serviceName) + if template, ok := cluster.GetServiceTemplate(); ok { + // .templates.ServiceTemplate specified + return r.createServiceFromTemplate( + template, + cluster.Address.Namespace, + serviceName, + r.labeler.getLabelsClusterScope(cluster), + r.labeler.getSelectorClusterScope(cluster), + ) + } else { + return nil + } +} + +// createServiceShard +func (r *Reconciler) createServiceShard(shard *chiv1.ChiShard) *corev1.Service { + serviceName := CreateShardServiceName(shard) + + glog.V(1).Infof("createServiceShard(%s/%s)", shard.Address.Namespace, serviceName) + if template, ok := shard.GetServiceTemplate(); ok { + // .templates.ServiceTemplate specified + return r.createServiceFromTemplate( + template, + shard.Address.Namespace, + serviceName, + r.labeler.getLabelsShardScope(shard), + r.labeler.getSelectorShardScope(shard), + ) + } else { + return nil + } +} + // createServiceReplica creates new corev1.Service func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Service { serviceName := CreateStatefulSetServiceName(replica) @@ -80,17 +114,13 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser glog.V(1).Infof("createServiceReplica(%s/%s) for Set %s", replica.Address.Namespace, serviceName, statefulSetName) if template, ok := replica.GetServiceTemplate(); ok { // .templates.ServiceTemplate specified - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: replica.Address.Namespace, - Labels: r.labeler.getLabelsReplica(replica, false), - }, - Spec: *template.Spec.DeepCopy(), - } - service.Spec.Selector = util.MergeStringMaps(service.Spec.Selector, r.labeler.getSelectorReplica(replica)) - - return service + return r.createServiceFromTemplate( + template, + 
replica.Address.Namespace, + serviceName, + r.labeler.getLabelsReplicaScope(replica, false), + r.labeler.getSelectorReplicaScope(replica), + ) } else { // Incorrect/unknown .templates.ServiceTemplate specified // Create default Service @@ -98,7 +128,7 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: replica.Address.Namespace, - Labels: r.labeler.getLabelsReplica(replica, false), + Labels: r.labeler.getLabelsReplicaScope(replica, false), }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ @@ -115,7 +145,7 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser Port: chDefaultInterServerPortNumber, }, }, - Selector: r.labeler.getSelectorReplica(replica), + Selector: r.labeler.getSelectorReplicaScope(replica), ClusterIP: templateDefaultsServiceClusterIP, Type: "ClusterIP", }, @@ -123,13 +153,35 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser } } +// createServiceFromTemplate create Service from ChiServiceTemplate and additional info +func (r *Reconciler) createServiceFromTemplate( + template *chiv1.ChiServiceTemplate, + namespace string, + name string, + labels map[string]string, + selector map[string]string, +) *corev1.Service { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: *template.Spec.DeepCopy(), + } + // Append provided Selector to already specified Selector in template + service.Spec.Selector = util.MergeStringMaps(service.Spec.Selector, selector) + + return service +} + // createConfigMapReplica creates new corev1.ConfigMap func (r *Reconciler) createConfigMapReplica(replica *chiv1.ChiReplica) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: CreateConfigMapPodName(replica), Namespace: replica.Address.Namespace, - Labels: r.labeler.getLabelsReplica(replica, false), + Labels: 
r.labeler.getLabelsReplicaScope(replica, false), }, Data: r.chConfigSectionsGenerator.CreateConfigsPod(replica), } @@ -147,13 +199,13 @@ func (r *Reconciler) createStatefulSet(replica *chiv1.ChiReplica) *apps.Stateful ObjectMeta: metav1.ObjectMeta{ Name: statefulSetName, Namespace: replica.Address.Namespace, - Labels: r.labeler.getLabelsReplica(replica, true), + Labels: r.labeler.getLabelsReplicaScope(replica, true), }, Spec: apps.StatefulSetSpec{ Replicas: &replicasNum, ServiceName: serviceName, Selector: &metav1.LabelSelector{ - MatchLabels: r.labeler.getSelectorReplica(replica), + MatchLabels: r.labeler.getSelectorReplicaScope(replica), }, // IMPORTANT // VolumeClaimTemplates are to be setup later @@ -179,7 +231,7 @@ func (r *Reconciler) setupStatefulSetPodTemplate(statefulSetObject *apps.Statefu // All the rest fields would be filled later statefulSetObject.Spec.Template = corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: r.labeler.getLabelsReplica(replica, true), + Labels: r.labeler.getLabelsReplicaScope(replica, true), }, } diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go index f8f0a04d6..8e8e2d3e3 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -33,7 +33,7 @@ func NewLabeler(version string, chi *chi.ClickHouseInstallation) *Labeler { } } -func (l *Labeler) getLabelsCommonObject() map[string]string { +func (l *Labeler) getLabelsChiScope() map[string]string { return map[string]string{ LabelApp: LabelAppValue, LabelChop: l.version, @@ -41,14 +41,52 @@ func (l *Labeler) getLabelsCommonObject() map[string]string { } } -func (l *Labeler) getSelectorCommonObject() map[string]string { +func (l *Labeler) getSelectorChiScope() map[string]string { return map[string]string{ LabelApp: LabelAppValue, LabelChi: getNamePartChiName(l.chi), } } -func (l *Labeler) getLabelsReplica(replica *chi.ChiReplica, zk bool) map[string]string { +func (l *Labeler) getLabelsClusterScope(cluster *chi.ChiCluster) map[string]string { + return 
map[string]string{ + LabelApp: LabelAppValue, + LabelChop: l.version, + LabelChi: getNamePartChiName(cluster), + LabelCluster: getNamePartClusterName(cluster), + } +} + +func (l *Labeler) getSelectorClusterScope(cluster *chi.ChiCluster) map[string]string { + return map[string]string{ + LabelApp: LabelAppValue, + // skip chop + LabelChi: getNamePartChiName(cluster), + LabelCluster: getNamePartClusterName(cluster), + } +} + +func (l *Labeler) getLabelsShardScope(shard *chi.ChiShard) map[string]string { + return map[string]string{ + LabelApp: LabelAppValue, + LabelChop: l.version, + LabelChi: getNamePartChiName(shard), + LabelCluster: getNamePartClusterName(shard), + LabelShard: getNamePartShardName(shard), + } +} + +func (l *Labeler) getSelectorShardScope(shard *chi.ChiShard) map[string]string { + return map[string]string{ + LabelApp: LabelAppValue, + // skip chop + LabelChi: getNamePartChiName(shard), + LabelCluster: getNamePartClusterName(shard), + LabelShard: getNamePartShardName(shard), + } +} + +func (l *Labeler) getLabelsReplicaScope(replica *chi.ChiReplica, zk bool) map[string]string { labels := map[string]string{ LabelApp: LabelAppValue, LabelChop: l.version, @@ -64,7 +102,7 @@ func (l *Labeler) getLabelsReplica(replica *chi.ChiReplica, zk bool) map[string] return labels } -func (l *Labeler) getSelectorReplica(replica *chi.ChiReplica) map[string]string { +func (l *Labeler) getSelectorReplicaScope(replica *chi.ChiReplica) map[string]string { return map[string]string{ LabelApp: LabelAppValue, // skip chop diff --git a/pkg/model/namer.go b/pkg/model/namer.go index 753ac99bd..d2252c481 100644 --- a/pkg/model/namer.go +++ b/pkg/model/namer.go @@ -34,6 +34,12 @@ const ( // chiServiceNamePattern is a template of CHI Service name chiServiceNamePattern = "clickhouse-{chi}" + // clusterServiceNamePattern is a template of cluster Service name + clusterServiceNamePattern = "cluster-{chi}-{cluster}" + + // shardServiceNamePattern is a template of shard Service name + 
shardServiceNamePattern = "cluster-{chi}-{cluster}-{shard}" + // statefulSetNamePattern is a template of replica's StatefulSet's name statefulSetNamePattern = "chi-{chi}-{cluster}-{shard}-{replica}" @@ -101,29 +107,86 @@ func namePartReplicaNameID(name string) string { func getNamePartChiName(obj interface{}) string { switch obj.(type) { - case *chop.ChiReplica: - replica := obj.(*chop.ChiReplica) - return namePartChiName(replica.Address.ChiName) case *chop.ClickHouseInstallation: chi := obj.(*chop.ClickHouseInstallation) return namePartChiName(chi.Name) + case *chop.ChiCluster: + cluster := obj.(*chop.ChiCluster) + return namePartChiName(cluster.Address.ChiName) + case *chop.ChiShard: + shard := obj.(*chop.ChiShard) + return namePartChiName(shard.Address.ChiName) + case *chop.ChiReplica: + replica := obj.(*chop.ChiReplica) + return namePartChiName(replica.Address.ChiName) } return "ERROR" } -func getNamePartClusterName(replica *chop.ChiReplica) string { - return namePartClusterName(replica.Address.ClusterName) +func getNamePartClusterName(obj interface{}) string { + switch obj.(type) { + case *chop.ChiCluster: + cluster := obj.(*chop.ChiCluster) + return namePartClusterName(cluster.Address.ClusterName) + case *chop.ChiShard: + shard := obj.(*chop.ChiShard) + return namePartClusterName(shard.Address.ClusterName) + case *chop.ChiReplica: + replica := obj.(*chop.ChiReplica) + return namePartClusterName(replica.Address.ClusterName) + } + + return "ERROR" } -func getNamePartShardName(replica *chop.ChiReplica) string { - return namePartShardName(replica.Address.ShardName) +func getNamePartShardName(obj interface{}) string { + switch obj.(type) { + case *chop.ChiShard: + shard := obj.(*chop.ChiShard) + return namePartShardName(shard.Address.ShardName) + case *chop.ChiReplica: + replica := obj.(*chop.ChiReplica) + return namePartShardName(replica.Address.ShardName) + } + + return "ERROR" } func getNamePartReplicaName(replica *chop.ChiReplica) string { return 
namePartReplicaName(replica.Address.ReplicaName) } +func newReplacerChi(chi *chop.ClickHouseInstallation) *strings.Replacer { + return strings.NewReplacer( + "{chi}", namePartChiName(chi.Name), + "{chiID}", namePartChiNameID(chi.Name), + ) +} + +func newReplacerCluster(cluster *chop.ChiCluster) *strings.Replacer { + return strings.NewReplacer( + "{chi}", namePartChiName(cluster.Address.ChiName), + "{chiID}", namePartChiNameID(cluster.Address.ChiName), + "{cluster}", namePartClusterName(cluster.Address.ClusterName), + "{clusterID}", namePartClusterNameID(cluster.Address.ClusterName), + "{clusterIndex}", strconv.Itoa(cluster.Address.ClusterIndex), + ) +} + +func newReplacerShard(shard *chop.ChiShard) *strings.Replacer { + return strings.NewReplacer( + "{chi}", namePartChiName(shard.Address.ChiName), + "{chiID}", namePartChiNameID(shard.Address.ChiName), + "{cluster}", namePartClusterName(shard.Address.ClusterName), + "{clusterID}", namePartClusterNameID(shard.Address.ClusterName), + "{clusterIndex}", strconv.Itoa(shard.Address.ClusterIndex), + "{shard}", namePartShardName(shard.Address.ShardName), + "{shardID}", namePartShardNameID(shard.Address.ShardName), + "{shardIndex}", strconv.Itoa(shard.Address.ShardIndex), + ) +} + func newReplacerReplica(replica *chop.ChiReplica) *strings.Replacer { return strings.NewReplacer( "{chi}", namePartChiName(replica.Address.ChiName), @@ -140,13 +203,6 @@ func newReplacerReplica(replica *chop.ChiReplica) *strings.Replacer { ) } -func newReplacerChi(chi *chop.ClickHouseInstallation) *strings.Replacer { - return strings.NewReplacer( - "{chi}", namePartChiName(chi.Name), - "{chiID}", namePartChiNameID(chi.Name), - ) -} - // CreateConfigMapPodName returns a name for a ConfigMap for replica's pod func CreateConfigMapPodName(replica *chop.ChiReplica) string { return newReplacerReplica(replica).Replace(configMapDeploymentNamePattern) @@ -185,6 +241,34 @@ func CreateChiServiceFQDN(chi *chop.ClickHouseInstallation) string { ) } +// 
CreateClusterServiceName returns a name of a cluster's Service +func CreateClusterServiceName(cluster *chop.ChiCluster) string { + if template, ok := cluster.GetServiceTemplate(); ok { + // Service template available + if template.GenerateName != "" { + // Service template has explicitly specified service name template + return newReplacerCluster(cluster).Replace(template.GenerateName) + } + } + + // Create Service name based on default Service Name template + return newReplacerCluster(cluster).Replace(clusterServiceNamePattern) +} + +// CreateShardServiceName returns a name of a shard's Service +func CreateShardServiceName(shard *chop.ChiShard) string { + if template, ok := shard.GetServiceTemplate(); ok { + // Service template available + if template.GenerateName != "" { + // Service template has explicitly specified service name template + return newReplacerShard(shard).Replace(template.GenerateName) + } + } + + // Create Service name based on default Service Name template + return newReplacerShard(shard).Replace(shardServiceNamePattern) +} + // CreateStatefulSetName creates a name of a StatefulSet for replica func CreateStatefulSetName(replica *chop.ChiReplica) string { return newReplacerReplica(replica).Replace(statefulSetNamePattern) diff --git a/pkg/model/reconciler.go b/pkg/model/reconciler.go index 4fb416fd2..8c9440e55 100644 --- a/pkg/model/reconciler.go +++ b/pkg/model/reconciler.go @@ -84,22 +84,22 @@ func (r *Reconciler) reconcileChi(chi *chiv1.ClickHouseInstallation) error { // reconcileCluster reconciles Cluster, excluding nested shards func (r *Reconciler) reconcileCluster(cluster *chiv1.ChiCluster) error { - // Add replica's Service - //service := r.createServiceReplica(replica) - //if err := r.funcs.ReconcileService(service); err != nil { - // return err - //} - return nil + // Add Cluster's Service + if service := r.createServiceCluster(cluster); service != nil { + return r.funcs.ReconcileService(service) + } else { + return nil + } } // 
reconcileShard reconciles Shard, excluding nested replicas func (r *Reconciler) reconcileShard(shard *chiv1.ChiShard) error { - // Add replica's Service - //service := r.createServiceReplica(replica) - //if err := r.funcs.ReconcileService(service); err != nil { - // return err - //} - return nil + // Add Shard's Service + if service := r.createServiceShard(shard); service != nil { + return r.funcs.ReconcileService(service) + } else { + return nil + } } // reconcileReplica reconciles Replica @@ -143,7 +143,7 @@ func (r *Reconciler) reconcileChiConfigMaps() error { ObjectMeta: metav1.ObjectMeta{ Name: CreateConfigMapCommonName(r.chi), Namespace: r.chi.Namespace, - Labels: r.labeler.getLabelsCommonObject(), + Labels: r.labeler.getLabelsChiScope(), }, // Data contains several sections which are to be several xml chopConfig files Data: r.chConfigSectionsGenerator.commonConfigSections, @@ -157,7 +157,7 @@ func (r *Reconciler) reconcileChiConfigMaps() error { ObjectMeta: metav1.ObjectMeta{ Name: CreateConfigMapCommonUsersName(r.chi), Namespace: r.chi.Namespace, - Labels: r.labeler.getLabelsCommonObject(), + Labels: r.labeler.getLabelsChiScope(), }, // Data contains several sections which are to be several xml chopConfig files Data: r.chConfigSectionsGenerator.commonUsersConfigSections, From 2db0e9ee6a89ab7af44156254f4eb5db04c6c8eb Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 12 Jun 2019 16:58:41 +0300 Subject: [PATCH 19/31] dev: enhance PVC deletion and Service Templates processing --- ...3-pod-per-host-default-storage-class.yaml} | 44 +++++++------ ...vanced-04-pod-per-host-local-storage.yaml} | 42 ++++++------- ...0-zones-aws-02-pod-per-host-baremetal(old) | 62 ------------------- ...ouse-operator-template-01-section-crd.yaml | 2 + .../operator/clickhouse-operator-install.yaml | 2 + .../v1/type_replica.go | 2 +- pkg/controller/chi/controller.go | 19 ------ pkg/controller/chi/deleters.go | 25 +++++--- pkg/controller/chi/labeler.go | 18 +++--- 
pkg/controller/chi/lister.go | 28 +++++++++ pkg/model/creator.go | 15 ++++- pkg/model/labeler.go | 2 +- pkg/model/namer.go | 2 +- 13 files changed, 113 insertions(+), 150 deletions(-) rename docs/examples/{100-zones-aws-02-pod-per-host-baremetal.yaml => 10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml} (59%) mode change 100755 => 100644 rename docs/examples/{100-zones-aws-02-pod-per-host-baremetal-localstorage.yaml => 10-zones-04-advanced-04-pod-per-host-local-storage.yaml} (60%) mode change 100755 => 100644 delete mode 100755 docs/examples/100-zones-aws-02-pod-per-host-baremetal(old) create mode 100644 pkg/controller/chi/lister.go diff --git a/docs/examples/100-zones-aws-02-pod-per-host-baremetal.yaml b/docs/examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml old mode 100755 new mode 100644 similarity index 59% rename from docs/examples/100-zones-aws-02-pod-per-host-baremetal.yaml rename to docs/examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml index 20ca775f4..3a6e34761 --- a/docs/examples/100-zones-aws-02-pod-per-host-baremetal.yaml +++ b/docs/examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml @@ -17,41 +17,39 @@ apiVersion: "clickhouse.altinity.com/v1" kind: "ClickHouseInstallation" metadata: - name: "zones-pod-host" + name: "ch-per-host-pvc" spec: defaults: templates: - podTemplate: clickhouse-per-host-in-baremetal + podTemplate: clickhouse-per-host volumeClaimTemplate: storage-vc-template configuration: + templates: + serviceTemplate: ch-service clusters: - - name: zoned-cluster + - name: zoned layout: shardsCount: 2 templates: + serviceTemplates: + - name: ch-service + generateName: chendpoint + spec: + ports: + - name: http + port: 8123 + - name: client + port: 9000 + type: LoadBalancer podTemplates: # Specify Pod Templates with affinity - - name: clickhouse-per-host-in-baremetal + - name: clickhouse-per-host + zone: + key: "clickhouse" + values: + - "allow" + distribution: 
"OnePerHost" spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "clickhouse" - operator: In - values: - - "allow" - # Specify Pod anti-affinity to Pods with the same label "/app" on the same "hostname" - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "clickhouse.altinity.com/app" - operator: In - values: - - "chop" - topologyKey: "kubernetes.io/hostname" containers: - name: clickhouse-pod image: yandex/clickhouse-server:19.3.7 diff --git a/docs/examples/100-zones-aws-02-pod-per-host-baremetal-localstorage.yaml b/docs/examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml old mode 100755 new mode 100644 similarity index 60% rename from docs/examples/100-zones-aws-02-pod-per-host-baremetal-localstorage.yaml rename to docs/examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml index 0d24f187b..cf42af7f1 --- a/docs/examples/100-zones-aws-02-pod-per-host-baremetal-localstorage.yaml +++ b/docs/examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml @@ -17,40 +17,38 @@ apiVersion: "clickhouse.altinity.com/v1" kind: "ClickHouseInstallation" metadata: - name: "ch-localstorage" + name: "ch-per-host-localstorage" spec: defaults: templates: - podTemplate: clickhouse-per-host-in-baremetal-localstorage + podTemplate: clickhouse-per-host-localstorage configuration: + templates: + serviceTemplate: ch-service clusters: - name: ch-localstorage layout: shardsCount: 2 templates: + serviceTemplates: + - name: ch-service + generateName: chendpoint + spec: + ports: + - name: http + port: 8123 + - name: client + port: 9000 + type: LoadBalancer podTemplates: # Specify Pod Templates with affinity - - name: clickhouse-per-host-in-baremetal-localstorage + - name: clickhouse-per-host-localstorage + zone: + key: "clickhouse" + values: + - "allow" + distribution: "OnePerHost" spec: - affinity: - nodeAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "clickhouse" - operator: In - values: - - "allow" - # Specify Pod anti-affinity to Pods with the same label "/app" on the same "hostname" - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "clickhouse.altinity.com/app" - operator: In - values: - - "chop" - topologyKey: "kubernetes.io/hostname" volumes: # Specify volume as path on local filesystem as a directory which will be created, if need be - name: local-path diff --git a/docs/examples/100-zones-aws-02-pod-per-host-baremetal(old) b/docs/examples/100-zones-aws-02-pod-per-host-baremetal(old) deleted file mode 100755 index 0f9ff769c..000000000 --- a/docs/examples/100-zones-aws-02-pod-per-host-baremetal(old) +++ /dev/null @@ -1,62 +0,0 @@ -# -# AWS-specific labels, applicable in 'nodeAffinity' statements -# -# beta.kubernetes.io/arch=amd64 -# beta.kubernetes.io/instance-type=t2.medium -# beta.kubernetes.io/os=linux -# -# failure-domain.beta.kubernetes.io/region=us-east-1 -# failure-domain.beta.kubernetes.io/zone=us-east-1a -# -# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal -# kubernetes.io/role=node -# node-role.kubernetes.io/node= -# -# kops.k8s.io/instancegroup=nodes2 -# -apiVersion: "clickhouse.altinity.com/v1" -kind: "ClickHouseInstallation" -metadata: - name: "zones-pod-host" -spec: - defaults: - templates: - podTemplate: clickhouse-per-host-baremetal - configuration: - clusters: - - name: zoned-cluster - layout: - shardsCount: 3 - templates: - podTemplate: pod-template-with-volume - volumeClaimTemplate: storage-vc-template - templates: - podTemplates: - # Specify Pod Templates with affinity - - - name: clickhouse-per-host-in-baremetal - spec: - # Specify Pod anti-affinity to Pods with the same label "/app" on the same "hostname" - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - 
key: "clickhouse.altinity.com/app" - operator: In - values: - - "chop" - topologyKey: "kubernetes.io/hostname" - containers: - - name: clickhouse-pod - image: yandex/clickhouse-server:19.3.7 - volumeMounts: - # Specify reference to volume on local filesystem - - name: local-path - mountPath: /var/lib/clickhouse - ports: - - name: http - containerPort: 8123 - - name: client - containerPort: 9000 - - name: interserver - containerPort: 9009 diff --git a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml index 712bde455..5ae07ac33 100644 --- a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml +++ b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml @@ -239,6 +239,8 @@ spec: properties: name: type: string + generateName: + type: string spec: # TODO specify ServiceSpec type: object diff --git a/manifests/operator/clickhouse-operator-install.yaml b/manifests/operator/clickhouse-operator-install.yaml index 6f17ce9bb..20b69c806 100644 --- a/manifests/operator/clickhouse-operator-install.yaml +++ b/manifests/operator/clickhouse-operator-install.yaml @@ -239,6 +239,8 @@ spec: properties: name: type: string + generateName: + type: string spec: # TODO specify ServiceSpec type: object diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go index 0ad1a2cf9..1520fef89 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go @@ -19,7 +19,7 @@ func (replica *ChiReplica) InheritTemplates(shard *ChiShard) { } func (replica *ChiReplica) GetPodTemplate() (*ChiPodTemplate, bool) { - name := replica.Templates.ServiceTemplate + name := replica.Templates.PodTemplate template, ok := replica.Chi.GetPodTemplate(name) return template, ok } diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 52f11a87c..890e4171c 100644 --- 
a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -28,7 +28,6 @@ import ( apps "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" @@ -639,21 +638,3 @@ func waitForCacheSync(name string, stopCh <-chan struct{}, cacheSyncs ...cache.I glog.V(1).Infof("Caches are synced for %s controller", name) return true } - -// clusterWideSelector returns labels.Selector object -func clusterWideSelector(name string) labels.Selector { - return labels.SelectorFromSet(labels.Set{ - chopmodels.LabelChop: name, - }) - /* - glog.V(2).Infof("ClickHouseInstallation (%q) listing controlled resources", chi.Name) - ssList, err := c.statefulSetLister.StatefulSets(chi.Namespace).List(clusterWideSelector(chi.Name)) - if err != nil { - return err - } - // Listing controlled resources - for i := range ssList { - glog.V(2).Infof("ClickHouseInstallation (%q) controlls StatefulSet: %q", chi.Name, ssList[i].Name) - } - */ -} diff --git a/pkg/controller/chi/deleters.go b/pkg/controller/chi/deleters.go index 8a0e9f85d..b41b4d935 100644 --- a/pkg/controller/chi/deleters.go +++ b/pkg/controller/chi/deleters.go @@ -15,10 +15,9 @@ package chi import ( - "github.com/golang/glog" - chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" chopmodel "github.com/altinity/clickhouse-operator/pkg/model" + "github.com/golang/glog" apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -162,18 +161,26 @@ func (c *Controller) statefulSetDelete(replica *chop.ChiReplica) error { // persistentVolumeClaimDelete deletes PersistentVolumeClaim func (c *Controller) persistentVolumeClaimDelete(replica *chop.ChiReplica) error { - name := "volumeclaim-template-" + chopmodel.CreatePodName(replica) - namespace := replica.Address.Namespace - if 
!chopmodel.ReplicaCanDeletePVC(replica) { - glog.V(1).Infof("KEPT PersistentVolumeClaim %s/%s", namespace, name) + glog.V(1).Infof("PVC should not be deleted, leave them intact") return nil } - if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(name, newDeleteOptions()); err == nil { - glog.V(1).Infof("OK delete PersistentVolumeClaim %s/%s", namespace, name) + namespace := replica.Address.Namespace + labeler := chopmodel.NewLabeler(c.version, replica.Chi) + listOptions := newListOptions(labeler.GetSelectorReplicaScope(replica)) + if list, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(listOptions); err == nil { + glog.V(1).Infof("OK get list of PVC for replica %s/%s", namespace, replica.Name) + for i := range list.Items { + pvc := &list.Items[i] + if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(pvc.Name, newDeleteOptions()); err == nil { + glog.V(1).Infof("OK delete PVC %s/%s", namespace, pvc.Name) + } else { + glog.V(1).Infof("FAIL delete PVC %s/%s %v", namespace, pvc.Name, err) + } + } } else { - glog.V(1).Infof("FAIL delete PersistentVolumeClaim %s/%s %v", namespace, name, err) + glog.V(1).Infof("FAIL get list of PVC for replica %s/%s %v", namespace, replica.Name, err) } return nil diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go index 86bffaaee..44d5f8f2f 100644 --- a/pkg/controller/chi/labeler.go +++ b/pkg/controller/chi/labeler.go @@ -51,21 +51,21 @@ func (c *Controller) labelMyObjectsTree() { namespace, ok2 := c.runtimeParams["OPERATOR_POD_NAMESPACE"] if !ok1 || !ok2 { - glog.V(1).Info("ERROR fetch Pod name out of %s/%s", namespace, podName) + glog.V(1).Infof("ERROR fetch Pod name out of %s/%s", namespace, podName) return } // Pod namespaced name found, fetch it pod, err := c.podLister.Pods(namespace).Get(podName) if err != nil { - glog.V(1).Info("ERROR get Pod %s/%s", namespace, podName) + glog.V(1).Infof("ERROR get Pod %s/%s", namespace, podName) return } // Put 
label on the Pod pod.Labels["version"] = c.version if _, err := c.kubeClient.CoreV1().Pods(namespace).Update(pod); err != nil { - glog.V(1).Info("ERROR put label on Pod %s/%s", namespace, podName) + glog.V(1).Infof("ERROR put label on Pod %s/%s %v", namespace, podName, err) } // Find parent ReplicaSet @@ -81,21 +81,21 @@ func (c *Controller) labelMyObjectsTree() { if replicaSetName == "" { // ReplicaSet not found - glog.V(1).Info("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName) + glog.V(1).Infof("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName) return } // ReplicaSet namespaced name found, fetch it replicaSet, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Get(replicaSetName, v1.GetOptions{}) if err != nil { - glog.V(1).Info("ERROR get ReplicaSet %s/%s", namespace, replicaSetName) + glog.V(1).Infof("ERROR get ReplicaSet %s/%s %v", namespace, replicaSetName, err) return } // Put label on the ReplicaSet replicaSet.Labels["version"] = c.version if _, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Update(replicaSet); err != nil { - glog.V(1).Info("ERROR put label on ReplicaSet %s/%s", namespace, replicaSetName) + glog.V(1).Infof("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err) } // Find parent Deployment @@ -111,20 +111,20 @@ func (c *Controller) labelMyObjectsTree() { if deploymentName == "" { // Deployment not found - glog.V(1).Info("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName) + glog.V(1).Infof("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName) return } // Deployment namespaced name found, fetch it deployment, err := c.kubeClient.AppsV1().Deployments(namespace).Get(deploymentName, v1.GetOptions{}) if err != nil { - glog.V(1).Info("ERROR get Deployment %s/%s", namespace, deploymentName) + glog.V(1).Infof("ERROR get Deployment %s/%s", namespace, deploymentName) return } // Put label on the Deployment 
deployment.Labels["version"] = c.version if _, err := c.kubeClient.AppsV1().Deployments(namespace).Update(deployment); err != nil { - glog.V(1).Info("ERROR put label on Deployment %s/%s", namespace, deploymentName) + glog.V(1).Infof("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err) } } diff --git a/pkg/controller/chi/lister.go b/pkg/controller/chi/lister.go new file mode 100644 index 000000000..556575b47 --- /dev/null +++ b/pkg/controller/chi/lister.go @@ -0,0 +1,28 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package chi + +import ( + "k8s.io/apimachinery/pkg/labels" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func newListOptions(labelsMap map[string]string) metav1.ListOptions { + labelSelector := labels.SelectorFromSet(labelsMap) + return metav1.ListOptions{ + LabelSelector: labelSelector.String(), + } +} diff --git a/pkg/model/creator.go b/pkg/model/creator.go index aa8f15a45..0c3e7bc2f 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -119,7 +119,7 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser replica.Address.Namespace, serviceName, r.labeler.getLabelsReplicaScope(replica, false), - r.labeler.getSelectorReplicaScope(replica), + r.labeler.GetSelectorReplicaScope(replica), ) } else { // Incorrect/unknown .templates.ServiceTemplate specified @@ -145,7 +145,7 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser Port: chDefaultInterServerPortNumber, }, }, - Selector: r.labeler.getSelectorReplicaScope(replica), + Selector: r.labeler.GetSelectorReplicaScope(replica), ClusterIP: templateDefaultsServiceClusterIP, Type: "ClusterIP", }, @@ -161,6 +161,15 @@ func (r *Reconciler) createServiceFromTemplate( labels map[string]string, selector map[string]string, ) *corev1.Service { + // Verify Ports + for i := range template.Spec.Ports { + servicePort := &template.Spec.Ports[i] + if (servicePort.Port < 1) || (servicePort.Port > 65535) { + glog.V(1).Infof("createServiceFromTemplate(%s/%s) INCORRECT PORT: %d ", namespace, name, servicePort.Port ) + return nil + } + } + service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -205,7 +214,7 @@ func (r *Reconciler) createStatefulSet(replica *chiv1.ChiReplica) *apps.Stateful Replicas: &replicasNum, ServiceName: serviceName, Selector: &metav1.LabelSelector{ - MatchLabels: r.labeler.getSelectorReplicaScope(replica), + MatchLabels: r.labeler.GetSelectorReplicaScope(replica), }, // IMPORTANT // VolumeClaimTemplates are to be 
setup later diff --git a/pkg/model/labeler.go index 8e8e2d3e3..059907ef5 100644 --- a/pkg/model/labeler.go +++ b/pkg/model/labeler.go @@ -102,7 +102,7 @@ func (l *Labeler) getLabelsReplicaScope(replica *chi.ChiReplica, zk bool) map[st return labels } -func (l *Labeler) getSelectorReplicaScope(replica *chi.ChiReplica) map[string]string { +func (l *Labeler) GetSelectorReplicaScope(replica *chi.ChiReplica) map[string]string { return map[string]string{ LabelApp: LabelAppValue, // skip chop diff --git a/pkg/model/namer.go index d2252c481..3eb79ac9b 100644 --- a/pkg/model/namer.go +++ b/pkg/model/namer.go @@ -38,7 +38,7 @@ const ( clusterServiceNamePattern = "cluster-{chi}-{cluster}" // shardServiceNamePattern is a template of shard Service name - shardServiceNamePattern = "cluster-{chi}-{cluster}-{shard}" + shardServiceNamePattern = "shard-{chi}-{cluster}-{shard}" // statefulSetNamePattern is a template of replica's StatefulSet's name statefulSetNamePattern = "chi-{chi}-{cluster}-{shard}-{replica}" From 5c0ba4063d484f382ff00408cb12ea7ec7a12c7e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 12 Jun 2019 17:21:59 +0300 Subject: [PATCH 20/31] dev: add <cluster> and <shard> macro --- pkg/model/ch_config.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/model/ch_config.go index da76cc2f2..9d4751d63 100644 --- a/pkg/model/ch_config.go +++ b/pkg/model/ch_config.go @@ -286,6 +286,11 @@ func (c *ClickHouseConfigGenerator) GetHostMacros(replica *chiv1.ChiReplica) str // 0-based shard index within all-shards-one-replica-cluster would always be GlobalReplicaIndex cline(b, 8, "<%s-shard>%d", allShardsOneReplicaClusterName, replica.Address.GlobalReplicaIndex) + // <cluster> and <shard> macros are applicable to main cluster only. 
All aux clusters do not have ambiguous macros + // <cluster> macro + cline(b, 8, "<cluster>%s</cluster>", replica.Address.ClusterName) + // <shard> macro + cline(b, 8, "<shard>%s</shard>", replica.Address.ShardName) // replica id = full deployment id // full deployment id is unique to identify replica within the cluster cline(b, 8, "<replica>%s</replica>", CreatePodHostname(replica)) From bed0616832ea512f9dcf466026e2cbd9de43945e Mon Sep 17 00:00:00 2001 From: alex-zaitsev Date: Sun, 16 Jun 2019 03:43:50 +0300 Subject: [PATCH 21/31] CHO-120 Additional system level metrics added for memory/disk tracking (#116) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit total memory consumed by PKs – metric.MemoryPrimaryKeyBytesAllocated total memory consumed by dicts – metric.MemoryDictionaryBytesAllocated total disk size for all tables – metric.DiskDataBytes free disk size – metric.DiskFreeBytes Those metrics do not exist in system.metrics and extracted from other tables or using function calls. --- pkg/apis/metrics/fetcher.go | 82 +++++++++++++++++++++++++++++-------- 1 file changed, 64 insertions(+), 18 deletions(-) diff --git a/pkg/apis/metrics/fetcher.go index 8102846ba..91ffb7370 100644 --- a/pkg/apis/metrics/fetcher.go +++ b/pkg/apis/metrics/fetcher.go @@ -22,26 +22,54 @@ import ( const ( queryMetricsSQL = ` - SELECT - concat('metric.', metric) AS metric, - toString(value) AS value, - '' AS description, - 'gauge' AS type - FROM system.asynchronous_metrics - UNION ALL - SELECT - concat('metric.', metric) AS metric, - toString(value) AS value, - description AS description, - 'gauge' AS type - FROM system.metrics + SELECT + concat('metric.', metric) AS metric, + toString(value) AS value, + '' AS description, + 'gauge' AS type + FROM system.asynchronous_metrics + UNION ALL + SELECT + concat('metric.', metric) AS metric, + toString(value) AS value, + description AS description, + 'gauge' AS type + FROM system.metrics UNION ALL SELECT - concat('event.', event) AS metric, - 
toString(value) AS value, - description AS description, - 'counter' AS type - FROM system.events` + concat('event.', event) AS metric, + toString(value) AS value, + description AS description, + 'counter' AS type + FROM system.events + UNION ALL + SELECT + 'metric.DiskDataBytes' AS metric, + toString(sum(bytes_on_disk)) AS value, + 'Total data size for all ClickHouse tables' AS description, + 'gauge' AS type + FROM system.parts + UNION ALL + SELECT + 'metric.MemoryPrimaryKeyBytesAllocated' AS metric, + toString(sum(primary_key_bytes_in_memory_allocated)) AS value, + 'Memory size allocated for primary keys' AS description, + 'gauge' AS type + FROM system.parts + UNION ALL + SELECT + 'metric.MemoryDictionaryBytesAllocated' AS metric, + toString(sum(bytes_allocated)) AS value, + 'Memory size allocated for dictionaries' AS description, + 'gauge' AS type + FROM system.dictionaries + UNION ALL + SELECT + 'metric.DiskFreeBytes' AS metric, + toString(filesystemFree()) AS value, + 'Free disk space available at file system' AS description, + 'gauge' AS type + ` queryTableSizesSQL = ` SELECT @@ -114,3 +142,21 @@ func (f *Fetcher) clickHouseQueryTableSizes(data *[][]string) error { } return nil } + +// clickHouseDiskFreeSize requests total data disk and free disk sizes +// data is a concealed output +func (f *Fetcher) clickHouseDiskFreeSize(data *[]string) error { + conn := f.newConn() + if rows, err := conn.Query(heredoc.Doc(queryDiskFreeSQL)); err != nil { + return err + } else { + for rows.Next() { + var total_bytes, free_bytes string + if err := rows.Scan(&total_bytes, &free_bytes); err == nil { + *data = []string{total_bytes, free_bytes} + } + break // only one row is expected + } + } + return nil +} From b8db0945036a1b77aae327dc613f7306f4f9a47f Mon Sep 17 00:00:00 2001 From: alex-zaitsev Date: Sun, 16 Jun 2019 03:45:46 +0300 Subject: [PATCH 22/31] Update release version --- release | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release b/release index 
0d91a54c7..1d0ba9ea1 100644 --- a/release +++ b/release @@ -1 +1 @@ -0.3.0 +0.4.0 From 34f995bea646d2e1b01def9917f185862789f141 Mon Sep 17 00:00:00 2001 From: alex-zaitsev Date: Mon, 17 Jun 2019 12:23:13 +0300 Subject: [PATCH 23/31] Fixed SQL formatting and removed obsolete code --- pkg/apis/metrics/fetcher.go | 78 ++++++++++++++----------------------- 1 file changed, 30 insertions(+), 48 deletions(-) diff --git a/pkg/apis/metrics/fetcher.go b/pkg/apis/metrics/fetcher.go index 91ffb7370..8c463c640 100644 --- a/pkg/apis/metrics/fetcher.go +++ b/pkg/apis/metrics/fetcher.go @@ -30,45 +30,45 @@ const ( FROM system.asynchronous_metrics UNION ALL SELECT - concat('metric.', metric) AS metric, + concat('metric.', metric) AS metric, toString(value) AS value, description AS description, 'gauge' AS type FROM system.metrics - UNION ALL - SELECT + UNION ALL + SELECT concat('event.', event) AS metric, toString(value) AS value, description AS description, 'counter' AS type - FROM system.events - UNION ALL - SELECT - 'metric.DiskDataBytes' AS metric, - toString(sum(bytes_on_disk)) AS value, - 'Total data size for all ClickHouse tables' AS description, - 'gauge' AS type - FROM system.parts - UNION ALL - SELECT - 'metric.MemoryPrimaryKeyBytesAllocated' AS metric, - toString(sum(primary_key_bytes_in_memory_allocated)) AS value, - 'Memory size allocated for primary keys' AS description, - 'gauge' AS type - FROM system.parts - UNION ALL - SELECT - 'metric.MemoryDictionaryBytesAllocated' AS metric, - toString(sum(bytes_allocated)) AS value, - 'Memory size allocated for dictionaries' AS description, - 'gauge' AS type - FROM system.dictionaries - UNION ALL + FROM system.events + UNION ALL + SELECT + 'metric.DiskDataBytes' AS metric, + toString(sum(bytes_on_disk)) AS value, + 'Total data size for all ClickHouse tables' AS description, + 'gauge' AS type + FROM system.parts + UNION ALL SELECT - 'metric.DiskFreeBytes' AS metric, - toString(filesystemFree()) AS value, - 'Free disk space 
available at file system' AS description, - 'gauge' AS type + 'metric.MemoryPrimaryKeyBytesAllocated' AS metric, + toString(sum(primary_key_bytes_in_memory_allocated)) AS value, + 'Memory size allocated for primary keys' AS description, + 'gauge' AS type + FROM system.parts + UNION ALL + SELECT + 'metric.MemoryDictionaryBytesAllocated' AS metric, + toString(sum(bytes_allocated)) AS value, + 'Memory size allocated for dictionaries' AS description, + 'gauge' AS type + FROM system.dictionaries + UNION ALL + SELECT + 'metric.DiskFreeBytes' AS metric, + toString(filesystemFree()) AS value, + 'Free disk space available at file system' AS description, + 'gauge' AS type ` queryTableSizesSQL = ` @@ -142,21 +142,3 @@ func (f *Fetcher) clickHouseQueryTableSizes(data *[][]string) error { } return nil } - -// clickHouseDiskFreeSize requests total data disk and free disk sizes -// data is a concealed output -func (f *Fetcher) clickHouseDiskFreeSize(data *[]string) error { - conn := f.newConn() - if rows, err := conn.Query(heredoc.Doc(queryDiskFreeSQL)); err != nil { - return err - } else { - for rows.Next() { - var total_bytes, free_bytes string - if err := rows.Scan(&total_bytes, &free_bytes); err == nil { - *data = []string{total_bytes, free_bytes} - } - break // only one row is expected - } - } - return nil -} From a334ac0dddd3a235533740e71fb0c625875469f0 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 18 Jun 2019 13:22:59 +0300 Subject: [PATCH 24/31] env: make dev scripts paths space-protected --- dev/binary_build.sh | 12 ++++++------ dev/binary_build_config.sh | 4 ++-- dev/binary_clean.sh | 4 ++-- dev/find_unformatted_sources.sh | 6 +++--- dev/format_unformatted_sources.sh | 6 +++--- dev/image_build_altinity.sh | 4 ++-- dev/image_build_dev.sh | 4 ++-- dev/image_build_universal.sh | 4 ++-- dev/run_dev.sh | 16 ++++++++-------- dev/update-codegen.sh | 6 +++--- 10 files changed, 33 insertions(+), 33 deletions(-) diff --git a/dev/binary_build.sh 
b/dev/binary_build.sh index 96bf8c9ef..aea581607 100755 --- a/dev/binary_build.sh +++ b/dev/binary_build.sh @@ -5,20 +5,20 @@ # Source configuration CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/binary_build_config.sh +source "${CUR_DIR}/binary_build_config.sh" REPO="github.com/altinity/clickhouse-operator" -VERSION=$(cd ${SRC_ROOT}; cat release) -GIT_SHA=$(cd ${CUR_DIR}; git rev-parse --short HEAD) +VERSION=$(cd "${SRC_ROOT}"; cat release) +GIT_SHA=$(cd "${CUR_DIR}"; git rev-parse --short HEAD) # Build clickhouse-operator install .yaml manifest -${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh +"${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh" #CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ${CUR_DIR}/clickhouse-operator ${SRC_ROOT}/cmd/clickhouse-operator CGO_ENABLED=0 go build \ -v -a \ -ldflags "-X ${REPO}/pkg/version.Version=${VERSION} -X ${REPO}/pkg/version.GitSHA=${GIT_SHA}" \ - -o ${OPERATOR_BIN} \ - ${SRC_ROOT}/cmd/manager/main.go + -o "${OPERATOR_BIN}" \ + "${SRC_ROOT}/cmd/manager/main.go" exit $? diff --git a/dev/binary_build_config.sh b/dev/binary_build_config.sh index 9a6af2863..f863a79da 100755 --- a/dev/binary_build_config.sh +++ b/dev/binary_build_config.sh @@ -3,8 +3,8 @@ # Build configuration options CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -SRC_ROOT=$(realpath ${CUR_DIR}/..) 
+SRC_ROOT="$(realpath "${CUR_DIR}/..")" # Operator binary name can be specified externally # Default - put 'clickhouse-operator' into cur dir -OPERATOR_BIN=${OPERATOR_BIN:-${CUR_DIR}/clickhouse-operator} +OPERATOR_BIN="${OPERATOR_BIN:-${CUR_DIR}/clickhouse-operator}" diff --git a/dev/binary_clean.sh b/dev/binary_clean.sh index 308210aec..326c1a1d0 100755 --- a/dev/binary_clean.sh +++ b/dev/binary_clean.sh @@ -5,6 +5,6 @@ # Source configuration CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/binary_build_config.sh +source "${CUR_DIR}/binary_build_config.sh" -rm -f ${OPERATOR_BIN} +rm -f "${OPERATOR_BIN}" diff --git a/dev/find_unformatted_sources.sh b/dev/find_unformatted_sources.sh index 6d5f53254..25edae0a8 100755 --- a/dev/find_unformatted_sources.sh +++ b/dev/find_unformatted_sources.sh @@ -11,16 +11,16 @@ set -o pipefail # Source configuration CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/binary_build_config.sh +source "${CUR_DIR}/binary_build_config.sh" # Prepare list of all .go files in the project, but exclude all files from /vendor/ folder -GO_FILES_LIST=$(find ${SRC_ROOT} -name \*.go -not -path "${SRC_ROOT}/vendor/*" -print) +GO_FILES_LIST=$(find "${SRC_ROOT}" -name \*.go -not -path "${SRC_ROOT}/vendor/*" -print) # Prepare unformatted files list UNFORMATTED_FILES_LIST=$(gofmt -l ${GO_FILES_LIST}) if [[ ${UNFORMATTED_FILES_LIST} ]]; then for FILE in ${UNFORMATTED_FILES_LIST}; do - echo ${FILE} + echo "${FILE}" done exit 1 fi diff --git a/dev/format_unformatted_sources.sh b/dev/format_unformatted_sources.sh index 80dad0581..f356f2fa7 100755 --- a/dev/format_unformatted_sources.sh +++ b/dev/format_unformatted_sources.sh @@ -11,9 +11,9 @@ set -o pipefail # Source configuration CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/binary_build_config.sh +source "${CUR_DIR}/binary_build_config.sh" # Iterate over list of unformatted files 
and format each of them -${CUR_DIR}/find_unformatted_sources.sh | while read -r FILE; do - go fmt ${FILE} +"${CUR_DIR}/find_unformatted_sources.sh" | while read -r FILE; do + go fmt "${FILE}" done diff --git a/dev/image_build_altinity.sh b/dev/image_build_altinity.sh index 7cc84e30e..fcf0aca96 100755 --- a/dev/image_build_altinity.sh +++ b/dev/image_build_altinity.sh @@ -4,7 +4,7 @@ # Source configuration CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/binary_build_config.sh +source "${CUR_DIR}/binary_build_config.sh" # Externally configurable build-dependent options TAG="${TAG:-altinity/clickhouse-operator:dev}" @@ -16,4 +16,4 @@ TAG="${TAG}" \ DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN}" \ DOCKERHUB_PUBLISH="${DOCKERHUB_PUBLISH}" \ MINIKUBE="${MINIKUBE}" \ -${CUR_DIR}/image_build_universal.sh +"${CUR_DIR}/image_build_universal.sh" diff --git a/dev/image_build_dev.sh b/dev/image_build_dev.sh index 41fa54f24..3c0894d11 100755 --- a/dev/image_build_dev.sh +++ b/dev/image_build_dev.sh @@ -4,7 +4,7 @@ # Source configuration CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/binary_build_config.sh +source "${CUR_DIR}/binary_build_config.sh" # Externally configurable build-dependent options TAG="${TAG:-sunsingerus/clickhouse-operator:dev}" @@ -16,4 +16,4 @@ TAG="${TAG}" \ DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN}" \ DOCKERHUB_PUBLISH="${DOCKERHUB_PUBLISH}" \ MINIKUBE="${MINIKUBE}" \ -${CUR_DIR}/image_build_universal.sh +"${CUR_DIR}/image_build_universal.sh" diff --git a/dev/image_build_universal.sh b/dev/image_build_universal.sh index 7f0bcccea..48c2cd861 100755 --- a/dev/image_build_universal.sh +++ b/dev/image_build_universal.sh @@ -10,12 +10,12 @@ MINIKUBE="${MINIKUBE:-no}" # Source-dependent options CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -SRC_ROOT="$(realpath ${CUR_DIR}/..)" +SRC_ROOT="$(realpath "${CUR_DIR}/..")" DOCKERFILE_DIR="${SRC_ROOT}" 
DOCKERFILE="${DOCKERFILE_DIR}/Dockerfile" # Build clickhouse-operator install .yaml manifest -${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh +"${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh" # Build image with Docker if [[ "${MINIKUBE}" == "yes" ]]; then diff --git a/dev/run_dev.sh b/dev/run_dev.sh index 35f3c226a..8e76fb7df 100755 --- a/dev/run_dev.sh +++ b/dev/run_dev.sh @@ -5,16 +5,16 @@ # Source configuration CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/binary_build_config.sh -LOG_DIR=${CUR_DIR}/log +source "${CUR_DIR}/binary_build_config.sh" +LOG_DIR="${CUR_DIR}/log" echo -n "Building binary, please wait..." -if ${CUR_DIR}/binary_build.sh; then +if "${CUR_DIR}/binary_build.sh"; then echo "successfully built clickhouse-operator. Starting" - mkdir -p ${LOG_DIR} - rm -f ${LOG_DIR}/clickhouse-operator.*.log.* - ${OPERATOR_BIN} \ + mkdir -p "${LOG_DIR}" + rm -f "${LOG_DIR}"/clickhouse-operator.*.log.* + "${OPERATOR_BIN}" \ -alsologtostderr=true \ -log_dir=log \ -v=1 @@ -27,12 +27,12 @@ if ${CUR_DIR}/binary_build.sh; then # -stderrthreshold=FATAL Log events at or above this severity are logged to standard error as well as to files # And clean binary after run. It'll be rebuilt next time - ${CUR_DIR}/binary_clean.sh + "${CUR_DIR}/binary_clean.sh" echo "======================" echo "=== Logs available ===" echo "======================" - ls ${LOG_DIR}/* + ls "${LOG_DIR}"/* else echo "unable to build clickhouse-operator" fi diff --git a/dev/update-codegen.sh b/dev/update-codegen.sh index 55294405c..4e1232082 100755 --- a/dev/update-codegen.sh +++ b/dev/update-codegen.sh @@ -7,10 +7,10 @@ set -o nounset # Only exit with zero if all commands of the pipeline exit successfully set -o pipefail -PROJECT_ROOT=$(dirname ${BASH_SOURCE})/.. 
-CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${PROJECT_ROOT}; ls -d -1 ${PROJECT_ROOT}/vendor/k8s.io/code-generator 2>/dev/null || echo ${GOPATH}/src/k8s.io/code-generator)} +PROJECT_ROOT="$(dirname "${BASH_SOURCE}")/.." +CODEGEN_PKG="${CODEGEN_PKG:-$(cd "${PROJECT_ROOT}"; ls -d -1 "${PROJECT_ROOT}/vendor/k8s.io/code-generator" 2>/dev/null || echo "${GOPATH}/src/k8s.io/code-generator")}" -${PROJECT_ROOT}/vendor/k8s.io/code-generator/generate-groups.sh all \ +"${PROJECT_ROOT}/vendor/k8s.io/code-generator/generate-groups.sh" all \ github.com/altinity/clickhouse-operator/pkg/client \ github.com/altinity/clickhouse-operator/pkg/apis \ "clickhouse.altinity.com:v1" From ab0d9d5c60c3db54b493afcf274fc072a6a11826 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 18 Jun 2019 13:23:27 +0300 Subject: [PATCH 25/31] env: make manifest build scripts space-protected --- .../cat-clickhouse-operator-install-yaml.sh | 34 +++++++++---------- manifests/dev/clickhouse-operator-delete.sh | 4 +-- manifests/dev/clickhouse-operator-install.sh | 4 +-- manifests/dev/dev-delete.sh | 2 +- manifests/dev/dev-install.sh | 8 ++--- manifests/dev/dev-reset.sh | 4 +-- manifests/dev/dev-show.sh | 2 +- manifests/dev/dev-watch.sh | 2 +- 8 files changed, 30 insertions(+), 30 deletions(-) diff --git a/manifests/dev/cat-clickhouse-operator-install-yaml.sh b/manifests/dev/cat-clickhouse-operator-install-yaml.sh index b3c3b919b..224743a9d 100755 --- a/manifests/dev/cat-clickhouse-operator-install-yaml.sh +++ b/manifests/dev/cat-clickhouse-operator-install-yaml.sh @@ -4,8 +4,8 @@ # Paths CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -MANIFEST_ROOT=$(realpath ${CUR_DIR}/..) -PROJECT_ROOT=$(realpath ${CUR_DIR}/../..) 
+MANIFEST_ROOT="$(realpath "${CUR_DIR}/..")" +PROJECT_ROOT="$(realpath "${CUR_DIR}/../..")" ########################################## ## @@ -62,14 +62,14 @@ MANIFEST_PRINT_DEPLOYMENT="${MANIFEST_PRINT_DEPLOYMENT:-yes}" # Render CRD section if [[ "${MANIFEST_PRINT_CRD}" == "yes" ]]; then - cat ${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml | \ + cat "${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi # Render RBAC section if [[ "${MANIFEST_PRINT_RBAC}" == "yes" ]]; then echo "---" - cat ${CUR_DIR}/clickhouse-operator-template-02-section-rbac-and-service.yaml | \ + cat "${CUR_DIR}/clickhouse-operator-template-02-section-rbac-and-service.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi @@ -87,7 +87,7 @@ function render_configmap_header() { CM_HEADER_FILE="${CUR_DIR}/clickhouse-operator-template-03-section-configmap-header.yaml" # Render ConfigMap header template with vars substitution - cat ${CM_HEADER_FILE} | \ + cat "${CM_HEADER_FILE}" | \ CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" CONFIGMAP_NAME="${CM_NAME}" envsubst } @@ -110,9 +110,9 @@ function render_configmap_data_section_file() { # line 1 # line 2 # etc - FILE_NAME=$(basename "${FILE_PATH}") + FILE_NAME="$(basename "${FILE_PATH}")" echo " ${FILE_NAME}: |" - cat ${FILE_PATH} | sed 's/^/ /' + cat "${FILE_PATH}" | sed 's/^/ /' echo "" } @@ -121,7 +121,7 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then if [[ -z "${CHOPERATOR_CONFIG_FILE}" ]]; then # No config file specified, render simple deployment echo "---" - cat ${CUR_DIR}/clickhouse-operator-template-04-section-deployment.yaml | \ + cat "${CUR_DIR}/clickhouse-operator-template-04-section-deployment.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst else # Config file specified, render all ConfigMaps and then 
render deployment @@ -134,8 +134,8 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then # Render confd.d files echo "---" render_configmap_header "etc-clickhouse-operator-confd-files" - if [[ ! -z "${CHOPERATOR_CONFD_FOLDER}" ]] && [[ ! -z "$(ls ${CHOPERATOR_CONFD_FOLDER})" ]]; then - for FILE in ${CHOPERATOR_CONFD_FOLDER}/*; do + if [[ ! -z "${CHOPERATOR_CONFD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_CONFD_FOLDER}")" ]]; then + for FILE in "${CHOPERATOR_CONFD_FOLDER}"/*; do render_configmap_data_section_file "${FILE}" done fi @@ -143,8 +143,8 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then # Render configd.d files echo "---" render_configmap_header "etc-clickhouse-operator-configd-files" - if [[ ! -z "${CHOPERATOR_CONFIGD_FOLDER}" ]] && [[ ! -z "$(ls ${CHOPERATOR_CONFIGD_FOLDER})" ]]; then - for FILE in ${CHOPERATOR_CONFIGD_FOLDER}/*; do + if [[ ! -z "${CHOPERATOR_CONFIGD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_CONFIGD_FOLDER}")" ]]; then + for FILE in "${CHOPERATOR_CONFIGD_FOLDER}"/*; do render_configmap_data_section_file "${FILE}" done fi @@ -152,8 +152,8 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then # Render templates.d files echo "---" render_configmap_header "etc-clickhouse-operator-templatesd-files" - if [[ ! -z "${CHOPERATOR_TEMPLATESD_FOLDER}" ]] && [[ ! -z "$(ls ${CHOPERATOR_TEMPLATESD_FOLDER})" ]]; then - for FILE in ${CHOPERATOR_TEMPLATESD_FOLDER}/*; do + if [[ ! -z "${CHOPERATOR_TEMPLATESD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_TEMPLATESD_FOLDER}")" ]]; then + for FILE in "${CHOPERATOR_TEMPLATESD_FOLDER}"/*; do render_configmap_data_section_file "${FILE}" done fi @@ -161,15 +161,15 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then # Render users.d files echo "---" render_configmap_header "etc-clickhouse-operator-usersd-files" - if [[ ! -z "${CHOPERATOR_USERSD_FOLDER}" ]] && [[ ! -z "$(ls ${CHOPERATOR_USERSD_FOLDER})" ]]; then - for FILE in ${CHOPERATOR_USERSD_FOLDER}/*; do + if [[ ! 
-z "${CHOPERATOR_USERSD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_USERSD_FOLDER}")" ]]; then + for FILE in "${CHOPERATOR_USERSD_FOLDER}"/*; do render_configmap_data_section_file "${FILE}" done fi # Render Deployment echo "---" - cat ${CUR_DIR}/clickhouse-operator-template-04-section-deployment-with-configmap.yaml | \ + cat "${CUR_DIR}/clickhouse-operator-template-04-section-deployment-with-configmap.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi fi diff --git a/manifests/dev/clickhouse-operator-delete.sh b/manifests/dev/clickhouse-operator-delete.sh index c15e0725c..622c126cf 100755 --- a/manifests/dev/clickhouse-operator-delete.sh +++ b/manifests/dev/clickhouse-operator-delete.sh @@ -4,12 +4,12 @@ CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-dev}" CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:dev}" CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -MANIFEST_ROOT=$(realpath ${CUR_DIR}/..) 
+MANIFEST_ROOT="$(realpath "${CUR_DIR}/..")" if [[ "${CHOPERATOR_NAMESPACE}" == "kube-system" ]]; then echo "Default k8s namespace 'kube-system' must not be deleted" echo "Delete components only" - kubectl delete --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) + kubectl delete --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh") else echo "Delete ClickHouse Operator namespace ${CHOPERATOR_NAMESPACE}" kubectl delete namespace "${CHOPERATOR_NAMESPACE}" diff --git a/manifests/dev/clickhouse-operator-install.sh b/manifests/dev/clickhouse-operator-install.sh index 64ba7fa70..e93726b55 100755 --- a/manifests/dev/clickhouse-operator-install.sh +++ b/manifests/dev/clickhouse-operator-install.sh @@ -4,7 +4,7 @@ CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-dev}" CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:dev}" CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -MANIFEST_ROOT=$(realpath ${CUR_DIR}/..) 
+MANIFEST_ROOT="$(realpath "${CUR_DIR}/..")" echo "Setup ClickHouse Operator into ${CHOPERATOR_NAMESPACE} namespace" @@ -12,4 +12,4 @@ echo "Setup ClickHouse Operator into ${CHOPERATOR_NAMESPACE} namespace" kubectl create namespace "${CHOPERATOR_NAMESPACE}" # Setup into dedicated namespace -kubectl apply --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) +kubectl apply --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh") diff --git a/manifests/dev/dev-delete.sh b/manifests/dev/dev-delete.sh index b8ec5042a..53d9158d5 100755 --- a/manifests/dev/dev-delete.sh +++ b/manifests/dev/dev-delete.sh @@ -2,7 +2,7 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/dev-config.sh +source "${CUR_DIR}/dev-config.sh" if [[ "${CHOPERATOR_NAMESPACE}" == "kube-system" ]]; then echo "Default k8s namespace 'kube-system' must not be deleted" diff --git a/manifests/dev/dev-install.sh b/manifests/dev/dev-install.sh index 99a8a8b01..e3613ea92 100755 --- a/manifests/dev/dev-install.sh +++ b/manifests/dev/dev-install.sh @@ -2,13 +2,13 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/dev-config.sh +source "${CUR_DIR}/dev-config.sh" echo "Create ${CHOPERATOR_NAMESPACE} namespace" kubectl create namespace "${CHOPERATOR_NAMESPACE}" if [[ "${INSTALL_FROM_ALTINITY_RELEASE_DOCKERHUB}" == "yes" ]]; then - kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) + kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" 
"${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh") # Installation done exit $? @@ -17,10 +17,10 @@ else echo "CHOPERATOR_NAMESPACE=${CHOPERATOR_NAMESPACE}" echo "CHOPERATOR_IMAGE=${CHOPERATOR_IMAGE}" - kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_DEPLOYMENT="no" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) + kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_DEPLOYMENT="no" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh") if [[ "${INSTALL_FROM_DEPLOYMENT_MANIFEST}" == "yes" ]]; then # Install operator from Docker Registry (dockerhub or whatever) - kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_CRD="no" MANIFEST_PRINT_RBAC="no" ${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh) + kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_CRD="no" MANIFEST_PRINT_RBAC="no" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh") fi fi diff --git a/manifests/dev/dev-reset.sh b/manifests/dev/dev-reset.sh index 1b9748230..9cd0138c0 100755 --- a/manifests/dev/dev-reset.sh +++ b/manifests/dev/dev-reset.sh @@ -2,7 +2,7 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/dev-config.sh +source "${CUR_DIR}/dev-config.sh" echo "Reset namespace: ${CHOPERATOR_NAMESPACE}" -${CUR_DIR}/dev-delete.sh && ${CUR_DIR}/dev-install.sh +"${CUR_DIR}/dev-delete.sh" && "${CUR_DIR}/dev-install.sh" diff --git a/manifests/dev/dev-show.sh b/manifests/dev/dev-show.sh index c78493426..016d06289 100755 --- a/manifests/dev/dev-show.sh +++ b/manifests/dev/dev-show.sh @@ -2,7 +2,7 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source 
${CUR_DIR}/dev-config.sh +source "${CUR_DIR}/dev-config.sh" echo "=== Pod ===" kubectl -n "${CHOPERATOR_NAMESPACE}" -o wide get pod diff --git a/manifests/dev/dev-watch.sh b/manifests/dev/dev-watch.sh index e08c9fe3f..bb7583ff7 100755 --- a/manifests/dev/dev-watch.sh +++ b/manifests/dev/dev-watch.sh @@ -2,6 +2,6 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source ${CUR_DIR}/dev-config.sh +source "${CUR_DIR}/dev-config.sh" watch -n1 "kubectl -n ${CHOPERATOR_NAMESPACE} get all,configmap,endpoints,pv,pvc" From 819441e7a1349c630cd9430867e647904af22747 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 18 Jun 2019 13:23:46 +0300 Subject: [PATCH 26/31] dev: fix layout minorr --- pkg/model/creator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/model/creator.go b/pkg/model/creator.go index 0c3e7bc2f..38a4e80f3 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -165,7 +165,7 @@ func (r *Reconciler) createServiceFromTemplate( for i := range template.Spec.Ports { servicePort := &template.Spec.Ports[i] if (servicePort.Port < 1) || (servicePort.Port > 65535) { - glog.V(1).Infof("createServiceFromTemplate(%s/%s) INCORRECT PORT: %d ", namespace, name, servicePort.Port ) + glog.V(1).Infof("createServiceFromTemplate(%s/%s) INCORRECT PORT: %d ", namespace, name, servicePort.Port) return nil } } From 478e91953da75dee6cc607ac9a33e811e01e516a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 18 Jun 2019 16:09:09 +0300 Subject: [PATCH 27/31] env: prepare for stand-alone file --- .../cat-clickhouse-operator-install-yaml.sh | 74 +++++++++++++++++-- manifests/dev/clickhouse-operator-install.sh | 43 ++++++++++- 2 files changed, 108 insertions(+), 9 deletions(-) diff --git a/manifests/dev/cat-clickhouse-operator-install-yaml.sh b/manifests/dev/cat-clickhouse-operator-install-yaml.sh index 224743a9d..9fc33e41f 100755 --- a/manifests/dev/cat-clickhouse-operator-install-yaml.sh +++ 
b/manifests/dev/cat-clickhouse-operator-install-yaml.sh @@ -4,7 +4,6 @@ # Paths CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -MANIFEST_ROOT="$(realpath "${CUR_DIR}/..")" PROJECT_ROOT="$(realpath "${CUR_DIR}/../..")" ########################################## @@ -52,6 +51,43 @@ MANIFEST_PRINT_RBAC="${MANIFEST_PRINT_RBAC:-yes}" # Render operator's Deployment section. May be not required in case of dev localhost run MANIFEST_PRINT_DEPLOYMENT="${MANIFEST_PRINT_DEPLOYMENT:-yes}" +################################## +## +## File handler +## +################################## + +function ensure_file() { + local FILE="$1" + + if [[ -f "${FILE}" ]]; then + # File found, all is ok + : + else + # File not found, try to download it + if ! curl --version > /dev/null; then + echo "curl is not available, can not continue" + exit 1 + fi + + local BASE="$(basename "${FILE}")" + ALTINITY_REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/dev/" + ALTINITY_REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator/dev-vladislav/manifests/dev/" + if ! 
curl --silent "${ALTINITY_REPO_URL}${BASE}" --output "${FILE}"; then + echo "curl call to download ${BASE} failed, can not continue" + exit 1 + fi + fi + + if [[ -f "${FILE}" ]]; then + # File found, all is ok + : + else + # File not found + echo "Unable to download ${FILE}" + exit 1 + fi +} ################################## ## @@ -62,6 +98,7 @@ MANIFEST_PRINT_DEPLOYMENT="${MANIFEST_PRINT_DEPLOYMENT:-yes}" # Render CRD section if [[ "${MANIFEST_PRINT_CRD}" == "yes" ]]; then + ensure_file "${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml" cat "${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi @@ -69,6 +106,7 @@ fi # Render RBAC section if [[ "${MANIFEST_PRINT_RBAC}" == "yes" ]]; then echo "---" + ensure_file "${CUR_DIR}/clickhouse-operator-template-02-section-rbac-and-service.yaml" cat "${CUR_DIR}/clickhouse-operator-template-02-section-rbac-and-service.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi @@ -86,6 +124,8 @@ function render_configmap_header() { # Template file with ConfigMap header/beginning CM_HEADER_FILE="${CUR_DIR}/clickhouse-operator-template-03-section-configmap-header.yaml" + ensure_file "${CM_HEADER_FILE}" + # Render ConfigMap header template with vars substitution cat "${CM_HEADER_FILE}" | \ CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" CONFIGMAP_NAME="${CM_NAME}" envsubst @@ -104,7 +144,8 @@ function render_configmap_header() { # ui.properties: | # color.good=purple function render_configmap_data_section_file() { - FILE_PATH=$1 + FILE_PATH="$1" + # ConfigMap .data section looks like # config.yaml: | # line 1 @@ -121,20 +162,27 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then if [[ -z "${CHOPERATOR_CONFIG_FILE}" ]]; then # No config file specified, render simple deployment echo "---" + ensure_file 
"${CUR_DIR}/clickhouse-operator-template-04-section-deployment.yaml" cat "${CUR_DIR}/clickhouse-operator-template-04-section-deployment.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst else # Config file specified, render all ConfigMaps and then render deployment - # Render clickhouse-operator config file echo "---" render_configmap_header "etc-clickhouse-operator-files" - render_configmap_data_section_file "${PROJECT_ROOT}/config/config.yaml" + if [[ -f "${PROJECT_ROOT}/config/config.yaml" ]]; then + # Render clickhouse-operator config file + render_configmap_data_section_file "${PROJECT_ROOT}/config/config.yaml" + else + # Fetch from github and apply + # config/config.yaml + : + fi # Render confd.d files echo "---" render_configmap_header "etc-clickhouse-operator-confd-files" - if [[ ! -z "${CHOPERATOR_CONFD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_CONFD_FOLDER}")" ]]; then + if [[ ! -z "${CHOPERATOR_CONFD_FOLDER}" ]] && [[ -d "${CHOPERATOR_CONFD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_CONFD_FOLDER}")" ]]; then for FILE in "${CHOPERATOR_CONFD_FOLDER}"/*; do render_configmap_data_section_file "${FILE}" done @@ -143,16 +191,21 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then # Render configd.d files echo "---" render_configmap_header "etc-clickhouse-operator-configd-files" - if [[ ! -z "${CHOPERATOR_CONFIGD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_CONFIGD_FOLDER}")" ]]; then + if [[ ! -z "${CHOPERATOR_CONFIGD_FOLDER}" ]] && [[ -d "${CHOPERATOR_CONFIGD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_CONFIGD_FOLDER}")" ]]; then for FILE in "${CHOPERATOR_CONFIGD_FOLDER}"/*; do render_configmap_data_section_file "${FILE}" done + else + # Fetch from github and apply + # config/config.d/01-clickhouse-operator-listen.xml + # config/config.d/01-clickhouse-operator-listen.xml + : fi # Render templates.d files echo "---" render_configmap_header "etc-clickhouse-operator-templatesd-files" - if [[ ! 
-z "${CHOPERATOR_TEMPLATESD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_TEMPLATESD_FOLDER}")" ]]; then + if [[ ! -z "${CHOPERATOR_TEMPLATESD_FOLDER}" ]] && [[ -d "${CHOPERATOR_TEMPLATESD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_TEMPLATESD_FOLDER}")" ]]; then for FILE in "${CHOPERATOR_TEMPLATESD_FOLDER}"/*; do render_configmap_data_section_file "${FILE}" done @@ -161,14 +214,19 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then # Render users.d files echo "---" render_configmap_header "etc-clickhouse-operator-usersd-files" - if [[ ! -z "${CHOPERATOR_USERSD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_USERSD_FOLDER}")" ]]; then + if [[ ! -z "${CHOPERATOR_USERSD_FOLDER}" ]] && [[ -d "${CHOPERATOR_USERSD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_USERSD_FOLDER}")" ]]; then for FILE in "${CHOPERATOR_USERSD_FOLDER}"/*; do render_configmap_data_section_file "${FILE}" done + else + # Fetch from github and apply + # config/users.d/01-clickhouse-operator-user.xml + : fi # Render Deployment echo "---" + ensure_file "${CUR_DIR}/clickhouse-operator-template-04-section-deployment-with-configmap.yaml" cat "${CUR_DIR}/clickhouse-operator-template-04-section-deployment-with-configmap.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi diff --git a/manifests/dev/clickhouse-operator-install.sh b/manifests/dev/clickhouse-operator-install.sh index e93726b55..fd01686ed 100755 --- a/manifests/dev/clickhouse-operator-install.sh +++ b/manifests/dev/clickhouse-operator-install.sh @@ -4,7 +4,48 @@ CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-dev}" CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:dev}" CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -MANIFEST_ROOT="$(realpath "${CUR_DIR}/..")" + +function ensure_kubectl() { + if ! 
kubectl version; then + echo "kubectl failed, can not continue" + exit 1 + fi +} + +function ensure_file() { + local FILE="$1" + + if [[ -f "${FILE}" ]]; then + # File found, all is ok + : + else + # File not found, try to download it + if ! curl --version > /dev/null; then + echo "curl is not available, can not continue" + exit 1 + fi + + local BASE="$(basename "${FILE}")" + ALTINITY_REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/dev/" + ALTINITY_REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator/dev-vladislav/manifests/dev/" + if ! curl --silent "${ALTINITY_REPO_URL}${BASE}" --output "${FILE}"; then + echo "curl call to download ${BASE} failed, can not continue" + exit 1 + fi + fi + + if [[ -f "${FILE}" ]]; then + # File found, all is ok + : + else + # File not found + echo "Unable to download ${FILE}" + exit 1 + fi +} + +ensure_kubectl +ensure_file "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh" echo "Setup ClickHouse Operator into ${CHOPERATOR_NAMESPACE} namespace" From e04f1a324dbc851be1aa00666246630594ddb8d9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 18 Jun 2019 23:28:08 +0300 Subject: [PATCH 28/31] env: self-sufficient install scripts --- .../cat-clickhouse-operator-install-yaml.sh | 91 +++++++++++++------ manifests/dev/clickhouse-operator-install.sh | 69 ++++++++++---- 2 files changed, 110 insertions(+), 50 deletions(-) diff --git a/manifests/dev/cat-clickhouse-operator-install-yaml.sh b/manifests/dev/cat-clickhouse-operator-install-yaml.sh index 9fc33e41f..a95436ebb 100755 --- a/manifests/dev/cat-clickhouse-operator-install-yaml.sh +++ b/manifests/dev/cat-clickhouse-operator-install-yaml.sh @@ -58,37 +58,65 @@ MANIFEST_PRINT_DEPLOYMENT="${MANIFEST_PRINT_DEPLOYMENT:-yes}" ################################## function ensure_file() { - local FILE="$1" + # Params + local LOCAL_DIR="$1" + local FILE="$2" + local REPO_DIR="$3" - if [[ -f "${FILE}" ]]; then + local 
LOCAL_FILE="${LOCAL_DIR}/${FILE}" + + if [[ -f "${LOCAL_FILE}" ]]; then # File found, all is ok : else - # File not found, try to download it - if ! curl --version > /dev/null; then - echo "curl is not available, can not continue" - exit 1 - fi - - local BASE="$(basename "${FILE}")" - ALTINITY_REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/dev/" - ALTINITY_REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator/dev-vladislav/manifests/dev/" - if ! curl --silent "${ALTINITY_REPO_URL}${BASE}" --output "${FILE}"; then - echo "curl call to download ${BASE} failed, can not continue" - exit 1 - fi + download_file "${LOCAL_DIR}" "${FILE}" "${REPO_DIR}" fi - if [[ -f "${FILE}" ]]; then + if [[ -f "${LOCAL_FILE}" ]]; then # File found, all is ok : else # File not found - echo "Unable to download ${FILE}" + echo "Unable to get ${FILE}" exit 1 fi } +function download_file() { + # Params + local LOCAL_DIR="$1" + local FILE="$2" + local REPO_DIR="$3" + + local LOCAL_FILE="${LOCAL_DIR}/${FILE}" + + REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator" + BRANCH="dev-vladislav" + #BRANCH="master" + FILE_URL="${REPO_URL}/${BRANCH}/${REPO_DIR}/${FILE}" + + # Check curl is in place + if ! curl --version > /dev/null; then + echo "curl is not available, can not continue" + exit 1 + fi + + # Download file + if ! 
curl --silent "${FILE_URL}" --output "${LOCAL_FILE}"; then + echo "curl call to download ${FILE_URL} failed, can not continue" + exit 1 + fi + + # Check file is in place + if [[ -f "${LOCAL_FILE}" ]]; then + # File found, all is ok + : + else + # File not found + echo "Unable to download ${FILE_URL}" + exit 1 + fi +} ################################## ## ## Render .yaml manifest @@ -98,7 +126,7 @@ function ensure_file() { # Render CRD section if [[ "${MANIFEST_PRINT_CRD}" == "yes" ]]; then - ensure_file "${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml" + ensure_file "${CUR_DIR}" "clickhouse-operator-template-01-section-crd.yaml" "manifests/dev" cat "${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi @@ -106,7 +134,7 @@ fi # Render RBAC section if [[ "${MANIFEST_PRINT_RBAC}" == "yes" ]]; then echo "---" - ensure_file "${CUR_DIR}/clickhouse-operator-template-02-section-rbac-and-service.yaml" + ensure_file "${CUR_DIR}" "clickhouse-operator-template-02-section-rbac-and-service.yaml" "manifests/dev" cat "${CUR_DIR}/clickhouse-operator-template-02-section-rbac-and-service.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi @@ -122,13 +150,11 @@ function render_configmap_header() { # ConfigMap name CM_NAME="$1" # Template file with ConfigMap header/beginning - CM_HEADER_FILE="${CUR_DIR}/clickhouse-operator-template-03-section-configmap-header.yaml" - - ensure_file "${CM_HEADER_FILE}" + ensure_file "${CUR_DIR}" "clickhouse-operator-template-03-section-configmap-header.yaml" "manifests/dev" # Render ConfigMap header template with vars substitution - cat "${CM_HEADER_FILE}" | \ - CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" CONFIGMAP_NAME="${CM_NAME}" envsubst + cat "${CUR_DIR}/clickhouse-operator-template-03-section-configmap-header.yaml" | \ + CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" 
CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" CONFIGMAP_NAME="${CM_NAME}" envsubst } # Render one file section in ConfigMap yaml specification: @@ -162,7 +188,7 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then if [[ -z "${CHOPERATOR_CONFIG_FILE}" ]]; then # No config file specified, render simple deployment echo "---" - ensure_file "${CUR_DIR}/clickhouse-operator-template-04-section-deployment.yaml" + ensure_file "${CUR_DIR}" "clickhouse-operator-template-04-section-deployment.yaml" "manifests/dev" cat "${CUR_DIR}/clickhouse-operator-template-04-section-deployment.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst else @@ -176,7 +202,8 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then else # Fetch from github and apply # config/config.yaml - : + download_file "${CUR_DIR}" "config.yaml" "config" + render_configmap_data_section_file "${CUR_DIR}/config.yaml" fi # Render confd.d files @@ -198,8 +225,11 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then else # Fetch from github and apply # config/config.d/01-clickhouse-operator-listen.xml - # config/config.d/01-clickhouse-operator-listen.xml - : + # config/config.d/02-clickhouse-operator-logger.xml + download_file "${CUR_DIR}" "01-clickhouse-operator-listen.xml" "config/config.d" + download_file "${CUR_DIR}" "02-clickhouse-operator-logger.xml" "config/config.d" + render_configmap_data_section_file "${CUR_DIR}/01-clickhouse-operator-listen.xml" + render_configmap_data_section_file "${CUR_DIR}/02-clickhouse-operator-logger.xml" fi # Render templates.d files @@ -221,12 +251,13 @@ if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then else # Fetch from github and apply # config/users.d/01-clickhouse-operator-user.xml - : + download_file "${CUR_DIR}" "01-clickhouse-operator-user.xml" "config/users.d" + render_configmap_data_section_file "${CUR_DIR}/01-clickhouse-operator-user.xml" fi # Render Deployment echo "---" - ensure_file 
"${CUR_DIR}/clickhouse-operator-template-04-section-deployment-with-configmap.yaml" + ensure_file "${CUR_DIR}" "clickhouse-operator-template-04-section-deployment-with-configmap.yaml" "manifests/dev" cat "${CUR_DIR}/clickhouse-operator-template-04-section-deployment-with-configmap.yaml" | \ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst fi diff --git a/manifests/dev/clickhouse-operator-install.sh b/manifests/dev/clickhouse-operator-install.sh index fd01686ed..8cacf0195 100755 --- a/manifests/dev/clickhouse-operator-install.sh +++ b/manifests/dev/clickhouse-operator-install.sh @@ -6,46 +6,75 @@ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:dev}" CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" function ensure_kubectl() { - if ! kubectl version; then + if ! kubectl version > /dev/null; then echo "kubectl failed, can not continue" exit 1 fi } function ensure_file() { - local FILE="$1" + # Params + local LOCAL_DIR="$1" + local FILE="$2" + local REPO_DIR="$3" - if [[ -f "${FILE}" ]]; then + local LOCAL_FILE="${LOCAL_DIR}/${FILE}" + + if [[ -f "${LOCAL_FILE}" ]]; then # File found, all is ok : else - # File not found, try to download it - if ! curl --version > /dev/null; then - echo "curl is not available, can not continue" - exit 1 - fi - - local BASE="$(basename "${FILE}")" - ALTINITY_REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/dev/" - ALTINITY_REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator/dev-vladislav/manifests/dev/" - if ! 
curl --silent "${ALTINITY_REPO_URL}${BASE}" --output "${FILE}"; then - echo "curl call to download ${BASE} failed, can not continue" - exit 1 - fi + download_file "${LOCAL_DIR}" "${FILE}" "${REPO_DIR}" + fi + + if [[ -f "${LOCAL_FILE}" ]]; then + # File found, all is ok + : + else + # File not found + echo "Unable to get ${FILE}" + exit 1 + fi +} + +function download_file() { + # Params + local LOCAL_DIR="$1" + local FILE="$2" + local REPO_DIR="$3" + + local LOCAL_FILE="${LOCAL_DIR}/${FILE}" + + REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator" + BRANCH="dev-vladislav" + #BRANCH="master" + FILE_URL="${REPO_URL}/${BRANCH}/${REPO_DIR}/${FILE}" + + # Check curl is in place + if ! curl --version > /dev/null; then + echo "curl is not available, can not continue" + exit 1 + fi + + # Download file + if ! curl --silent "${FILE_URL}" --output "${LOCAL_FILE}"; then + echo "curl call to download ${FILE_URL} failed, can not continue" + exit 1 fi - if [[ -f "${FILE}" ]]; then + # Check file is in place + if [[ -f "${LOCAL_FILE}" ]]; then # File found, all is ok : else # File not found - echo "Unable to download ${FILE}" + echo "Unable to download ${FILE_URL}" exit 1 fi } ensure_kubectl -ensure_file "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh" +ensure_file "${CUR_DIR}" "cat-clickhouse-operator-install-yaml.sh" "manifests/dev" echo "Setup ClickHouse Operator into ${CHOPERATOR_NAMESPACE} namespace" @@ -53,4 +82,4 @@ echo "Setup ClickHouse Operator into ${CHOPERATOR_NAMESPACE} namespace" kubectl create namespace "${CHOPERATOR_NAMESPACE}" # Setup into dedicated namespace -kubectl apply --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh") +kubectl apply --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" /bin/bash 
"${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh") From 3517db9d76c783110dea6255464226ffe6e36996 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 18 Jun 2019 23:37:32 +0300 Subject: [PATCH 29/31] env: self-suffisient install scripts - switch to master branch --- manifests/dev/cat-clickhouse-operator-install-yaml.sh | 4 ++-- manifests/dev/clickhouse-operator-install.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/manifests/dev/cat-clickhouse-operator-install-yaml.sh b/manifests/dev/cat-clickhouse-operator-install-yaml.sh index a95436ebb..45731281b 100755 --- a/manifests/dev/cat-clickhouse-operator-install-yaml.sh +++ b/manifests/dev/cat-clickhouse-operator-install-yaml.sh @@ -91,8 +91,8 @@ function download_file() { local LOCAL_FILE="${LOCAL_DIR}/${FILE}" REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator" - BRANCH="dev-vladislav" - #BRANCH="master" + #BRANCH="dev-vladislav" + BRANCH="master" FILE_URL="${REPO_URL}/${BRANCH}/${REPO_DIR}/${FILE}" # Check curl is in place diff --git a/manifests/dev/clickhouse-operator-install.sh b/manifests/dev/clickhouse-operator-install.sh index 8cacf0195..9d0c218f3 100755 --- a/manifests/dev/clickhouse-operator-install.sh +++ b/manifests/dev/clickhouse-operator-install.sh @@ -46,8 +46,8 @@ function download_file() { local LOCAL_FILE="${LOCAL_DIR}/${FILE}" REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator" - BRANCH="dev-vladislav" - #BRANCH="master" + #BRANCH="dev-vladislav" + BRANCH="master" FILE_URL="${REPO_URL}/${BRANCH}/${REPO_DIR}/${FILE}" # Check curl is in place From c0ac5ed66567524f5ad277e6781b560304147eee Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 19 Jun 2019 00:13:12 +0300 Subject: [PATCH 30/31] docs: make quick start use install script --- docs/quick-start.md | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/docs/quick-start.md b/docs/quick-start.md index 7231fafb5..3adc94542 
100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -13,13 +13,23 @@ # Prerequisites 1. Operational Kubernetes instance 1. Properly configured `kubectl` +1. `curl` # ClickHouse Operator Installation -Apply `clickhouse-operator` installation manifest. The simplest way - directly from github +Apply `clickhouse-operator` installation manifest. The simplest way - directly from `github`. +Please, `cd` into writable folder, because install script would download config files to build `.yaml` manifests from. ```bash -kubectl apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/operator/clickhouse-operator-install.yaml +cd ~ +curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/dev-vladislav/manifests/dev/clickhouse-operator-install.sh | CHOPERATOR_NAMESPACE=test-clickhouse-operator bash ``` +Take into account explicitly specified namespace +```bash +CHOPERATOR_NAMESPACE=test-clickhouse-operator +``` +This namespace would be created and used to install `clickhouse-operator` into. +Install script would download some `.yaml` and `.xml` files and install `clickhouse-operator` into specified namespace. + Operator installation process ```text customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com created @@ -31,7 +41,7 @@ service/clickhouse-operator-metrics created Check `clickhouse-operator` is running: ```bash -kubectl get pods -n kube-system +kubectl get pods -n test-clickhouse-operator ``` ```text NAME READY STATUS RESTARTS AGE @@ -62,7 +72,7 @@ This is the trivial [1 shard 1 replica](./examples/01-standard-layout-01-1shard- **WARNING**: Do not use it for anything other than 'Hello, world!', it does not have persistent storage! 
```bash -kubectl apply -n test -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/docs/examples/01-standard-layout-01-1shard-1repl.yaml +kubectl apply -n test-clickhouse-operator -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/docs/examples/01-standard-layout-01-1shard-1repl.yaml ``` ```text clickhouseinstallation.clickhouse.altinity.com/example-01 created @@ -86,7 +96,7 @@ spec: Once cluster is created, there are two checks to be made. ```bash -kubectl get pods -n test +kubectl get pods -n test-clickhouse-operator ``` ```text NAME READY STATUS RESTARTS AGE @@ -96,7 +106,7 @@ chi-b3d29f-a242-0-0-0 1/1 Running 0 10m Watch out for 'Running' status. Also check services created by an operator: ```bash -kubectl get service -n test +kubectl get service -n test-clickhouse-operator ``` ```text NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE @@ -121,7 +131,7 @@ Connected to ClickHouse server version 19.4.3 revision 54416. ``` 1. In case there is not **EXTERNAL-IP** available, we can access ClickHouse from inside Kubernetes cluster ```bash -kubectl -n test exec -it chi-b3d29f-a242-0-0-0 -- clickhouse-client +kubectl -n test-clickhouse-operator exec -it chi-b3d29f-a242-0-0-0 -- clickhouse-client ``` ```text ClickHouse client version 19.4.3.11. From 4125329515f645d9ee3d3dab8a3ba2656f1b5b06 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 19 Jun 2019 00:20:33 +0300 Subject: [PATCH 31/31] docs: minor --- docs/quick-start.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/quick-start.md b/docs/quick-start.md index 3adc94542..2e7cdddb4 100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -21,7 +21,7 @@ Apply `clickhouse-operator` installation manifest. The simplest way - directly f Please, `cd` into writable folder, because install script would download config files to build `.yaml` manifests from. 
```bash cd ~ -curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/dev-vladislav/manifests/dev/clickhouse-operator-install.sh | CHOPERATOR_NAMESPACE=test-clickhouse-operator bash +curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/dev/clickhouse-operator-install.sh | CHOPERATOR_NAMESPACE=test-clickhouse-operator bash ``` Take into account explicitly specified namespace ```bash