From 67322d4134cead8bfa99f07fc0600f534f27e3ae Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 1 May 2019 00:43:36 +0300 Subject: [PATCH 01/32] docs: unify verb in titles --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c0a9c3900..cd4f0d06f 100644 --- a/README.md +++ b/README.md @@ -38,8 +38,8 @@ The ClickHouse Operator for Kubernetes currently provides the following: * [ClickHouse Installation Custom Resource specification][crd_explained] **Maintanance tasks** - * [Adding replication to an existing ClickHouse cluster][update_cluster_add_replication] - * Adding shards and replicas + * [Add replication to an existing ClickHouse cluster][update_cluster_add_replication] + * Add shards and replicas * [Automatic schema creation][schema_migration] * [Update ClickHouse version][update_clickhouse_version] * [Update Operator version][update_operator] From 1909abdb57b909961b44b2ae5e012d00ab23d1c8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 1 May 2019 00:48:59 +0300 Subject: [PATCH 02/32] docs: docs list typos fix --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index 6d199e5b6..f69503f65 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,6 +14,6 @@ 1. [prometheus_setup.md](./prometheus_setup.md) - how to setup Prometheus 1. [quick-start.md](./quick-start.md) - quick start 1. [replication_setup.md](./replication_setup.md) - how to setup replication -1. [schema_migration.md](./schema_migration.md) - how operator migrates shena during cluster resize +1. [schema_migration.md](./schema_migration.md) - how operator migrates schema during cluster resize 1. [storage.md](./storage.md) - storage explained 1. 
[zookeeper_setup.md](./zookeeper_setup.md) - how to setup zookeeper From 1f5a83ce8086fe2eb5d1151c7033a9a5d955bf3f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 5 May 2019 14:24:11 +0300 Subject: [PATCH 03/32] env: automate unformatted sources search --- dev/find_unformatted_sources.sh | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100755 dev/find_unformatted_sources.sh diff --git a/dev/find_unformatted_sources.sh b/dev/find_unformatted_sources.sh new file mode 100755 index 000000000..0ce55fb41 --- /dev/null +++ b/dev/find_unformatted_sources.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Find unformatted .go sources + +# Exit immediately when a command fails +set -e + +# Only exit with zero if all commands of the pipeline exit successfully +set -o pipefail + +# Error on unset variables +set -u + +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +source ${CUR_DIR}/binary_build_config.sh + +# Prepare list of all .go files in the project, but exclude all files from /vendor/ folder +GO_FILES_LIST=$(find ${SRC_ROOT} -name \*.go -not -path "${SRC_ROOT}/vendor/*" -print) +# Prepare unformatted files list +UNFORMATTED_FILES_LIST=$(gofmt -l ${GO_FILES_LIST}) + +if [[ ${UNFORMATTED_FILES_LIST} ]]; then + echo "These files need to pass through 'go fmt'" + for FILE in ${UNFORMATTED_FILES_LIST}; do + echo ${FILE} + done + exit 1 +fi + +exit 0 From 9636a2599331cb82114197c6939955e37f895608 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 5 May 2019 14:30:56 +0300 Subject: [PATCH 04/32] env: make build scripts folder-independent --- dev/binary_build.sh | 4 +++- dev/binary_clean.sh | 4 +++- dev/find_unformatted_sources.sh | 10 ++++------ dev/image_build_altinity.sh | 6 +++++- dev/image_build_dev.sh | 6 +++++- dev/run_dev.sh | 8 +++++--- dev/update-codegen.sh | 3 +++ 7 files changed, 28 insertions(+), 13 deletions(-) diff --git a/dev/binary_build.sh b/dev/binary_build.sh index e6bd8f64b..0c022ba27 100755 --- a/dev/binary_build.sh +++ b/dev/binary_build.sh @@ -3,7 +3,9 @@ # Build clickhouse-operator # Do not forget to update version -source ./binary_build_config.sh +# Source configuration +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source ${CUR_DIR}/binary_build_config.sh #CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ${CUR_DIR}/clickhouse-operator ${SRC_ROOT}/cmd/clickhouse-operator CGO_ENABLED=0 go build -o ${OPERATOR_BIN} ${SRC_ROOT}/cmd/clickhouse-operator diff --git a/dev/binary_clean.sh b/dev/binary_clean.sh index c46e8ced6..308210aec 100755 --- a/dev/binary_clean.sh +++ b/dev/binary_clean.sh @@ -3,6 +3,8 @@ # Delete clickhouse-operator # Do not forget to update version -source ./binary_build_config.sh +# Source configuration +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source ${CUR_DIR}/binary_build_config.sh rm -f ${OPERATOR_BIN} diff --git a/dev/find_unformatted_sources.sh b/dev/find_unformatted_sources.sh index 0ce55fb41..51aec9be8 100755 --- a/dev/find_unformatted_sources.sh +++ b/dev/find_unformatted_sources.sh @@ -3,16 +3,14 @@ # Find unformatted .go sources # Exit immediately when a command fails -set -e - +set -o errexit +# Error on unset variables +set -o nounset # Only exit with zero if all commands of the pipeline exit successfully set -o pipefail -# Error on unset variables -set -u - +# Source configuration CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - source ${CUR_DIR}/binary_build_config.sh # Prepare list of all .go files 
in the project, but exclude all files from /vendor/ folder diff --git a/dev/image_build_altinity.sh b/dev/image_build_altinity.sh index d9eaa682e..7cc84e30e 100755 --- a/dev/image_build_altinity.sh +++ b/dev/image_build_altinity.sh @@ -2,6 +2,10 @@ # Production docker image builder +# Source configuration +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source ${CUR_DIR}/binary_build_config.sh + # Externally configurable build-dependent options TAG="${TAG:-altinity/clickhouse-operator:dev}" DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN:-altinitybuilds}" @@ -12,4 +16,4 @@ TAG="${TAG}" \ DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN}" \ DOCKERHUB_PUBLISH="${DOCKERHUB_PUBLISH}" \ MINIKUBE="${MINIKUBE}" \ -./image_build_universal.sh +${CUR_DIR}/image_build_universal.sh diff --git a/dev/image_build_dev.sh b/dev/image_build_dev.sh index 82e872578..41fa54f24 100755 --- a/dev/image_build_dev.sh +++ b/dev/image_build_dev.sh @@ -2,6 +2,10 @@ # Dev docker image builder +# Source configuration +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source ${CUR_DIR}/binary_build_config.sh + # Externally configurable build-dependent options TAG="${TAG:-sunsingerus/clickhouse-operator:dev}" DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN:-sunsingerus}" @@ -12,4 +16,4 @@ TAG="${TAG}" \ DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN}" \ DOCKERHUB_PUBLISH="${DOCKERHUB_PUBLISH}" \ MINIKUBE="${MINIKUBE}" \ -./image_build_universal.sh +${CUR_DIR}/image_build_universal.sh diff --git a/dev/run_dev.sh b/dev/run_dev.sh index d54ad5b84..35f3c226a 100755 --- a/dev/run_dev.sh +++ b/dev/run_dev.sh @@ -3,11 +3,13 @@ # Run clickhouse-operator # Do not forget to update version -source ./binary_build_config.sh +# Source configuration +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source ${CUR_DIR}/binary_build_config.sh LOG_DIR=${CUR_DIR}/log echo -n "Building binary, please wait..." -if ./binary_build.sh; then +if ${CUR_DIR}/binary_build.sh; then echo "successfully built clickhouse-operator. Starting" mkdir -p ${LOG_DIR} @@ -25,7 +27,7 @@ if ./binary_build.sh; then # -stderrthreshold=FATAL Log events at or above this severity are logged to standard error as well as to files # And clean binary after run. It'll be rebuilt next time - ./binary_clean.sh + ${CUR_DIR}/binary_clean.sh echo "======================" echo "=== Logs available ===" diff --git a/dev/update-codegen.sh b/dev/update-codegen.sh index 1d19669e2..f8ff94d5c 100755 --- a/dev/update-codegen.sh +++ b/dev/update-codegen.sh @@ -1,7 +1,10 @@ #!/bin/bash +# Exit immediately when a command fails set -o errexit +# Error on unset variables set -o nounset +# Only exit with zero if all commands of the pipeline exit successfully set -o pipefail SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. 
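A note on the CUR_DIR pattern that PATCH 04 applies across all of these scripts: resolving the script's own directory from BASH_SOURCE is what makes the scripts folder-independent, since sibling files are then referenced relative to the script file itself rather than to the caller's working directory. A minimal sketch of the before/after behavior (the config file name is taken from the patch above):

    #!/bin/bash
    # Before: path is resolved relative to the caller's working directory,
    # so this breaks unless the script is invoked from inside dev/
    source ./binary_build_config.sh

    # After: resolve the directory this script itself lives in...
    CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
    # ...and reference sibling files relative to it - works from any directory
    source "${CUR_DIR}/binary_build_config.sh"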
From 23201e1e5465659bbc681be6dd5d949c180c6f46 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 5 May 2019 14:36:24 +0300 Subject: [PATCH 05/32] docs: minor --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 65e145e6e..a7b3bbe33 100644 --- a/README.md +++ b/README.md @@ -38,8 +38,8 @@ The ClickHouse Operator for Kubernetes currently provides the following: * [ClickHouse Installation Custom Resource specification][crd_explained] **Maintenance tasks** - * [Adding replication to an existing ClickHouse cluster][update_cluster_add_replication] - * Adding shards and replicas + * [Add replication to an existing ClickHouse cluster][update_cluster_add_replication] + * Add shards and replicas * [Automatic schema creation][schema_migration] * [Update ClickHouse version][update_clickhouse_version] * [Update Operator version][update_operator] From 77d98698eb41af1a018c58527a5a6b449e6cd8fe Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 5 May 2019 14:39:45 +0300 Subject: [PATCH 06/32] dev: formatter --- dev/RnD/chopsim/parser/parser.go | 2 +- dev/RnD/diff/main.go | 70 ++++++++++++++-------------- dev/RnD/messagediff/chi_data.go | 60 ++++++++++++------------ dev/RnD/messagediff/chi_processor.go | 2 +- dev/RnD/messagediff/simple.go | 36 +++++++------- pkg/models/listers.go | 1 - pkg/models/util.go | 2 +- 7 files changed, 85 insertions(+), 88 deletions(-) diff --git a/dev/RnD/chopsim/parser/parser.go b/dev/RnD/chopsim/parser/parser.go index b8e6d63e4..b2542ce96 100644 --- a/dev/RnD/chopsim/parser/parser.go +++ b/dev/RnD/chopsim/parser/parser.go @@ -348,7 +348,7 @@ func (chi *ClickHouseInstallation) createConfigMapObjects(data map[string]string return cmList } -// Returns list of services: +// Returns list of services: // one service per pod with internal name, and one service for installation itself that should finally bind to: // clickhouse-..svc.cluster.local func (chi *ClickHouseInstallation) createServiceObjects(o *genOptions) serviceList { diff --git a/dev/RnD/diff/main.go b/dev/RnD/diff/main.go index ef837ac10..987c4201d 100644 --- a/dev/RnD/diff/main.go +++ b/dev/RnD/diff/main.go @@ -17,8 +17,8 @@ package main import ( "fmt" . 
"github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" - corev1 "k8s.io/api/core/v1" "github.com/r3labs/diff" + corev1 "k8s.io/api/core/v1" ) type Order struct { @@ -47,8 +47,6 @@ func ex4() ([]int, []int) { return a, b } - - func ex3() ([]int, []int) { a := []int{1, 2, 3, 4} b := []int{1, 2, 3} @@ -66,12 +64,12 @@ func ex2() (ChiSpec, ChiSpec) { Profile: "", }, Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -86,9 +84,9 @@ func ex2() (ChiSpec, ChiSpec) { }, }, }, - Users: nil, + Users: nil, Profiles: nil, - Quotas: nil, + Quotas: nil, Settings: nil, Clusters: []ChiCluster{ @@ -97,12 +95,12 @@ func ex2() (ChiSpec, ChiSpec) { // useless Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, @@ -121,17 +119,17 @@ func ex2() (ChiSpec, ChiSpec) { // useless ReplicasCount: 1, - Weight: 1, + Weight: 1, InternalReplication: "yes", // useless Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, @@ -141,12 +139,12 @@ func ex2() (ChiSpec, ChiSpec) { Port: 9000, Deployment: ChiDeployment{ - PodTemplate: "podTemplate1", + PodTemplate: "podTemplate1", VolumeClaimTemplate: "volumeClaimTemplate1", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -159,16 +157,16 @@ func ex2() (ChiSpec, ChiSpec) { }, Templates: ChiTemplates{ - PodTemplates: []ChiPodTemplate{ + PodTemplates: []ChiPodTemplate{ { - Name: "podTemplate1", + Name: "podTemplate1", Containers: []corev1.Container{}, Volumes: []corev1.Volume{}, }, }, VolumeClaimTemplates: []ChiVolumeClaimTemplate{ { - Name:"volumeClaimTemplate1", + Name: "volumeClaimTemplate1", PersistentVolumeClaim: corev1.PersistentVolumeClaim{}, }, }, @@ -184,12 +182,12 @@ func ex2() (ChiSpec, ChiSpec) { Profile: "", }, Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -204,9 +202,9 @@ func ex2() (ChiSpec, ChiSpec) { }, }, }, - Users: nil, + Users: nil, Profiles: nil, - Quotas: nil, + Quotas: nil, Settings: nil, Clusters: []ChiCluster{ @@ -215,12 +213,12 @@ func ex2() (ChiSpec, ChiSpec) { // useless Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, @@ -239,17 +237,17 @@ func ex2() (ChiSpec, ChiSpec) { // useless ReplicasCount: 1, - Weight: 1, + Weight: 1, InternalReplication: "yes", // useless Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, @@ -259,12 +257,12 @@ func ex2() (ChiSpec, ChiSpec) { Port: 9000, Deployment: ChiDeployment{ - PodTemplate: "podTemplate1", + PodTemplate: "podTemplate1", VolumeClaimTemplate: "volumeClaimTemplate1", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: 
"", Fingerprint: "", }, }, @@ -273,12 +271,12 @@ func ex2() (ChiSpec, ChiSpec) { Port: 9000, Deployment: ChiDeployment{ - PodTemplate: "podTemplate1", + PodTemplate: "podTemplate1", VolumeClaimTemplate: "volumeClaimTemplate1", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -291,16 +289,16 @@ func ex2() (ChiSpec, ChiSpec) { }, Templates: ChiTemplates{ - PodTemplates: []ChiPodTemplate{ + PodTemplates: []ChiPodTemplate{ { - Name: "podTemplate1", + Name: "podTemplate1", Containers: []corev1.Container{}, Volumes: []corev1.Volume{}, }, }, VolumeClaimTemplates: []ChiVolumeClaimTemplate{ { - Name:"volumeClaimTemplate1", + Name: "volumeClaimTemplate1", PersistentVolumeClaim: corev1.PersistentVolumeClaim{}, }, }, @@ -312,14 +310,14 @@ func ex2() (ChiSpec, ChiSpec) { func ex1() (Order, Order) { a := Order{ - ID: "1234", + ID: "1234", Items: []int{1, 2, 3, 4}, } b := Order{ - ID: "1234", + ID: "1234", Items: []int{1, 2, 4}, } return a, b -} \ No newline at end of file +} diff --git a/dev/RnD/messagediff/chi_data.go b/dev/RnD/messagediff/chi_data.go index 77d8de3aa..09a57ac77 100644 --- a/dev/RnD/messagediff/chi_data.go +++ b/dev/RnD/messagediff/chi_data.go @@ -15,12 +15,12 @@ func exCHI1() (ChiSpec, ChiSpec) { Profile: "", }, Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -35,9 +35,9 @@ func exCHI1() (ChiSpec, ChiSpec) { }, }, }, - Users: nil, + Users: nil, Profiles: nil, - Quotas: nil, + Quotas: nil, Settings: nil, Clusters: []ChiCluster{ @@ -46,12 +46,12 @@ func exCHI1() (ChiSpec, ChiSpec) { // useless Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, @@ -70,17 +70,17 @@ func exCHI1() (ChiSpec, ChiSpec) { // useless ReplicasCount: 1, - Weight: 1, + Weight: 1, InternalReplication: "yes", // useless Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, @@ -90,12 +90,12 @@ func exCHI1() (ChiSpec, ChiSpec) { Port: 9000, Deployment: ChiDeployment{ - PodTemplate: "podTemplate1", + PodTemplate: "podTemplate1", VolumeClaimTemplate: "volumeClaimTemplate1", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -108,16 +108,16 @@ func exCHI1() (ChiSpec, ChiSpec) { }, Templates: ChiTemplates{ - PodTemplates: []ChiPodTemplate{ + PodTemplates: []ChiPodTemplate{ { - Name: "podTemplate1", + Name: "podTemplate1", Containers: []corev1.Container{}, Volumes: []corev1.Volume{}, }, }, VolumeClaimTemplates: []ChiVolumeClaimTemplate{ { - Name:"volumeClaimTemplate1", + Name: "volumeClaimTemplate1", PersistentVolumeClaim: corev1.PersistentVolumeClaim{}, }, }, @@ -133,12 +133,12 @@ func exCHI1() (ChiSpec, ChiSpec) { Profile: "", }, Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -153,9 +153,9 @@ func exCHI1() (ChiSpec, ChiSpec) { }, }, }, - Users: nil, + Users: nil, Profiles: nil, - Quotas: nil, + Quotas: nil, Settings: nil, Clusters: 
[]ChiCluster{ @@ -164,12 +164,12 @@ func exCHI1() (ChiSpec, ChiSpec) { // useless Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, @@ -188,17 +188,17 @@ func exCHI1() (ChiSpec, ChiSpec) { // useless ReplicasCount: 1, - Weight: 1, + Weight: 1, InternalReplication: "yes", // useless Deployment: ChiDeployment{ - PodTemplate: "", + PodTemplate: "", VolumeClaimTemplate: "", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, @@ -208,12 +208,12 @@ func exCHI1() (ChiSpec, ChiSpec) { Port: 9000, Deployment: ChiDeployment{ - PodTemplate: "podTemplate1", + PodTemplate: "podTemplate1", VolumeClaimTemplate: "volumeClaimTemplate1", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -222,12 +222,12 @@ func exCHI1() (ChiSpec, ChiSpec) { Port: 9000, Deployment: ChiDeployment{ - PodTemplate: "podTemplate1", + PodTemplate: "podTemplate1", VolumeClaimTemplate: "volumeClaimTemplate1", Zone: ChiDeploymentZone{ MatchLabels: nil, // map[string]string, }, - Scenario: "", + Scenario: "", Fingerprint: "", }, }, @@ -240,16 +240,16 @@ func exCHI1() (ChiSpec, ChiSpec) { }, Templates: ChiTemplates{ - PodTemplates: []ChiPodTemplate{ + PodTemplates: []ChiPodTemplate{ { - Name: "podTemplate1", + Name: "podTemplate1", Containers: []corev1.Container{}, Volumes: []corev1.Volume{}, }, }, VolumeClaimTemplates: []ChiVolumeClaimTemplate{ { - Name:"volumeClaimTemplate1", + Name: "volumeClaimTemplate1", PersistentVolumeClaim: corev1.PersistentVolumeClaim{}, }, }, diff --git a/dev/RnD/messagediff/chi_processor.go b/dev/RnD/messagediff/chi_processor.go index 63d00cd13..6798ad507 100644 --- a/dev/RnD/messagediff/chi_processor.go +++ b/dev/RnD/messagediff/chi_processor.go @@ -11,7 +11,7 @@ func processor(diff *messagediff.Diff) { shardIndex := -1 replicaIndex := -1 structField := "" - for pPath:= range diff.Added { + for pPath := range diff.Added { for i := range *pPath { pathNode := (*pPath)[i] switch pathNode.(type) { diff --git a/dev/RnD/messagediff/simple.go b/dev/RnD/messagediff/simple.go index 701af9790..35ae806fe 100644 --- a/dev/RnD/messagediff/simple.go +++ b/dev/RnD/messagediff/simple.go @@ -103,9 +103,9 @@ func ex7() (struct5, struct5) { { s3: struct3{ s2: struct2{ - A:1, - b:2, - C:[]int{1}, + A: 1, + b: 2, + C: []int{1}, }, }, }, @@ -126,9 +126,9 @@ func ex7() (struct5, struct5) { { s3: struct3{ s2: struct2{ - A:1, - b:2, - C:[]int{1}, + A: 1, + b: 2, + C: []int{1}, }, }, }, @@ -144,9 +144,9 @@ func ex8() (struct5, struct5) { { s3: struct3{ s2: struct2{ - A:1, - b:2, - C:[]int{1}, + A: 1, + b: 2, + C: []int{1}, }, }, }, @@ -158,9 +158,9 @@ func ex8() (struct5, struct5) { { s3: struct3{ s2: struct2{ - A:1, - b:2, - C:[]int{1}, + A: 1, + b: 2, + C: []int{1}, }, }, }, @@ -185,9 +185,9 @@ func ex9() (struct5, struct5) { { s3: struct3{ s2: struct2{ - A:1, - b:2, - C:[]int{1}, + A: 1, + b: 2, + C: []int{1}, }, }, }, @@ -199,9 +199,9 @@ func ex9() (struct5, struct5) { { s3: struct3{ s2: struct2{ - A:1, - b:2, - C:[]int{1, 1}, + A: 1, + b: 2, + C: []int{1, 1}, }, }, }, diff --git a/pkg/models/listers.go b/pkg/models/listers.go index 43c1e007b..6ecaf5776 100644 --- a/pkg/models/listers.go +++ b/pkg/models/listers.go @@ -30,7 +30,6 @@ func ListPodFQDNs(chi *chiv1.ClickHouseInstallation) []string { return names } - func Yaml(chi 
*chiv1.ClickHouseInstallation) string { if data, err := yaml.Marshal(chi); err != nil { return "" diff --git a/pkg/models/util.go b/pkg/models/util.go index 021c66a22..a30c483fc 100644 --- a/pkg/models/util.go +++ b/pkg/models/util.go @@ -16,8 +16,8 @@ package models import ( "encoding/hex" - "io" "fmt" + "io" "math/rand" "time" ) From 2dd844b0e9ed1976cb6f7198e70527446f5d0f6f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 5 May 2019 17:24:10 +0300 Subject: [PATCH 07/32] env: advanced ZK maintenance scripts --- .../zookeeper/advanced/create-zookeeper.sh | 18 ++++++++++++++---- .../zookeeper/advanced/delete-zookeeper.sh | 4 +++- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/manifests/zookeeper/advanced/create-zookeeper.sh b/manifests/zookeeper/advanced/create-zookeeper.sh index e545f62dc..dad6583e3 100755 --- a/manifests/zookeeper/advanced/create-zookeeper.sh +++ b/manifests/zookeeper/advanced/create-zookeeper.sh @@ -1,7 +1,17 @@ #!/bin/bash -kubectl create namespace zoons -for f in 01-service-client-access.yaml 02-headless-service.yaml 03-pod-disruption-budget.yaml 04-storageclass-zookeeper.yaml 05-stateful-set.yaml; do - kubectl apply -f $f -n zoons -done +ZK_NAMESPACE="${ZK_NAMESPACE:-zoons}" +YAML_FILES_LIST="\ +01-service-client-access.yaml \ +02-headless-service.yaml \ +03-pod-disruption-budget.yaml \ +04-storageclass-zookeeper.yaml \ +05-stateful-set.yaml\ +" + +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +kubectl create namespace ${ZK_NAMESPACE} +for FILE in ${YAML_FILES_LIST}; do + kubectl -n "${ZK_NAMESPACE}" apply -f "${CUR_DIR}/${FILE}" +done diff --git a/manifests/zookeeper/advanced/delete-zookeeper.sh b/manifests/zookeeper/advanced/delete-zookeeper.sh index 2ff8d2112..8d1ba7e82 100755 --- a/manifests/zookeeper/advanced/delete-zookeeper.sh +++ b/manifests/zookeeper/advanced/delete-zookeeper.sh @@ -1,3 +1,5 @@ #!/bin/bash -kubectl delete namespace zoons +ZK_NAMESPACE="${ZK_NAMESPACE:-zoons}" + +kubectl delete namespace ${ZK_NAMESPACE} From 64d674bb540ccee949641177ee528851ae3d42a6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 5 May 2019 18:12:07 +0300 Subject: [PATCH 08/32] examples: zookeeper with PersistentVolume and zookeeper with emptyDir create/delete scripts --- .../advanced/03-pod-disruption-budget.yaml | 2 +- .../advanced/04-storageclass-zookeeper.yaml | 16 +-- .../05-stateful-set-persistent-volume.yaml | 119 ++++++++++++++++++ ...l => 05-stateful-set-volume-emptyDir.yaml} | 2 + .../advanced/zookeeper-create-universal.sh | 21 ++++ ... => zookeeper-persistent-volume-create.sh} | 7 +- ... 
=> zookeeper-persistent-volume-delete.sh} | 0 .../zookeeper-volume-emptyDir-create.sh | 14 +++ .../zookeeper-volume-emptyDir-delete.sh | 5 + .../zookeeper/advanced/zookeeper-watch.sh | 5 + 10 files changed, 178 insertions(+), 13 deletions(-) create mode 100644 manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml rename manifests/zookeeper/advanced/{05-stateful-set.yaml => 05-stateful-set-volume-emptyDir.yaml} (94%) create mode 100644 manifests/zookeeper/advanced/zookeeper-create-universal.sh rename manifests/zookeeper/advanced/{create-zookeeper.sh => zookeeper-persistent-volume-create.sh} (61%) rename manifests/zookeeper/advanced/{delete-zookeeper.sh => zookeeper-persistent-volume-delete.sh} (100%) create mode 100755 manifests/zookeeper/advanced/zookeeper-volume-emptyDir-create.sh create mode 100755 manifests/zookeeper/advanced/zookeeper-volume-emptyDir-delete.sh create mode 100755 manifests/zookeeper/advanced/zookeeper-watch.sh diff --git a/manifests/zookeeper/advanced/03-pod-disruption-budget.yaml b/manifests/zookeeper/advanced/03-pod-disruption-budget.yaml index 0fc1ec2b7..c8d2114c5 100644 --- a/manifests/zookeeper/advanced/03-pod-disruption-budget.yaml +++ b/manifests/zookeeper/advanced/03-pod-disruption-budget.yaml @@ -2,7 +2,7 @@ apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: zookeeper-pod-distribution-budget + name: zookeeper-pod-disruption-budget spec: selector: matchLabels: diff --git a/manifests/zookeeper/advanced/04-storageclass-zookeeper.yaml b/manifests/zookeeper/advanced/04-storageclass-zookeeper.yaml index 88fe04cca..28d25491d 100644 --- a/manifests/zookeeper/advanced/04-storageclass-zookeeper.yaml +++ b/manifests/zookeeper/advanced/04-storageclass-zookeeper.yaml @@ -1,8 +1,10 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: storageclass-zookeeper -provisioner: kubernetes.io/no-provisioner -#volumeBindingMode: WaitForFirstConsumer -volumeBindingMode: Immediate +# Specify StorageClass in case there is no default storage class provided +#apiVersion: storage.k8s.io/v1 +#kind: StorageClass +#metadata: +# name: storageclass-zookeeper +#provisioner: kubernetes.io/no-provisioner +## Choose desired 'volumeBindingMode' +##volumeBindingMode: WaitForFirstConsumer +#volumeBindingMode: Immediate diff --git a/manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml b/manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml new file mode 100644 index 000000000..997ac0fe3 --- /dev/null +++ b/manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml @@ -0,0 +1,119 @@ +# Setup Zookeeper StatefulSet +# Possible params: +# 1. replicas +# 2. memory +# 3. cpu +# 4. storage +# 5. storageClassName +# 6. 
user to run app +apiVersion: apps/v1 +kind: StatefulSet +metadata: + # nodes would be named as zookeeper-0, zookeeper-1, zookeeper-2 + name: zookeeper +spec: + selector: + matchLabels: + app: zookeeper + serviceName: zookeepers + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + what: node + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + containers: + - name: kubernetes-zookeeper + imagePullPolicy: Always + image: "k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10" + resources: + requests: + memory: "1Gi" + cpu: "0.5" + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: server + - containerPort: 3888 + name: leader-election + command: + - sh + - -c + - "start-zookeeper \ + --servers=3 \ + --data_dir=/var/lib/zookeeper/data \ + --data_log_dir=/var/lib/zookeeper/data/log \ + --conf_dir=/opt/zookeeper/conf \ + --client_port=2181 \ + --election_port=3888 \ + --server_port=2888 \ + --tick_time=2000 \ + --init_limit=10 \ + --sync_limit=5 \ + --heap=512M \ + --max_client_cnxns=60 \ + --snap_retain_count=3 \ + --purge_interval=12 \ + --max_session_timeout=40000 \ + --min_session_timeout=4000 \ + --log_level=INFO" + readinessProbe: + exec: + command: + - sh + - -c + - "zookeeper-ready 2181" + initialDelaySeconds: 10 + timeoutSeconds: 5 + livenessProbe: + exec: + command: + - sh + - -c + - "zookeeper-ready 2181" + initialDelaySeconds: 10 + timeoutSeconds: 5 + volumeMounts: + - name: datadir-volume + mountPath: /var/lib/zookeeper + # Run as a non-privileged user + securityContext: + runAsUser: 1000 + fsGroup: 1000 +## Mount either emptyDir via volume or PV of 'storageclass-zookeeper' storageClass +## Uncomment what is required +# volumes: +# - name: datadir-volume +# emptyDir: +# medium: "" #accepted values: empty str (means node's default medium) or Memory +# sizeLimit: 1Gi +## Mount either emptyDir via volume or PV of 'storageclass-zookeeper' storageClass +## Uncomment what is required + volumeClaimTemplates: + - metadata: + name: datadir-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +## storageClassName has to be coordinated with k8s admin and has to be created as a `kind: StorageClass` resource +## In case no storageClassName specified at all - means use default storageClassName +## In case storageClassName specified as empty value "" - means do not use dynamic provisioning +# storageClassName: storageclass-zookeeper diff --git a/manifests/zookeeper/advanced/05-stateful-set.yaml b/manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml similarity index 94% rename from manifests/zookeeper/advanced/05-stateful-set.yaml rename to manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml index 9c56365e1..4d9cf41f6 100644 --- a/manifests/zookeeper/advanced/05-stateful-set.yaml +++ b/manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml @@ -114,4 +114,6 @@ spec: # requests: # storage: 1Gi ## storageClassName has to be coordinated with k8s admin and has to be created as a `kind: StorageClass` resource +## In case no storageClassName specified at all - means use default storageClassName +## In case storageClassName specified as empty value "" - means do not use dynamic provisioning # storageClassName: storageclass-zookeeper diff --git 
a/manifests/zookeeper/advanced/zookeeper-create-universal.sh b/manifests/zookeeper/advanced/zookeeper-create-universal.sh new file mode 100644 index 000000000..e4aff6a0a --- /dev/null +++ b/manifests/zookeeper/advanced/zookeeper-create-universal.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# This file is not supposed to be run directly +# It must be sourced from task "create" files + +if [[ -z "${ZK_NAMESPACE}" ]]; then + echo "Please specify \$ZK_NAMESPACE" + exit -1 +fi + +if [[ -z "${YAML_FILES_LIST}" ]]; then + echo "Please specify \$YAML_FILES_LIST" + exit -1 +fi + +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +kubectl create namespace ${ZK_NAMESPACE} +for FILE in ${YAML_FILES_LIST}; do + kubectl -n "${ZK_NAMESPACE}" apply -f "${CUR_DIR}/${FILE}" +done diff --git a/manifests/zookeeper/advanced/create-zookeeper.sh b/manifests/zookeeper/advanced/zookeeper-persistent-volume-create.sh similarity index 61% rename from manifests/zookeeper/advanced/create-zookeeper.sh rename to manifests/zookeeper/advanced/zookeeper-persistent-volume-create.sh index dad6583e3..1a3ebf293 100755 --- a/manifests/zookeeper/advanced/create-zookeeper.sh +++ b/manifests/zookeeper/advanced/zookeeper-persistent-volume-create.sh @@ -6,12 +6,9 @@ YAML_FILES_LIST="\ 02-headless-service.yaml \ 03-pod-disruption-budget.yaml \ 04-storageclass-zookeeper.yaml \ -05-stateful-set.yaml\ +05-stateful-set-persistent-volume.yaml\ " CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -kubectl create namespace ${ZK_NAMESPACE} -for FILE in ${YAML_FILES_LIST}; do - kubectl -n "${ZK_NAMESPACE}" apply -f "${CUR_DIR}/${FILE}" -done +source "${CUR_DIR}/zookeeper-create-universal.sh" diff --git a/manifests/zookeeper/advanced/delete-zookeeper.sh b/manifests/zookeeper/advanced/zookeeper-persistent-volume-delete.sh similarity index 100% rename from manifests/zookeeper/advanced/delete-zookeeper.sh rename to manifests/zookeeper/advanced/zookeeper-persistent-volume-delete.sh diff --git a/manifests/zookeeper/advanced/zookeeper-volume-emptyDir-create.sh b/manifests/zookeeper/advanced/zookeeper-volume-emptyDir-create.sh new file mode 100755 index 000000000..29e9004b1 --- /dev/null +++ b/manifests/zookeeper/advanced/zookeeper-volume-emptyDir-create.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +ZK_NAMESPACE="${ZK_NAMESPACE:-zoons}" +YAML_FILES_LIST="\ +01-service-client-access.yaml \ +02-headless-service.yaml \ +03-pod-disruption-budget.yaml \ +04-storageclass-zookeeper.yaml \ +05-stateful-set-volume-emptyDir.yaml\ +" + +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +source "${CUR_DIR}/zookeeper-create-universal.sh" diff --git a/manifests/zookeeper/advanced/zookeeper-volume-emptyDir-delete.sh b/manifests/zookeeper/advanced/zookeeper-volume-emptyDir-delete.sh new file mode 100755 index 000000000..8d1ba7e82 --- /dev/null +++ b/manifests/zookeeper/advanced/zookeeper-volume-emptyDir-delete.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +ZK_NAMESPACE="${ZK_NAMESPACE:-zoons}" + +kubectl delete namespace ${ZK_NAMESPACE} diff --git a/manifests/zookeeper/advanced/zookeeper-watch.sh b/manifests/zookeeper/advanced/zookeeper-watch.sh new file mode 100755 index 000000000..168eaf04b --- /dev/null +++ b/manifests/zookeeper/advanced/zookeeper-watch.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +ZK_NAMESPACE="${ZK_NAMESPACE:-zoons}" + +kubectl -n "${ZK_NAMESPACE}" get all,pv,pvc -o wide From 9638d696363010de910042360cf31ac88dc70a3f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 6 May 2019 18:49:42 +0300 Subject: [PATCH 
09/32] dev: pass Pod and Container runtime info into clickhouse-operator via env vars --- .../app/clickhouse_operator.go | 40 ++++++++++++- ...ckhouse-operator-install-dev-template.yaml | 53 ++++++++++++++++- manifests/dev/deployment-dockerhub-dev.yaml | 49 ++++++++++++++++ .../operator/clickhouse-operator-install.yaml | 58 +++++++++++++++++-- 4 files changed, 193 insertions(+), 7 deletions(-) diff --git a/cmd/clickhouse-operator/app/clickhouse_operator.go b/cmd/clickhouse-operator/app/clickhouse_operator.go index 3172162c2..2d8101f35 100644 --- a/cmd/clickhouse-operator/app/clickhouse_operator.go +++ b/cmd/clickhouse-operator/app/clickhouse_operator.go @@ -127,6 +127,42 @@ func createClientsets(config *kuberest.Config) (*kube.Clientset, *chopclientset. return kubeClientset, chopClientset } +func GetRuntimeParams() map[string]string { + res := make(map[string]string) + // This list of ENV VARS is specified in operator .yaml manifest, section "kind: Deployment" + vars := []string{ + // spec.nodeName: ip-172-20-52-62.ec2.internal + "OPERATOR_POD_NODE_NAME", + // metadata.name: clickhouse-operator-6f87589dbb-ftcsf + "OPERATOR_POD_NAME", + // metadata.namespace: kube-system + "OPERATOR_POD_NAMESPACE", + // status.podIP: 100.96.3.2 + "OPERATOR_POD_IP", + // spec.serviceAccount: clickhouse-operator + // spec.serviceAccountName: clickhouse-operator + "OPERATOR_POD_SERVICE_ACCOUNT", + + "OPERATOR_CONTAINER_CPU_REQUEST", + "OPERATOR_CONTAINER_CPU_LIMIT", + "OPERATOR_CONTAINER_MEM_REQUEST", + "OPERATOR_CONTAINER_MEM_LIMIT", + } + + for _, varName := range vars { + res[varName] = os.Getenv(varName) + } + + return res +} + +func LogRuntimeParams() { + runtimeParams := GetRuntimeParams() + for name, value := range runtimeParams { + glog.V(1).Infof("%s=%s\n", name, value) + } +} + // Run is an entry point of the application func Run() { if versionRequest { @@ -134,7 +170,9 @@ func Run() { os.Exit(0) } - glog.V(1).Infof("Starting clickhouse-operator versionRequest '%s'\n", Version) + glog.V(1).Infof("Starting clickhouse-operator version '%s'\n", Version) + LogRuntimeParams() + chopConfig, err := config.GetConfig(chopConfigFile) if err != nil { glog.Fatalf("Unable to build config file %v\n", err) diff --git a/manifests/dev/clickhouse-operator-install-dev-template.yaml b/manifests/dev/clickhouse-operator-install-dev-template.yaml index 5dc887c2e..cd5c400fd 100644 --- a/manifests/dev/clickhouse-operator-install-dev-template.yaml +++ b/manifests/dev/clickhouse-operator-install-dev-template.yaml @@ -245,8 +245,57 @@ spec: spec: serviceAccountName: clickhouse-operator containers: - - image: altinity/clickhouse-operator:dev - name: clickhouse-operator + - name: clickhouse-operator + image: altinity/clickhouse-operator:dev + env: + # spec.nodeName: ip-172-20-52-62.ec2.internal + - name: OPERATOR_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # metadata.name: clickhouse-operator-6f87589dbb-ftcsf + - name: OPERATOR_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # metadata.namespace: kube-system + - name: OPERATOR_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # status.podIP: 100.96.3.2 + - name: OPERATOR_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # spec.serviceAccount: clickhouse-operator + # spec.serviceAccountName: clickhouse-operator + - name: OPERATOR_POD_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + + # Container-specific + - name: OPERATOR_CONTAINER_CPU_REQUEST + valueFrom: + resourceFieldRef: + 
containerName: clickhouse-operator + resource: requests.cpu + - name: OPERATOR_CONTAINER_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.cpu + - name: OPERATOR_CONTAINER_MEM_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.memory + - name: OPERATOR_CONTAINER_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.memory --- # Setup ClusterIP Service to provide monitoring metrics for Prometheus # Service would be created in kubectl-specified namespace diff --git a/manifests/dev/deployment-dockerhub-dev.yaml b/manifests/dev/deployment-dockerhub-dev.yaml index c2b9a438e..284c73f95 100644 --- a/manifests/dev/deployment-dockerhub-dev.yaml +++ b/manifests/dev/deployment-dockerhub-dev.yaml @@ -21,3 +21,52 @@ spec: #image: sunsingerus/clickhouse-operator:dev image: $PERSONAL_DEV_INSTALL_IMAGE imagePullPolicy: Always + env: + # spec.nodeName: ip-172-20-52-62.ec2.internal + - name: OPERATOR_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # metadata.name: clickhouse-operator-6f87589dbb-ftcsf + - name: OPERATOR_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # metadata.namespace: kube-system + - name: OPERATOR_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # status.podIP: 100.96.3.2 + - name: OPERATOR_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # spec.serviceAccount: clickhouse-operator + # spec.serviceAccountName: clickhouse-operator + - name: OPERATOR_POD_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + + # Container-specific + - name: OPERATOR_CONTAINER_CPU_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.cpu + - name: OPERATOR_CONTAINER_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.cpu + - name: OPERATOR_CONTAINER_MEM_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.memory + - name: OPERATOR_CONTAINER_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.memory diff --git a/manifests/operator/clickhouse-operator-install.yaml b/manifests/operator/clickhouse-operator-install.yaml index 379f52780..d2f9405cc 100644 --- a/manifests/operator/clickhouse-operator-install.yaml +++ b/manifests/operator/clickhouse-operator-install.yaml @@ -245,8 +245,58 @@ spec: spec: serviceAccountName: clickhouse-operator containers: - - image: altinity/clickhouse-operator:latest - name: clickhouse-operator + - name: clickhouse-operator + image: altinity/clickhouse-operator:latest + env: + # Pod-specific + # spec.nodeName: ip-172-20-52-62.ec2.internal + - name: OPERATOR_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # metadata.name: clickhouse-operator-6f87589dbb-ftcsf + - name: OPERATOR_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # metadata.namespace: kube-system + - name: OPERATOR_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # status.podIP: 100.96.3.2 + - name: OPERATOR_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # spec.serviceAccount: clickhouse-operator + # spec.serviceAccountName: clickhouse-operator + - name: OPERATOR_POD_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + + # Container-specific + - name: OPERATOR_CONTAINER_CPU_REQUEST + valueFrom: + 
resourceFieldRef: + containerName: clickhouse-operator + resource: requests.cpu + - name: OPERATOR_CONTAINER_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.cpu + - name: OPERATOR_CONTAINER_MEM_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.memory + - name: OPERATOR_CONTAINER_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.memory --- # Setup ClusterIP Service to provide monitoring metrics for Prometheus # Service would be created in kubectl-specified namespace @@ -262,7 +312,7 @@ metadata: app: clickhouse-operator spec: ports: - - port: 8888 - name: clickhouse-operator-metrics + - port: 8888 + name: clickhouse-operator-metrics selector: app: clickhouse-operator From f122981e1cec9541a658bc71be5af01e7fad0a10 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 6 May 2019 19:54:17 +0300 Subject: [PATCH 10/32] zookeeper: separate quick-start examples into stateless and persistent --- .../zookeeper-1-node-create.sh | 0 .../zookeeper-1-node-delete.sh | 0 .../zookeeper-1-node.yaml | 151 ++++++++++++++++++ .../zookeeper-1-node-create.sh | 6 + .../zookeeper-1-node-delete.sh | 7 + .../zookeeper-1-node.yaml | 0 .../zookeeper-3-nodes-create.sh | 0 .../zookeeper-3-nodes-delete.sh | 0 .../zookeeper-3-nodes.yaml | 0 9 files changed, 164 insertions(+) rename manifests/zookeeper/{quick-start => quick-start-persistent-volume}/zookeeper-1-node-create.sh (100%) rename manifests/zookeeper/{quick-start => quick-start-persistent-volume}/zookeeper-1-node-delete.sh (100%) create mode 100644 manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node.yaml create mode 100755 manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh create mode 100755 manifests/zookeeper/quick-start-stateless/zookeeper-1-node-delete.sh rename manifests/zookeeper/{quick-start => quick-start-stateless}/zookeeper-1-node.yaml (100%) rename manifests/zookeeper/{quick-start => quick-start-stateless}/zookeeper-3-nodes-create.sh (100%) rename manifests/zookeeper/{quick-start => quick-start-stateless}/zookeeper-3-nodes-delete.sh (100%) rename manifests/zookeeper/{quick-start => quick-start-stateless}/zookeeper-3-nodes.yaml (100%) diff --git a/manifests/zookeeper/quick-start/zookeeper-1-node-create.sh b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-create.sh similarity index 100% rename from manifests/zookeeper/quick-start/zookeeper-1-node-create.sh rename to manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-create.sh diff --git a/manifests/zookeeper/quick-start/zookeeper-1-node-delete.sh b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-delete.sh similarity index 100% rename from manifests/zookeeper/quick-start/zookeeper-1-node-delete.sh rename to manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-delete.sh diff --git a/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node.yaml b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node.yaml new file mode 100644 index 000000000..a89cfb164 --- /dev/null +++ b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node.yaml @@ -0,0 +1,151 @@ +# Setup Service to provide access to Zookeeper for clients +apiVersion: v1 +kind: Service +metadata: + # DNS would be like zookeeper.zoons + name: zookeeper + labels: + app: zookeeper +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper + what: node +--- +# 
Setup Headless Service for StatefulSet +apiVersion: v1 +kind: Service +metadata: + # DNS would be like zookeeper-0.zookeepers.etc + name: zookeepers + labels: + app: zookeeper +spec: + ports: + - port: 2888 + name: server + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + what: node +--- +# Setup max number of unavailable pods in StatefulSet +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pod-disruption-budget +spec: + selector: + matchLabels: + app: zookeeper + maxUnavailable: 1 +--- +# Setup Zookeeper StatefulSet +# Possible params: +# 1. replicas +# 2. memory +# 3. cpu +# 4. storage +# 5. user to run app +apiVersion: apps/v1 +kind: StatefulSet +metadata: + # nodes would be named as zookeeper-0, zookeeper-1, zookeeper-2 + name: zookeeper +spec: + selector: + matchLabels: + app: zookeeper + serviceName: zookeepers + replicas: 1 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + what: node + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + containers: + - name: kubernetes-zookeeper + imagePullPolicy: Always + image: "k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10" + resources: + requests: + memory: "1Gi" + cpu: "0.5" + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: server + - containerPort: 3888 + name: leader-election + command: + - sh + - -c + - "start-zookeeper \ + --servers=1 \ + --data_dir=/var/lib/zookeeper/data \ + --data_log_dir=/var/lib/zookeeper/data/log \ + --conf_dir=/opt/zookeeper/conf \ + --client_port=2181 \ + --election_port=3888 \ + --server_port=2888 \ + --tick_time=2000 \ + --init_limit=10 \ + --sync_limit=5 \ + --heap=512M \ + --max_client_cnxns=60 \ + --snap_retain_count=3 \ + --purge_interval=12 \ + --max_session_timeout=40000 \ + --min_session_timeout=4000 \ + --log_level=INFO" + readinessProbe: + exec: + command: + - sh + - -c + - "zookeeper-ready 2181" + initialDelaySeconds: 10 + timeoutSeconds: 5 + livenessProbe: + exec: + command: + - sh + - -c + - "zookeeper-ready 2181" + initialDelaySeconds: 10 + timeoutSeconds: 5 + volumeMounts: + - name: datadir-volume + mountPath: /var/lib/zookeeper + # Run as a non-privileged user + securityContext: + runAsUser: 1000 + fsGroup: 1000 + volumeClaimTemplates: + - metadata: + name: datadir-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh b/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh new file mode 100755 index 000000000..55e04c53e --- /dev/null +++ b/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +ZK_NAMESPACE="${ZK_NAMESPACE:-zoo1ns}" + +kubectl create namespace "${ZK_NAMESPACE}" +kubectl --namespace="${ZK_NAMESPACE}" apply -f zookeeper-1-node.yaml diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-delete.sh b/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-delete.sh new file mode 100755 index 000000000..96e61cb60 --- /dev/null +++ b/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-delete.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +ZK_NAMESPACE="${ZK_NAMESPACE:-zoo1ns}" + +echo "Delete Zookeeper namespace ${ZK_NAMESPACE}" + +kubectl delete namespace 
"${ZK_NAMESPACE}" diff --git a/manifests/zookeeper/quick-start/zookeeper-1-node.yaml b/manifests/zookeeper/quick-start-stateless/zookeeper-1-node.yaml similarity index 100% rename from manifests/zookeeper/quick-start/zookeeper-1-node.yaml rename to manifests/zookeeper/quick-start-stateless/zookeeper-1-node.yaml diff --git a/manifests/zookeeper/quick-start/zookeeper-3-nodes-create.sh b/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-create.sh similarity index 100% rename from manifests/zookeeper/quick-start/zookeeper-3-nodes-create.sh rename to manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-create.sh diff --git a/manifests/zookeeper/quick-start/zookeeper-3-nodes-delete.sh b/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-delete.sh similarity index 100% rename from manifests/zookeeper/quick-start/zookeeper-3-nodes-delete.sh rename to manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-delete.sh diff --git a/manifests/zookeeper/quick-start/zookeeper-3-nodes.yaml b/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes.yaml similarity index 100% rename from manifests/zookeeper/quick-start/zookeeper-3-nodes.yaml rename to manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes.yaml From c496d0a8ff598aa23600dc4af8965678fd448cc9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 6 May 2019 19:57:07 +0300 Subject: [PATCH 11/32] zookeeper: make screate/delete scripts folder-agnostic --- .../quick-start-persistent-volume/zookeeper-1-node-create.sh | 4 +++- .../quick-start-stateless/zookeeper-1-node-create.sh | 4 +++- .../quick-start-stateless/zookeeper-3-nodes-create.sh | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-create.sh b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-create.sh index 55e04c53e..63a009f70 100755 --- a/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-create.sh +++ b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-create.sh @@ -2,5 +2,7 @@ ZK_NAMESPACE="${ZK_NAMESPACE:-zoo1ns}" +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + kubectl create namespace "${ZK_NAMESPACE}" -kubectl --namespace="${ZK_NAMESPACE}" apply -f zookeeper-1-node.yaml +kubectl --namespace="${ZK_NAMESPACE}" apply -f "${CUR_DIR}/zookeeper-1-node.yaml" diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh b/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh index 55e04c53e..63a009f70 100755 --- a/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh +++ b/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh @@ -2,5 +2,7 @@ ZK_NAMESPACE="${ZK_NAMESPACE:-zoo1ns}" +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + kubectl create namespace "${ZK_NAMESPACE}" -kubectl --namespace="${ZK_NAMESPACE}" apply -f zookeeper-1-node.yaml +kubectl --namespace="${ZK_NAMESPACE}" apply -f "${CUR_DIR}/zookeeper-1-node.yaml" diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-create.sh b/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-create.sh index 63f050fa6..846af9bb0 100755 --- a/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-create.sh +++ b/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-create.sh @@ -2,6 +2,8 @@ ZK_NAMESPACE="${ZK_NAMESPACE:-zoo3ns}" +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + kubectl create 
namespace "${ZK_NAMESPACE}" -kubectl --namespace="${ZK_NAMESPACE}" apply -f zookeeper-3-nodes.yaml +kubectl --namespace="${ZK_NAMESPACE}" apply -f "${CUR_DIR}/zookeeper-3-nodes.yaml" From ca71117637ca983b4beddb3cd7bc164faa866084 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 7 May 2019 00:15:21 +0300 Subject: [PATCH 12/32] config: bring config up-to-date --- config/config.yaml | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/config/config.yaml b/config/config.yaml index 7ef19be49..6afaa73d0 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -5,11 +5,11 @@ namespaces: - info - onemore -########################################### +################################################ ## ## Additional Configuration Files Section ## -########################################### +################################################ # Path to folder where ClickHouse configuration files common for all instances within CHI are located. chCommonConfigsPath: config.d @@ -25,11 +25,11 @@ chUsersConfigsPath: users.d # Manifests are applied in sorted alpha-numeric order chiTemplatesPath: templates.d -########################################### +################################################ ## -## Cluster Update Section +## Cluster Create/Update/Delete Objects Section ## -########################################### +################################################ # How many seconds to wait for created/updated StatefulSet to be Ready statefulSetUpdateTimeout: 50 @@ -37,5 +37,15 @@ statefulSetUpdateTimeout: 50 # How many seconds to wait between checks for created/updated StatefulSet status statefulSetUpdatePollPeriod: 2 -# What to do in case created/updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds -onStatefulSetUpdateFailureAction: abort +# What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds +# Possible options: +# 1. abort - do nothing, just break the process and wait for admin +# 2. delete - delete newly created problematic StatefulSet +onStatefulSetCreateFailureAction: delete + +# What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds +# Possible options: +# 1. abort - do nothing, just break the process and wait for admin +# 2. rollback - delete Pod and rollback StatefulSet to previous Generation. +# Pod would be recreated by StatefulSet based on rollback-ed configuration +onStatefulSetUpdateFailureAction: rollback From b6547e90b8ee11b5cf5d12182ce025b44dfd2dd1 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 7 May 2019 00:40:30 +0300 Subject: [PATCH 13/32] docs: enhance withsand configuration errors section --- docs/clickhouse_config_errors_handling.md | 28 +++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/docs/clickhouse_config_errors_handling.md b/docs/clickhouse_config_errors_handling.md index 66136bf31..565677f75 100644 --- a/docs/clickhouse_config_errors_handling.md +++ b/docs/clickhouse_config_errors_handling.md @@ -28,9 +28,37 @@ onStatefulSetUpdateFailureAction: rollback Regarding should operator continue with rolling update/create in case of failed StatefulSet it met - current behavior is to abort rolling process and let admin to decide how to proceed with current situation. +# Misconfiguration Examples +Let's take a look on real-life examples of misconfiguration opeartor can deal with. 
+There are several erroneous configurations located in +[docs/examples-withstand-errors folder](./examples-withstand-errors) +The operator can withstand these misconfigurations and continue to serve the ClickHouse installation. +- Incorrect ClickHouse image specified. Create new `ClickHouseInstallation` with incorrect image. Kubernetes can't create container with incorrect image. +[manifest](./examples-withstand-errors/01-incorrect-image-create.yaml) +- Incorrect ClickHouse image specified. Update existing `ClickHouseInstallation` with incorrect image. Kubernetes can't create container with incorrect image. +[initial position](./examples-withstand-errors/02-incorrect-image-update-01-initial-position.yaml) +[apply incorrect update](./examples-withstand-errors/02-incorrect-image-update-02-apply-incorrect-update.yaml) +[revert back](./examples-withstand-errors/02-incorrect-image-update-03-revert-and-apply.yaml) +- Incorrect ClickHouse settings specified. Create new `ClickHouseInstallation` with incorrect ClickHouse settings. ClickHouse instance can't start. +[manifest](./examples-withstand-errors/03-incorrect-settings-create.yaml) +- Incorrect ClickHouse settings specified. Update existing `ClickHouseInstallation` with incorrect ClickHouse settings. ClickHouse instance can't start. +[initial position](./examples-withstand-errors/04-incorrect-settings-update-01-initial-position.yaml) +[apply incorrect update](./examples-withstand-errors/04-incorrect-settings-update-02-apply-incorrect-update.yaml) +[revert back](./examples-withstand-errors/04-incorrect-settings-update-03-revert-and-apply.yaml) +- Incorrect `PodTemplate` specified. Create new `ClickHouseInstallation` with incorrect `PodTemplate`. Kubernetes can't create Pod. +[manifest](./examples-withstand-errors/05-incorrect-pod-template.yaml) + +`clickhouse-operator` is able to detect unsuccessful create/update operations. How exactly `clickhouse-operator` deals with the situation depends on the +```yaml +onStatefulSetCreateFailureAction +onStatefulSetUpdateFailureAction +``` +configuration settings. # Plans and discussion Interesting question is what to do with StatefulSets that were already successfully updated on the same run, before failed StatefulSet met. Available options are: 1. Do nothing. In this case ClickHouse cluster may be in some inconsistent state, because some replicas may be updated and some not. 1. try to rollback the whole cluster to some **previous** state. What this **previous** state be is a matter of discussion. Currently operator goes with 'do nothing' approach. 
+ From 4f4387c670848b4b2347cfa89bad5f47f69db224 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 8 May 2019 20:19:59 +0300 Subject: [PATCH 14/32] dev: drop tables on replica deletion --- pkg/controllers/chi/controller.go | 62 ++++++++-------------------- pkg/controllers/chi/deleters.go | 13 +++++- pkg/controllers/chi/events.go | 12 +++--- pkg/controllers/chi/types.go | 54 +++++++++++++++++-------- pkg/models/create_names.go | 8 ++-- pkg/models/schemer.go | 67 +++++++++++++++++++++---------- pkg/models/util.go | 13 ++++++ 7 files changed, 135 insertions(+), 94 deletions(-) diff --git a/pkg/controllers/chi/controller.go b/pkg/controllers/chi/controller.go index fa8315715..989f0e03b 100644 --- a/pkg/controllers/chi/controller.go +++ b/pkg/controllers/chi/controller.go @@ -75,52 +75,24 @@ func CreateController( // Create Controller instance controller := &Controller{ - // chopConfig used to keep clickhouse-oprator config - chopConfig: chopConfig, - - // kubeClient used to Create() k8s resources as c.kubeClient.AppsV1().StatefulSets(namespace).Create(name) - kubeClient: kubeClient, - // chopClient used to Update() CRD k8s resource as c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Update(chiCopy) - chopClient: chopClient, - - // chiLister used as chiLister.ClickHouseInstallations(namespace).Get(name) - chiLister: chiInformer.Lister(), - // chiListerSynced used in waitForCacheSync() - chiListerSynced: chiInformer.Informer().HasSynced, - - // serviceLister used as serviceLister.Services(namespace).Get(name) - serviceLister: serviceInformer.Lister(), - // serviceListerSynced used in waitForCacheSync() - serviceListerSynced: serviceInformer.Informer().HasSynced, - - // endpointsLister used as endpointsLister.Endpoints(namespace).Get(name) - endpointsLister: endpointsInformer.Lister(), - // endpointsListerSynced used in waitForCacheSync() - endpointsListerSynced: endpointsInformer.Informer().HasSynced, - - // configMapLister used as configMapLister.ConfigMaps(namespace).Get(name) - configMapLister: configMapInformer.Lister(), - // configMapListerSynced used in waitForCacheSync() - configMapListerSynced: configMapInformer.Informer().HasSynced, - - // statefulSetLister used as statefulSetLister.StatefulSets(namespace).Get(name) - statefulSetLister: statefulSetInformer.Lister(), - // statefulSetListerSynced used in waitForCacheSync() + chopConfig: chopConfig, + kubeClient: kubeClient, + chopClient: chopClient, + chiLister: chiInformer.Lister(), + chiListerSynced: chiInformer.Informer().HasSynced, + serviceLister: serviceInformer.Lister(), + serviceListerSynced: serviceInformer.Informer().HasSynced, + endpointsLister: endpointsInformer.Lister(), + endpointsListerSynced: endpointsInformer.Informer().HasSynced, + configMapLister: configMapInformer.Lister(), + configMapListerSynced: configMapInformer.Informer().HasSynced, + statefulSetLister: statefulSetInformer.Lister(), statefulSetListerSynced: statefulSetInformer.Informer().HasSynced, - - // podLister used as statefulSetLister.StatefulSets(namespace).Get(name) - podLister: podInformer.Lister(), - // podListerSynced used in waitForCacheSync() - podListerSynced: podInformer.Informer().HasSynced, - - // queue used to organize events queue processed by operator - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "chi"), - - // not used explicitly - recorder: recorder, - - // export metrics to Prometheus - metricsExporter: chopMetricsExporter, + podLister: podInformer.Lister(), + 
podListerSynced: podInformer.Informer().HasSynced, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "chi"), + recorder: recorder, + metricsExporter: chopMetricsExporter, } chiInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ diff --git a/pkg/controllers/chi/deleters.go b/pkg/controllers/chi/deleters.go index 314d4c12c..423d049ec 100644 --- a/pkg/controllers/chi/deleters.go +++ b/pkg/controllers/chi/deleters.go @@ -35,6 +35,17 @@ func newDeleteOptions() *metav1.DeleteOptions { // deleteReplica deletes all kubernetes resources related to replica *chop.ChiClusterLayoutShardReplica func (c *Controller) deleteReplica(replica *chop.ChiClusterLayoutShardReplica) error { + // Each replica consists of + // 1. Tables on replica - we need to delete tables on replica in order to clean Zookeeper data + // 2. StatefulSet + // 3. ConfigMap + // 4. Service + // Need to delete all these item + + // Delete tables on replica + tableNames, dropTableSQLs, _ := chopmodels.ReplicaGetDropTables(replica) + glog.V(1).Infof("Drop tables: %v as %v\n", tableNames, dropTableSQLs) + _ = chopmodels.ReplicaApplySQLs(replica, dropTableSQLs, false) // Delete StatefulSet statefulSetName := chopmodels.CreateStatefulSetName(replica) @@ -111,7 +122,7 @@ func (c *Controller) deleteChi(chi *chop.ClickHouseInstallation) { } } -// statefulSetDeletePod delete all pod of a StatefulSet. This requests StatefulSet to relaunch deleted pods +// statefulSetDeletePod delete a pod of a StatefulSet. This requests StatefulSet to relaunch deleted pod func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet) error { name := chopmodels.CreatePodName(statefulSet) glog.V(1).Infof("Delete Pod %s/%s\n", statefulSet.Namespace, name) diff --git a/pkg/controllers/chi/events.go b/pkg/controllers/chi/events.go index ba6e65dd6..5b3e402c2 100644 --- a/pkg/controllers/chi/events.go +++ b/pkg/controllers/chi/events.go @@ -51,11 +51,11 @@ const ( eventReasonDeleteFailed = "DeleteFailed" ) -// createEventChi creates CHI-related event -// action - what action was attempted (and then succeeded/failed regarding to the Involved Object -// reason - short, machine understandable string, ex.: SuccessfulCreate +// eventChi creates CHI-related event +// typ - type of the event - Normal, Warning, etc, one of eventType* +// action - what action was attempted, and then succeeded/failed regarding to the Involved Object. One of eventAction* +// reason - short, machine understandable string, one of eventReason* // message - human-readable description -// typ - type of the event - Normal, Warning, etc func (c *Controller) eventChi( chi *chop.ClickHouseInstallation, typ string, @@ -64,7 +64,7 @@ func (c *Controller) eventChi( message string, ) { now := time.Now() - event := core.Event{ + event := &core.Event{ ObjectMeta: meta.ObjectMeta{ GenerateName: "chop-chi-", }, @@ -94,7 +94,7 @@ func (c *Controller) eventChi( // ID of the controller instance, e.g. `kubelet-xyzf`. 
// ReportingInstance: } - _, err := c.kubeClient.CoreV1().Events(chi.Namespace).Create(&event) + _, err := c.kubeClient.CoreV1().Events(chi.Namespace).Create(event) if err != nil { glog.V(1).Infof("Create Event failed: %v\n", err) diff --git a/pkg/controllers/chi/types.go b/pkg/controllers/chi/types.go index 5f3161a9b..b83e2558a 100644 --- a/pkg/controllers/chi/types.go +++ b/pkg/controllers/chi/types.go @@ -32,24 +32,44 @@ import ( // Controller defines CRO controller type Controller struct { - chopConfig *config.Config - kubeClient kube.Interface - chopClient chopclientset.Interface - chiLister choplisters.ClickHouseInstallationLister - chiListerSynced cache.InformerSynced - serviceLister corelisters.ServiceLister - serviceListerSynced cache.InformerSynced - endpointsLister corelisters.EndpointsLister - endpointsListerSynced cache.InformerSynced - configMapLister corelisters.ConfigMapLister - configMapListerSynced cache.InformerSynced - statefulSetLister appslisters.StatefulSetLister + // chopConfig used to keep clickhouse-oprator config + chopConfig *config.Config + // kubeClient used to Create() k8s resources as c.kubeClient.AppsV1().StatefulSets(namespace).Create(name) + kubeClient kube.Interface + // chopClient used to Update() CRD k8s resource as c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Update(chiCopy) + chopClient chopclientset.Interface + + // chiLister used as chiLister.ClickHouseInstallations(namespace).Get(name) + chiLister choplisters.ClickHouseInstallationLister + // chiListerSynced used in waitForCacheSync() + chiListerSynced cache.InformerSynced + // serviceLister used as serviceLister.Services(namespace).Get(name) + serviceLister corelisters.ServiceLister + // serviceListerSynced used in waitForCacheSync() + serviceListerSynced cache.InformerSynced + // endpointsLister used as endpointsLister.Endpoints(namespace).Get(name) + endpointsLister corelisters.EndpointsLister + // endpointsListerSynced used in waitForCacheSync() + endpointsListerSynced cache.InformerSynced + // configMapLister used as configMapLister.ConfigMaps(namespace).Get(name) + configMapLister corelisters.ConfigMapLister + // configMapListerSynced used in waitForCacheSync() + configMapListerSynced cache.InformerSynced + // statefulSetLister used as statefulSetLister.StatefulSets(namespace).Get(name) + statefulSetLister appslisters.StatefulSetLister + // statefulSetListerSynced used in waitForCacheSync() statefulSetListerSynced cache.InformerSynced - podLister corelisters.PodLister - podListerSynced cache.InformerSynced - queue workqueue.RateLimitingInterface - recorder record.EventRecorder - metricsExporter *chopmetrics.Exporter + // podLister used as statefulSetLister.StatefulSets(namespace).Get(name) + podLister corelisters.PodLister + // podListerSynced used in waitForCacheSync() + podListerSynced cache.InformerSynced + + // queue used to organize events queue processed by operator + queue workqueue.RateLimitingInterface + // not used explicitly + recorder record.EventRecorder + // export metrics to Prometheus + metricsExporter *chopmetrics.Exporter } const ( diff --git a/pkg/models/create_names.go b/pkg/models/create_names.go index a40b80d3c..837da6ba1 100644 --- a/pkg/models/create_names.go +++ b/pkg/models/create_names.go @@ -135,8 +135,8 @@ func CreatePodFQDN(replica *chop.ChiClusterLayoutShardReplica) string { ) } -// CreateClusterPodFQDNs creates fully qualified domain names of all pods in a cluster -func CreateClusterPodFQDNs(cluster *chop.ChiCluster) []string { +// 
CreatePodFQDNs4Cluster creates fully qualified domain names of all pods in a cluster +func CreatePodFQDNs4Cluster(cluster *chop.ChiCluster) []string { fqdns := make([]string, 0) cluster.WalkReplicas(func(replica *chop.ChiClusterLayoutShardReplica) error { fqdns = append(fqdns, CreatePodFQDN(replica)) @@ -145,8 +145,8 @@ func CreateClusterPodFQDNs(cluster *chop.ChiCluster) []string { return fqdns } -// CreateChiPodFQDNs creates fully qualified domain names of all pods in a CHI -func CreateChiPodFQDNs(chi *chop.ClickHouseInstallation) []string { +// CreatePodFQDNs4Chi creates fully qualified domain names of all pods in a CHI +func CreatePodFQDNs4Chi(chi *chop.ClickHouseInstallation) []string { fqdns := make([]string, 0) chi.WalkReplicas(func(replica *chop.ChiClusterLayoutShardReplica) error { fqdns = append(fqdns, CreatePodFQDN(replica)) diff --git a/pkg/models/schemer.go b/pkg/models/schemer.go index e7ef1c34b..733be42e4 100644 --- a/pkg/models/schemer.go +++ b/pkg/models/schemer.go @@ -23,7 +23,10 @@ import ( ) const ( + // Comma-separated ''-enclosed list of database names to be ignored ignoredDBs = "'system'" + + // Max number of retries for SQL queries maxRetries = 10 ) @@ -33,7 +36,9 @@ func ClusterGetCreateDatabases(chi *chi.ClickHouseInstallation, cluster *chi.Chi glog.V(1).Info(CreateChiServiceFQDN(chi)) _ = clickhouse.Query(&result, fmt.Sprintf( - `SELECT distinct name, concat('CREATE DATABASE IF NOT EXISTS ', name) + `SELECT + distinct name, + concat('CREATE DATABASE IF NOT EXISTS ', name) FROM cluster('%s', system, databases) WHERE name not in (%s) ORDER BY name @@ -41,40 +46,54 @@ func ClusterGetCreateDatabases(chi *chi.ClickHouseInstallation, cluster *chi.Chi cluster.Name, ignoredDBs), CreateChiServiceFQDN(chi), ) - names, creates := unzip(result) - return names, creates, nil + dbNames, createStatements := unzip(result) + return dbNames, createStatements, nil } // ClusterGetCreateTables returns set of 'CREATE TABLE ...' SQLs func ClusterGetCreateTables(chi *chi.ClickHouseInstallation, cluster *chi.ChiCluster) ([]string, []string, error) { result := make([][]string, 0) - _ = clickhouse.Query(&result, + _ = clickhouse.Query( + &result, fmt.Sprintf( - `SELECT distinct name, + `SELECT + distinct name, replaceRegexpOne(create_table_query, 'CREATE (TABLE|VIEW|MATERIALIZED VIEW)', 'CREATE \\1 IF NOT EXISTS') FROM cluster('%s', system, tables) WHERE database not in (%s) AND name not like '.inner.%%' ORDER BY multiIf(engine not in ('Distributed', 'View', 'MaterializedView'), 1, engine = 'MaterializedView', 2, engine = 'Distributed', 3, 4), name SETTINGS skip_unavailable_shards = 1`, - cluster.Name, ignoredDBs), + cluster.Name, + ignoredDBs, + ), CreateChiServiceFQDN(chi), ) - names, creates := unzip(result) - return names, creates, nil + tableNames, createStatements := unzip(result) + return tableNames, createStatements, nil } -// unzip makes two 1-value columns (slices) out of one 2-value column (slice) -func unzip(slice [][]string) ([]string, []string) { - col1 := make([]string, len(slice)) - col2 := make([]string, len(slice)) - for i := 0; i < len(slice); i++ { - col1 = append(col1, slice[i][0]) - if len(slice[i]) > 1 { - col2 = append(col2, slice[i][1]) - } - } - return col1, col2 +// ReplicaGetDropTables returns set of 'DROP TABLE ...' SQLs +func ReplicaGetDropTables(replica *chi.ChiClusterLayoutShardReplica) ([]string, []string, error) { + // There isn't a separate query for deleting views. 
To delete a view, use DROP TABLE
+	// See https://clickhouse.yandex/docs/en/query_language/create/
+	result := make([][]string, 0)
+	_ = clickhouse.Query(
+		&result,
+		fmt.Sprintf(
+			`SELECT
+				distinct name,
+				concat('DROP TABLE IF EXISTS ', database, '.', name)
+			FROM system.tables
+			WHERE database not in (%s)
+			ORDER BY multiIf(engine = 'View', 1, engine = 'Distributed', 2, engine = 'MaterializedView', 3, 4), name
+			SETTINGS skip_unavailable_shards = 1`,
+			ignoredDBs,
+		),
+		CreatePodFQDN(replica),
+	)
+	tableNames, dropStatements := unzip(result)
+	return tableNames, dropStatements, nil
 }
 
 // ChiDropDnsCache runs 'DROP DNS CACHE' over the whole CHI
@@ -87,12 +106,18 @@ func ChiDropDnsCache(chi *chi.ClickHouseInstallation) error {
 
 // ClusterApplySQLs runs set of SQL queries over the cluster
 func ClusterApplySQLs(cluster *chi.ChiCluster, sqls []string, retry bool) error {
-	return applySQLs(CreateClusterPodFQDNs(cluster), sqls, retry)
+	return applySQLs(CreatePodFQDNs4Cluster(cluster), sqls, retry)
 }
 
 // ChiApplySQLs runs set of SQL queries over the whole CHI
 func ChiApplySQLs(chi *chi.ClickHouseInstallation, sqls []string) error {
-	return applySQLs(CreateChiPodFQDNs(chi), sqls, true)
+	return applySQLs(CreatePodFQDNs4Chi(chi), sqls, true)
 }
+
+// ReplicaApplySQLs runs set of SQL queries over the replica
+func ReplicaApplySQLs(replica *chi.ChiClusterLayoutShardReplica, sqls []string, retry bool) error {
+	hosts := []string{CreatePodFQDN(replica)}
+	return applySQLs(hosts, sqls, retry)
+}
 
 // applySQLs runs set of SQL queries on a set of hosts
diff --git a/pkg/models/util.go b/pkg/models/util.go
index a30c483fc..f6dd835d9 100644
--- a/pkg/models/util.go
+++ b/pkg/models/util.go
@@ -46,3 +46,16 @@ func includeNonEmpty(dst map[string]string, key, src string) {
 func fprintf(w io.Writer, format string, a ...interface{}) {
 	_, _ = fmt.Fprintf(w, format, a...)
} + +// unzip makes two 1-value columns (slices) out of one 2-value column (slice) +func unzip(slice [][]string) ([]string, []string) { + col1 := make([]string, len(slice)) + col2 := make([]string, len(slice)) + for i := 0; i < len(slice); i++ { + col1 = append(col1, slice[i][0]) + if len(slice[i]) > 1 { + col2 = append(col2, slice[i][1]) + } + } + return col1, col2 +} From 2f71d0aa3218388c702e0032a03a78d8f43d5ba0 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 8 May 2019 21:18:21 +0300 Subject: [PATCH 15/32] dev: simplify and unify naming functions --- pkg/controllers/chi/controller.go | 4 ++-- pkg/models/ch_configs.go | 4 ++++ pkg/models/constants.go | 24 +++++++----------------- pkg/models/create_names.go | 8 ++++---- pkg/models/create_objects.go | 2 +- pkg/models/normalizers.go | 25 ------------------------- pkg/models/schemer.go | 4 ++-- pkg/models/{listers.go => yaml.go} | 16 +++------------- 8 files changed, 23 insertions(+), 64 deletions(-) rename pkg/models/{listers.go => yaml.go} (64%) diff --git a/pkg/controllers/chi/controller.go b/pkg/controllers/chi/controller.go index 989f0e03b..1ec4eebdf 100644 --- a/pkg/controllers/chi/controller.go +++ b/pkg/controllers/chi/controller.go @@ -453,7 +453,7 @@ func (c *Controller) onAddChi(chi *chop.ClickHouseInstallation) error { c.eventChi(chi, eventTypeNormal, eventActionCreate, eventReasonCreateCompleted, fmt.Sprintf("onAddChi(%s/%s)", chi.Namespace, chi.Name)) // Check hostnames of the Pods from current CHI object included into chopmetrics.Exporter state - c.metricsExporter.EnsureControlledValues(chi.Name, chopmodels.ListPodFQDNs(chi)) + c.metricsExporter.EnsureControlledValues(chi.Name, chopmodels.CreatePodFQDNsOfChi(chi)) return nil } @@ -535,7 +535,7 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error { } // Check hostnames of the Pods from current CHI object included into chopmetrics.Exporter state - c.metricsExporter.EnsureControlledValues(new.Name, chopmodels.ListPodFQDNs(new)) + c.metricsExporter.EnsureControlledValues(new.Name, chopmodels.CreatePodFQDNsOfChi(new)) return nil } diff --git a/pkg/models/ch_configs.go b/pkg/models/ch_configs.go index b9c9b24ad..46a901cf7 100644 --- a/pkg/models/ch_configs.go +++ b/pkg/models/ch_configs.go @@ -22,6 +22,10 @@ import ( ) const ( + // Special auto-generated clusters. Each of these clusters lay over all replicas in CHI + // 1. Cluster with one shard and all replicas. Used to duplicate data over all replicas. + // 2. Cluster with all shards (1 replica). Used to gather/scatter data over all replicas. + oneShardAllReplicasClusterName = "all-replicated" allShardsOneReplicaClusterName = "all-sharded" ) diff --git a/pkg/models/constants.go b/pkg/models/constants.go index aeb99f2dd..f0feab74e 100644 --- a/pkg/models/constants.go +++ b/pkg/models/constants.go @@ -102,26 +102,11 @@ const ( // 1. macros dirPathConfd = "/etc/clickhouse-server/conf.d/" - fullPathConfigd = "/etc/clickhouse-server/config.d/" - // fullPathConfigTemplate specifies template for full path of the XML config files for ClickHouse - fullPathConfigTemplate = fullPathConfigd + "%s" - // dirPathClickHouseData specifies full path of data folder where ClickHouse would place its datastorage dirPathClickHouseData = "/var/lib/clickhouse" ) const ( - // Full Deployment ID consists of two parts: - // 1. "deployment id" (it should be derived from fingerprint) of each deployment in ClickHouseInstallation object. 
- // Some deployments may be the same and thus have the same "deployment id" (because of the same fingerprint) - // 2. Sequential index of this "deployment id" in ClickHouseInstallation object. - // Some deployments may be the same and thus have the same "deployment id" (because of the same fingerprint), - // but they will have different sequentially increasing index of this "deployment id" in ClickHouseInstallation object - // Ex.: two running instances of the same deployment will have full deployments ids - // 1eb454-1 - // 1eb454-2 - fullDeploymentIDPattern = "%s-%d" - // NAME READY AGE CONTAINERS IMAGES // statefulset.apps/ss-1eb454-1 0/1 2s ss-1eb454-1 yandex/clickhouse-server:latest statefulSetNamePattern = "chi-%s-%s-%d-%d" @@ -182,21 +167,26 @@ const ( ) const ( - chDefaultDockerImage = "yandex/clickhouse-server:latest" - chDefaultVolumeMountNameData = "clickhouse-data" + // Default docker image to be used + defaultClickHouseDockerImage = "yandex/clickhouse-server:latest" + + // Index of container within Pod with ClickHouse instance. Pod may have other containers included, such as monitoring ClickHouseContainerIndex = 0 ) const ( + // ClickHouse open ports chDefaultHTTPPortName = "http" chDefaultHTTPPortNumber = 8123 chDefaultClientPortName = "client" chDefaultClientPortNumber = 9000 chDefaultInterServerPortName = "interserver" chDefaultInterServerPortNumber = 9009 + // Application Label chDefaultAppLabel = clickhousealtinitycom.GroupName + "/app" ) const ( + // Default value for ClusterIP service templateDefaultsServiceClusterIP = "None" ) diff --git a/pkg/models/create_names.go b/pkg/models/create_names.go index 837da6ba1..537a3e2c9 100644 --- a/pkg/models/create_names.go +++ b/pkg/models/create_names.go @@ -135,8 +135,8 @@ func CreatePodFQDN(replica *chop.ChiClusterLayoutShardReplica) string { ) } -// CreatePodFQDNs4Cluster creates fully qualified domain names of all pods in a cluster -func CreatePodFQDNs4Cluster(cluster *chop.ChiCluster) []string { +// CreatePodFQDNsOfCluster creates fully qualified domain names of all pods in a cluster +func CreatePodFQDNsOfCluster(cluster *chop.ChiCluster) []string { fqdns := make([]string, 0) cluster.WalkReplicas(func(replica *chop.ChiClusterLayoutShardReplica) error { fqdns = append(fqdns, CreatePodFQDN(replica)) @@ -145,8 +145,8 @@ func CreatePodFQDNs4Cluster(cluster *chop.ChiCluster) []string { return fqdns } -// CreatePodFQDNs4Chi creates fully qualified domain names of all pods in a CHI -func CreatePodFQDNs4Chi(chi *chop.ClickHouseInstallation) []string { +// CreatePodFQDNsOfChi creates fully qualified domain names of all pods in a CHI +func CreatePodFQDNsOfChi(chi *chop.ClickHouseInstallation) []string { fqdns := make([]string, 0) chi.WalkReplicas(func(replica *chop.ChiClusterLayoutShardReplica) error { fqdns = append(fqdns, CreatePodFQDN(replica)) diff --git a/pkg/models/create_objects.go b/pkg/models/create_objects.go index ab2e7b023..6c521cd7b 100644 --- a/pkg/models/create_objects.go +++ b/pkg/models/create_objects.go @@ -530,7 +530,7 @@ func createDefaultPodTemplate(name string) *chiv1.ChiPodTemplate { Containers: []corev1.Container{ { Name: name, - Image: chDefaultDockerImage, + Image: defaultClickHouseDockerImage, Ports: []corev1.ContainerPort{ { Name: chDefaultHTTPPortName, diff --git a/pkg/models/normalizers.go b/pkg/models/normalizers.go index 0ce1ba15d..6b21bfe03 100644 --- a/pkg/models/normalizers.go +++ b/pkg/models/normalizers.go @@ -217,31 +217,6 @@ func deploymentGenerateFingerprint(replica 
*chiv1.ChiClusterLayoutShardReplica, return hex.EncodeToString(hasher.Sum(nil)) } -// deploymentGenerateID generates short-printable deployment ID out of long deployment fingerprint -// Generally, fingerprint is perfectly OK - it is unique for each unique deployment inside ClickHouseInstallation object, -// but it is extremely long and thus can not be used in k8s resources names. -// So we need to produce another - much shorter - unique id for each unique deployment inside ClickHouseInstallation object. -// IMPORTANT there can be the same deployments inside ClickHouseInstallation object and they will have the same -// deployment fingerprint and thus deployment id. This is addressed by FullDeploymentID, which is unique for each -// deployment inside ClickHouseInstallation object -func deploymentGenerateID(fingerprint string) string { - // Extract last 10 chars of fingerprint - return fingerprint[len(fingerprint)-10:] - //return randomString() -} - -// generateFullDeploymentID generates full deployment ID out of deployment ID -// Full Deployment ID is unique for each deployment inside ClickHouseInstallation object and can be used for naming. -// IMPORTANT there can be the same deployments inside ClickHouseInstallation object and they will have the same -// deployment fingerprint and thus deployment id. This is addressed by FullDeploymentID, which is unique for each -// deployment inside ClickHouseInstallation object -func generateFullDeploymentID(replica *chiv1.ChiClusterLayoutShardReplica) string { - deploymentID := deploymentGenerateID(replica.Deployment.Fingerprint) - index := replica.Deployment.Index - // 1eb454-2 (deployment id - sequential index of this deployment id) - return fmt.Sprintf(fullDeploymentIDPattern, deploymentID, index) -} - // defaultsNormalizeReplicasUseFQDN ensures chiv1.ChiDefaults.ReplicasUseFQDN section has proper values func defaultsNormalizeReplicasUseFQDN(d *chiv1.ChiDefaults) { // Acceptable values are 0 and 1 diff --git a/pkg/models/schemer.go b/pkg/models/schemer.go index 733be42e4..f2967f628 100644 --- a/pkg/models/schemer.go +++ b/pkg/models/schemer.go @@ -106,12 +106,12 @@ func ChiDropDnsCache(chi *chi.ClickHouseInstallation) error { // ClusterApplySQLs runs set of SQL queries over the cluster func ClusterApplySQLs(cluster *chi.ChiCluster, sqls []string, retry bool) error { - return applySQLs(CreatePodFQDNs4Cluster(cluster), sqls, retry) + return applySQLs(CreatePodFQDNsOfCluster(cluster), sqls, retry) } // ChiApplySQLs runs set of SQL queries over the whole CHI func ChiApplySQLs(chi *chi.ClickHouseInstallation, sqls []string) error { - return applySQLs(CreatePodFQDNs4Chi(chi), sqls, true) + return applySQLs(CreatePodFQDNsOfChi(chi), sqls, true) } // ReplicaApplySQLs runs set of SQL queries over the replica diff --git a/pkg/models/listers.go b/pkg/models/yaml.go similarity index 64% rename from pkg/models/listers.go rename to pkg/models/yaml.go index 6ecaf5776..fda3a52f4 100644 --- a/pkg/models/listers.go +++ b/pkg/models/yaml.go @@ -15,22 +15,12 @@ package models import ( - chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "gopkg.in/yaml.v2" -) - -func ListPodFQDNs(chi *chiv1.ClickHouseInstallation) []string { - names := make([]string, 0) - replicaProcessor := func(replica *chiv1.ChiClusterLayoutShardReplica) error { - names = append(names, CreatePodFQDN(replica)) - return nil - } - chi.WalkReplicas(replicaProcessor) - return names -} + chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" +) 
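+
+// Yaml returns the YAML representation of the provided CHI (an added doc comment; per the body below, an empty string is returned on marshalling error)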
-func Yaml(chi *chiv1.ClickHouseInstallation) string { +func Yaml(chi *chi.ClickHouseInstallation) string { if data, err := yaml.Marshal(chi); err != nil { return "" } else { From 516c3b92b770e8f5597783b7334ac83becec2dc0 Mon Sep 17 00:00:00 2001 From: alex-zaitsev Date: Thu, 9 May 2019 00:37:17 +0300 Subject: [PATCH 16/32] Only replicated tables should be dropped automatically --- pkg/models/schemer.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/pkg/models/schemer.go b/pkg/models/schemer.go index f2967f628..b93a6e02e 100644 --- a/pkg/models/schemer.go +++ b/pkg/models/schemer.go @@ -57,7 +57,7 @@ func ClusterGetCreateTables(chi *chi.ClickHouseInstallation, cluster *chi.ChiClu &result, fmt.Sprintf( `SELECT - distinct name, + distinct name, replaceRegexpOne(create_table_query, 'CREATE (TABLE|VIEW|MATERIALIZED VIEW)', 'CREATE \\1 IF NOT EXISTS') FROM cluster('%s', system, tables) WHERE database not in (%s) @@ -82,12 +82,11 @@ func ReplicaGetDropTables(replica *chi.ChiClusterLayoutShardReplica) ([]string, &result, fmt.Sprintf( `SELECT - distinct name, - concat('DROP TABLE IF EXISTS ', database, '.', name) - FROM system.tables - WHERE database not in (%s) - ORDER BY multiIf(engine = 'View', 1, engine = 'Distributed', 2, engine = 'MaterializedView', 3, 4), name - SETTINGS skip_unavailable_shards = 1`, + distinct name, + concat('DROP TABLE IF EXISTS ', database, '.', name) + FROM system.tables + WHERE database not in (%s) + AND engine like 'Replicated%', ignoredDBs, ), CreatePodFQDN(replica), From 398210c019123da8a7a48133a8787e024aa6273e Mon Sep 17 00:00:00 2001 From: alex-zaitsev Date: Fri, 10 May 2019 11:20:19 +0300 Subject: [PATCH 17/32] Fixed like expression --- pkg/models/schemer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/models/schemer.go b/pkg/models/schemer.go index b93a6e02e..89447e748 100644 --- a/pkg/models/schemer.go +++ b/pkg/models/schemer.go @@ -86,7 +86,7 @@ func ReplicaGetDropTables(replica *chi.ChiClusterLayoutShardReplica) ([]string, concat('DROP TABLE IF EXISTS ', database, '.', name) FROM system.tables WHERE database not in (%s) - AND engine like 'Replicated%', + AND engine like 'Replicated%%', ignoredDBs, ), CreatePodFQDN(replica), From 3229bc1906e0d72e0269f16a55aed05da947e730 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 10 May 2019 12:35:20 +0300 Subject: [PATCH 18/32] dev: add missing close-tick in SQL statement --- pkg/models/schemer.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/models/schemer.go b/pkg/models/schemer.go index 89447e748..30b41b735 100644 --- a/pkg/models/schemer.go +++ b/pkg/models/schemer.go @@ -57,11 +57,11 @@ func ClusterGetCreateTables(chi *chi.ClickHouseInstallation, cluster *chi.ChiClu &result, fmt.Sprintf( `SELECT - distinct name, - replaceRegexpOne(create_table_query, 'CREATE (TABLE|VIEW|MATERIALIZED VIEW)', 'CREATE \\1 IF NOT EXISTS') + distinct name, + replaceRegexpOne(create_table_query, 'CREATE (TABLE|VIEW|MATERIALIZED VIEW)', 'CREATE \\1 IF NOT EXISTS') FROM cluster('%s', system, tables) - WHERE database not in (%s) - AND name not like '.inner.%%' + WHERE database not in (%s) + AND name not like '.inner.%%' ORDER BY multiIf(engine not in ('Distributed', 'View', 'MaterializedView'), 1, engine = 'MaterializedView', 2, engine = 'Distributed', 3, 4), name SETTINGS skip_unavailable_shards = 1`, cluster.Name, @@ -82,11 +82,11 @@ func ReplicaGetDropTables(replica *chi.ChiClusterLayoutShardReplica) ([]string, &result, 
fmt.Sprintf( `SELECT - distinct name, - concat('DROP TABLE IF EXISTS ', database, '.', name) - FROM system.tables - WHERE database not in (%s) - AND engine like 'Replicated%%', + distinct name, + concat('DROP TABLE IF EXISTS ', database, '.', name) + FROM system.tables + WHERE database not in (%s) + AND engine like 'Replicated%%'`, ignoredDBs, ), CreatePodFQDN(replica), From 0754ee0315a16cfafd218405aa47916bdb3cdb7c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 10 May 2019 13:08:00 +0300 Subject: [PATCH 19/32] dev: extract default CHOP timeouts --- pkg/config/chop_config.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/pkg/config/chop_config.go b/pkg/config/chop_config.go index 0b452ff9e..db34cfb6f 100644 --- a/pkg/config/chop_config.go +++ b/pkg/config/chop_config.go @@ -26,6 +26,14 @@ import ( chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" ) +const( + // Default update timeout in seconds + defaultStatefulSetUpdateTimeout = 120 + + // Default polling period in seconds + defaultStatefulSetUpdatePollPeriod = 15 +) + // GetConfig creates Config object based on current environment func GetConfig(configFilePath string) (*Config, error) { if len(configFilePath) > 0 { @@ -56,8 +64,8 @@ func GetConfig(configFilePath string) (*Config, error) { } } - // Try to find /etc/clickhouse-oprator/config.yaml - if conf, err := buildConfigFromFile("/etc/clickhouse-oprator/config.yaml"); err == nil { + // Try to find /etc/clickhouse-operator/config.yaml + if conf, err := buildConfigFromFile("/etc/clickhouse-operator/config.yaml"); err == nil { // Able to build config, all is fine return conf, nil } @@ -142,15 +150,17 @@ func (config *Config) normalize() error { // Process ClickHouseInstallation templates section config.prepareConfigPath(&config.ChiTemplatesPath, "templates.d") - // Process Rolling update section + // Process Create/Update section + + // Timeouts if config.StatefulSetUpdateTimeout == 0 { // Default update timeout in seconds - config.StatefulSetUpdateTimeout = 120 + config.StatefulSetUpdateTimeout = defaultStatefulSetUpdateTimeout } if config.StatefulSetUpdatePollPeriod == 0 { // Default polling period in seconds - config.StatefulSetUpdatePollPeriod = 15 + config.StatefulSetUpdatePollPeriod = defaultStatefulSetUpdatePollPeriod } // Default action on Create/Update failure - to keep system in previous state From 2e4ac1b9de3b107306e476008caecc3b0da0e91a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 10 May 2019 13:21:59 +0300 Subject: [PATCH 20/32] dev: extract aux function into 'util' package --- pkg/config/chop_config.go | 16 +++++++++++---- pkg/util/array.go | 25 +++++++++++++++++++++++ pkg/{config/util.go => util/fs.go} | 32 +++++++++++------------------- 3 files changed, 49 insertions(+), 24 deletions(-) create mode 100644 pkg/util/array.go rename pkg/{config/util.go => util/fs.go} (70%) diff --git a/pkg/config/chop_config.go b/pkg/config/chop_config.go index db34cfb6f..ad35c0e93 100644 --- a/pkg/config/chop_config.go +++ b/pkg/config/chop_config.go @@ -24,6 +24,7 @@ import ( "strings" chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/util" ) const( @@ -195,7 +196,7 @@ func (config *Config) prepareConfigPath(path *string, defaultRelativePath string } // In case of incorrect/unavailable path - make it empty - if (*path != "") && !isDirOk(*path) { + if (*path != "") && !util.IsDirOk(*path) { *path = "" } } @@ 
-224,7 +225,7 @@ func (config *Config) readChConfigFiles() { // isChConfigExt return true in case specified file has proper extension for a ClickHouse config file func (config *Config) isChConfigExt(file string) bool { - switch extToLower(file) { + switch util.ExtToLower(file) { case ".xml": return true } @@ -238,7 +239,7 @@ func (config *Config) readChiTemplateFiles() { // isChiTemplateExt return true in case specified file has proper extension for a CHI template config file func (config *Config) isChiTemplateExt(file string) bool { - switch extToLower(file) { + switch util.ExtToLower(file) { case ".yaml": return true } @@ -252,5 +253,12 @@ func (config *Config) IsWatchedNamespace(namespace string) bool { return true } - return inArray(namespace, config.Namespaces) + return util.InArray(namespace, config.Namespaces) +} + +// readConfigFiles reads config files from specified path into "file name->file content" map +// path - folder where to look for files +// isChConfigExt - accepts path to file return bool whether this file has config extension +func readConfigFiles(path string, isConfigExt func(string) bool) map[string]string { + return util.ReadFilesIntoMap(path, isConfigExt) } diff --git a/pkg/util/array.go b/pkg/util/array.go new file mode 100644 index 000000000..15df26277 --- /dev/null +++ b/pkg/util/array.go @@ -0,0 +1,25 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +// InArray checks whether needle is in haystack +func InArray(needle string, haystack []string) bool { + for _, b := range haystack { + if b == needle { + return true + } + } + return false +} diff --git a/pkg/config/util.go b/pkg/util/fs.go similarity index 70% rename from pkg/config/util.go rename to pkg/util/fs.go index 3a8a4e709..6498a3266 100644 --- a/pkg/config/util.go +++ b/pkg/util/fs.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package config +package util import ( "github.com/golang/glog" @@ -22,8 +22,8 @@ import ( "strings" ) -// isDirOk returns whether the given path exists and is a dir -func isDirOk(path string) bool { +// IsDirOk returns whether the given path exists and is a dir +func IsDirOk(path string) bool { if stat, err := os.Stat(path); (err == nil) && stat.IsDir() { // File object Stat-ed without errors - it exists and it is a dir return true @@ -33,10 +33,10 @@ func isDirOk(path string) bool { return false } -// readConfigFiles reads config files from specified path into "file name->file content" map +// ReadFilesIntoMap reads config files from specified path into "file name->file content" map // path - folder where to look for files -// isChConfigExt - accepts path to file return bool whether this file has config extension -func readConfigFiles(path string, isConfigExt func(string) bool) map[string]string { +// isOurFile - accepts path to file return bool whether this file should be read +func ReadFilesIntoMap(path string, isOurFile func(string) bool) map[string]string { // Look in real path only if path == "" { return nil @@ -44,15 +44,16 @@ func readConfigFiles(path string, isConfigExt func(string) bool) map[string]stri // Result is a filename to content map var files map[string]string + // Loop over all files in folder if matches, err := filepath.Glob(path + "/*"); err == nil { for i := range matches { // `file` comes with `path`-prefixed. // So in case `path` is an absolute path, `file` will be absolute path to file file := matches[i] - if isConfigExt(file) { - // Pick files with proper extensions only - glog.Infof("CommonConfig file %s\n", file) + if isOurFile(file) { + // Pick our files only + glog.Infof("Reading file %s\n", file) if content, err := ioutil.ReadFile(file); (err == nil) && (len(content) > 0) { // File content read successfully and file has some content if files == nil { @@ -72,17 +73,8 @@ func readConfigFiles(path string, isConfigExt func(string) bool) map[string]stri } } -// extToLower fetches and lowercases file extension. With dot, as '.xml' -func extToLower(file string) string { +// ExtToLower fetches and lower-cases file extension. 
With dot, as '.xml' +func ExtToLower(file string) string { return strings.ToLower(filepath.Ext(file)) } -// inArray checks whether a is in list -func inArray(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} From a33d1969513eb31d7acc5e6489ff5a8db6903090 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 10 May 2019 13:34:57 +0300 Subject: [PATCH 21/32] dev: extract common functions into util module --- pkg/config/chop_config.go | 2 +- pkg/models/constants.go | 4 ++-- pkg/models/create_objects.go | 23 +++++++++++---------- pkg/models/schemer.go | 7 ++++--- pkg/models/util.go | 40 ++---------------------------------- pkg/util/array.go | 13 ++++++++++++ pkg/util/fs.go | 1 - pkg/util/map.go | 28 +++++++++++++++++++++++++ pkg/util/print.go | 25 ++++++++++++++++++++++ pkg/util/string.go | 28 +++++++++++++++++++++++++ 10 files changed, 115 insertions(+), 56 deletions(-) create mode 100644 pkg/util/map.go create mode 100644 pkg/util/print.go create mode 100644 pkg/util/string.go diff --git a/pkg/config/chop_config.go b/pkg/config/chop_config.go index ad35c0e93..16043c588 100644 --- a/pkg/config/chop_config.go +++ b/pkg/config/chop_config.go @@ -27,7 +27,7 @@ import ( "github.com/altinity/clickhouse-operator/pkg/util" ) -const( +const ( // Default update timeout in seconds defaultStatefulSetUpdateTimeout = 120 diff --git a/pkg/models/constants.go b/pkg/models/constants.go index f0feab74e..b1f86b3c8 100644 --- a/pkg/models/constants.go +++ b/pkg/models/constants.go @@ -171,7 +171,7 @@ const ( defaultClickHouseDockerImage = "yandex/clickhouse-server:latest" // Index of container within Pod with ClickHouse instance. Pod may have other containers included, such as monitoring - ClickHouseContainerIndex = 0 + ClickHouseContainerIndex = 0 ) const ( @@ -183,7 +183,7 @@ const ( chDefaultInterServerPortName = "interserver" chDefaultInterServerPortNumber = 9009 // Application Label - chDefaultAppLabel = clickhousealtinitycom.GroupName + "/app" + chDefaultAppLabel = clickhousealtinitycom.GroupName + "/app" ) const ( diff --git a/pkg/models/create_objects.go b/pkg/models/create_objects.go index 6c521cd7b..c4ebf8f52 100644 --- a/pkg/models/create_objects.go +++ b/pkg/models/create_objects.go @@ -17,6 +17,7 @@ package models import ( chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/config" + "github.com/altinity/clickhouse-operator/pkg/util" "strconv" "github.com/golang/glog" @@ -68,13 +69,13 @@ func createConfigMapObjectsCommon(chi *chiv1.ClickHouseInstallation, config *con // 3. settings // 4. 
listen configs.commonConfigSections = make(map[string]string) - includeNonEmpty(configs.commonConfigSections, filenameRemoteServersXML, generateRemoteServersConfig(chi)) - includeNonEmpty(configs.commonConfigSections, filenameZookeeperXML, generateZookeeperConfig(chi)) - includeNonEmpty(configs.commonConfigSections, filenameSettingsXML, generateSettingsConfig(chi)) - includeNonEmpty(configs.commonConfigSections, filenameListenXML, generateListenConfig(chi)) + util.IncludeNonEmpty(configs.commonConfigSections, filenameRemoteServersXML, generateRemoteServersConfig(chi)) + util.IncludeNonEmpty(configs.commonConfigSections, filenameZookeeperXML, generateZookeeperConfig(chi)) + util.IncludeNonEmpty(configs.commonConfigSections, filenameSettingsXML, generateSettingsConfig(chi)) + util.IncludeNonEmpty(configs.commonConfigSections, filenameListenXML, generateListenConfig(chi)) // Extra user-specified configs for filename, content := range config.ChCommonConfigs { - includeNonEmpty(configs.commonConfigSections, filename, content) + util.IncludeNonEmpty(configs.commonConfigSections, filename, content) } // commonConfigSections maps section name to section XML config of the following sections: @@ -82,12 +83,12 @@ func createConfigMapObjectsCommon(chi *chiv1.ClickHouseInstallation, config *con // 2. quotas // 3. profiles configs.commonUsersConfigSections = make(map[string]string) - includeNonEmpty(configs.commonUsersConfigSections, filenameUsersXML, generateUsersConfig(chi)) - includeNonEmpty(configs.commonUsersConfigSections, filenameQuotasXML, generateQuotasConfig(chi)) - includeNonEmpty(configs.commonUsersConfigSections, filenameProfilesXML, generateProfilesConfig(chi)) + util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameUsersXML, generateUsersConfig(chi)) + util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameQuotasXML, generateQuotasConfig(chi)) + util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameProfilesXML, generateProfilesConfig(chi)) // Extra user-specified configs for filename, content := range config.ChUsersConfigs { - includeNonEmpty(configs.commonUsersConfigSections, filename, content) + util.IncludeNonEmpty(configs.commonUsersConfigSections, filename, content) } // There are two types of configs, kept in ConfigMaps: @@ -142,10 +143,10 @@ func createConfigMapObjectsDeployment(chi *chiv1.ClickHouseInstallation, config replicaProcessor := func(replica *chiv1.ChiClusterLayoutShardReplica) error { // Prepare for this replica deployment config files map as filename->content deploymentConfigSections := make(map[string]string) - includeNonEmpty(deploymentConfigSections, filenameMacrosXML, generateHostMacros(replica)) + util.IncludeNonEmpty(deploymentConfigSections, filenameMacrosXML, generateHostMacros(replica)) // Extra user-specified configs for filename, content := range config.ChDeploymentConfigs { - includeNonEmpty(deploymentConfigSections, filename, content) + util.IncludeNonEmpty(deploymentConfigSections, filename, content) } // Add corev1.ConfigMap object to the list diff --git a/pkg/models/schemer.go b/pkg/models/schemer.go index 30b41b735..75569402a 100644 --- a/pkg/models/schemer.go +++ b/pkg/models/schemer.go @@ -18,6 +18,7 @@ import ( "fmt" "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse" chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/util" "github.com/golang/glog" "time" ) @@ -46,7 +47,7 @@ func ClusterGetCreateDatabases(chi 
*chi.ClickHouseInstallation, cluster *chi.Chi cluster.Name, ignoredDBs), CreateChiServiceFQDN(chi), ) - dbNames, createStatements := unzip(result) + dbNames, createStatements := util.Unzip(result) return dbNames, createStatements, nil } @@ -69,7 +70,7 @@ func ClusterGetCreateTables(chi *chi.ClickHouseInstallation, cluster *chi.ChiClu ), CreateChiServiceFQDN(chi), ) - tableNames, createStatements := unzip(result) + tableNames, createStatements := util.Unzip(result) return tableNames, createStatements, nil } @@ -91,7 +92,7 @@ func ReplicaGetDropTables(replica *chi.ChiClusterLayoutShardReplica) ([]string, ), CreatePodFQDN(replica), ) - tableNames, dropStatements := unzip(result) + tableNames, dropStatements := util.Unzip(result) return tableNames, dropStatements, nil } diff --git a/pkg/models/util.go b/pkg/models/util.go index f6dd835d9..b3817aaab 100644 --- a/pkg/models/util.go +++ b/pkg/models/util.go @@ -15,47 +15,11 @@ package models import ( - "encoding/hex" - "fmt" + "github.com/altinity/clickhouse-operator/pkg/util" "io" - "math/rand" - "time" ) -// randomString generates random string -func randomString() string { - b := make([]byte, 3) - rand.New(rand.NewSource(time.Now().UnixNano())).Read(b) - return hex.EncodeToString(b) -} - -// includeNonEmpty inserts (and overwrites) data into map object using specified key, if not empty value provided -func includeNonEmpty(dst map[string]string, key, src string) { - // Do not include empty value - if src == "" { - return - } - - // Include (and overwrite) value by specified key - dst[key] = src - - return -} - // fprintf suppresses warning for unused returns of fmt.Fprintf() func fprintf(w io.Writer, format string, a ...interface{}) { - _, _ = fmt.Fprintf(w, format, a...) -} - -// unzip makes two 1-value columns (slices) out of one 2-value column (slice) -func unzip(slice [][]string) ([]string, []string) { - col1 := make([]string, len(slice)) - col2 := make([]string, len(slice)) - for i := 0; i < len(slice); i++ { - col1 = append(col1, slice[i][0]) - if len(slice[i]) > 1 { - col2 = append(col2, slice[i][1]) - } - } - return col1, col2 + util.Fprintf(w, format, a...) } diff --git a/pkg/util/array.go b/pkg/util/array.go index 15df26277..85424d56d 100644 --- a/pkg/util/array.go +++ b/pkg/util/array.go @@ -23,3 +23,16 @@ func InArray(needle string, haystack []string) bool { } return false } + +// Unzip makes two 1-value columns (slices) out of one 2-value column (slice) +func Unzip(slice [][]string) ([]string, []string) { + col1 := make([]string, len(slice)) + col2 := make([]string, len(slice)) + for i := 0; i < len(slice); i++ { + col1 = append(col1, slice[i][0]) + if len(slice[i]) > 1 { + col2 = append(col2, slice[i][1]) + } + } + return col1, col2 +} diff --git a/pkg/util/fs.go b/pkg/util/fs.go index 6498a3266..434f66711 100644 --- a/pkg/util/fs.go +++ b/pkg/util/fs.go @@ -77,4 +77,3 @@ func ReadFilesIntoMap(path string, isOurFile func(string) bool) map[string]strin func ExtToLower(file string) string { return strings.ToLower(filepath.Ext(file)) } - diff --git a/pkg/util/map.go b/pkg/util/map.go new file mode 100644 index 000000000..ea52d87d4 --- /dev/null +++ b/pkg/util/map.go @@ -0,0 +1,28 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +// IncludeNonEmpty inserts (and overwrites) data into map object using specified key, if not empty value provided +func IncludeNonEmpty(dst map[string]string, key, src string) { + // Do not include empty value + if src == "" { + return + } + + // Include (and overwrite) value by specified key + dst[key] = src + + return +} diff --git a/pkg/util/print.go b/pkg/util/print.go new file mode 100644 index 000000000..37def73ee --- /dev/null +++ b/pkg/util/print.go @@ -0,0 +1,25 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "fmt" + "io" +) + +// Fprintf suppresses warning for unused returns of fmt.Fprintf() +func Fprintf(w io.Writer, format string, a ...interface{}) { + _, _ = fmt.Fprintf(w, format, a...) +} diff --git a/pkg/util/string.go b/pkg/util/string.go new file mode 100644 index 000000000..91a727bf2 --- /dev/null +++ b/pkg/util/string.go @@ -0,0 +1,28 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "encoding/hex" + "math/rand" + "time" +) + +// RandomString generates random string +func RandomString() string { + b := make([]byte, 3) + rand.New(rand.NewSource(time.Now().UnixNano())).Read(b) + return hex.EncodeToString(b) +} From b3c728303e258b6a019ac5787af3ca21ce6fc705 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 13 May 2019 17:35:37 +0300 Subject: [PATCH 22/32] dev: chopsim - use propoer port naming --- dev/RnD/chopsim/parser/parser.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dev/RnD/chopsim/parser/parser.go b/dev/RnD/chopsim/parser/parser.go index b2542ce96..e7f933567 100644 --- a/dev/RnD/chopsim/parser/parser.go +++ b/dev/RnD/chopsim/parser/parser.go @@ -355,17 +355,17 @@ func (chi *ClickHouseInstallation) createServiceObjects(o *genOptions) serviceLi svcList := make(serviceList, 0, len(o.ssNames)+1) ports := []serviceSpecPort{ { - Name: "rpc", + Name: "http", + Port: 8123, + }, + { + Name: "client", Port: 9000, }, { Name: "interserver", Port: 9009, }, - { - Name: "rest", - Port: 8123, - }, } for ssName := range o.ssNames { svcList = append(svcList, &service{ From e7ad954a60b7949173f4e544ce69ef44936326d6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 13 May 2019 17:36:15 +0300 Subject: [PATCH 23/32] dev: unify XML generating functions --- pkg/models/builders/xml/xml.go | 77 +++++++++++++++++----------------- pkg/models/ch_configs.go | 43 ++++++------------- 2 files changed, 51 insertions(+), 69 deletions(-) diff --git a/pkg/models/builders/xml/xml.go b/pkg/models/builders/xml/xml.go index f7d76241a..66bb2fe4c 100644 --- a/pkg/models/builders/xml/xml.go +++ b/pkg/models/builders/xml/xml.go @@ -34,22 +34,20 @@ const ( ) // GenerateXML creates XML representation from the provided input -func GenerateXML(w io.Writer, input map[string]interface{}, indent, tabsize uint8, excludes ...string) { - re := regexp.MustCompile("//+") - +func GenerateXML(w io.Writer, input map[string]interface{}, prefix string) { // paths is sorted set of normalized paths (maps keys) from 'input' paths := make([]string, 0, len(input)) // data is copy of 'input' with: // 1. paths (map keys) are normalized in terms of trimmed '/' - // 2. all excludes are excluded + // 2. all map keys listed in 'excludes' are excluded data := make(map[string]interface{}) // Skip excluded paths for key, value := range input { - // key may be non-normalized, and may have starting or trailing '/' - // path is normalized path without starting and trailing '/', ex.: 'test/quotas' - path := re.ReplaceAllString(strings.Trim(key, "/"), "/") - if path == "" || checkExcludes(path, excludes) { + // 'key' may be non-normalized, and may have starting or trailing '/' + // 'path' is normalized path without starting and trailing '/', ex.: 'test/quotas' + path := normalizePath(prefix, key) + if path == "" { continue } paths = append(paths, path) @@ -72,7 +70,22 @@ func GenerateXML(w io.Writer, input map[string]interface{}, indent, tabsize uint } // return XML - xmlTreeRoot.buildXML(w, indent, tabsize) + xmlTreeRoot.buildXML(w, 0, 4) +} + +// normalizePath makes 'prefix/a/b/c' out of 'prefix' + '/a//b///c////' +// Important - leading '/' is removed! 
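+// Example (illustrative call): normalizePath("profiles", "//test///max_memory_usage/") yields "profiles/test/max_memory_usage"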
+func normalizePath(prefix, path string) string { + // Normalize '//' to '/' + re := regexp.MustCompile("//+") + path = re.ReplaceAllString(path, "/") + // Cut all leading and trailing '/' + path = strings.Trim(path, "/") + if len(prefix) > 0 { + return prefix + "/" + path + } else { + return path + } } // addBranch ensures branch esists and assign value to the last tagged node @@ -121,41 +134,41 @@ func (n *xmlNode) buildXML(w io.Writer, indent, tabsize uint8) { // 2001:DB8::/32 for _, value := range n.value.([]interface{}) { stringValue := value.(string) - n.printTagWithValue(w, stringValue, indent, tabsize) + n.writeTagWithValue(w, stringValue, indent, tabsize) } case string: // value is a string stringValue := n.value.(string) - n.printTagWithValue(w, stringValue, indent, tabsize) + n.writeTagWithValue(w, stringValue, indent, tabsize) default: // no value node, may have nested tags - n.printTagNoValue(w, indent, tabsize) + n.writeTagNoValue(w, indent, tabsize) } } -// printTagNoValue prints tag which has no value, But it may have nested tags +// writeTagNoValue prints tag which has no value, But it may have nested tags // // ... // -func (n *xmlNode) printTagNoValue(w io.Writer, indent, tabsize uint8) { - n.printTag(w, indent, true, eol) +func (n *xmlNode) writeTagNoValue(w io.Writer, indent, tabsize uint8) { + n.writeTag(w, indent, true, eol) for i := range n.children { n.children[i].buildXML(w, indent+tabsize, tabsize) } - n.printTag(w, indent, false, eol) + n.writeTag(w, indent, false, eol) } -// printTagWithValue prints tag with value. But it must have no children, +// writeTagWithValue prints tag with value. But it must have no children, // and children are not printed // value -func (n *xmlNode) printTagWithValue(w io.Writer, value string, indent, tabsize uint8) { - n.printTag(w, indent, true, noEol) - n.printValue(w, value) - n.printTag(w, 0, false, eol) +func (n *xmlNode) writeTagWithValue(w io.Writer, value string, indent, tabsize uint8) { + n.writeTag(w, indent, true, noEol) + n.writeValue(w, value) + n.writeTag(w, 0, false, eol) } -// printTag prints XML tag into io.Writer -func (n *xmlNode) printTag(w io.Writer, indent uint8, openTag bool, eol string) { +// writeTag prints XML tag into io.Writer +func (n *xmlNode) writeTag(w io.Writer, indent uint8, openTag bool, eol string) { if n.tag == "" { return } @@ -183,21 +196,7 @@ func (n *xmlNode) printTag(w io.Writer, indent uint8, openTag bool, eol string) } } -// printTag prints XML value into io.Writer -func (n *xmlNode) printValue(w io.Writer, value string) { +// writeTag prints XML value into io.Writer +func (n *xmlNode) writeValue(w io.Writer, value string) { _, _ = fmt.Fprintf(w, "%s", value) } - -// checkExcludes returns true if first tag of the key matches item with excludes list -func checkExcludes(key string, excludes []string) bool { - tags := strings.Split(key, "/") - if len(tags) == 0 { - return false - } - for j := range excludes { - if tags[0] == excludes[j] { - return true - } - } - return false -} diff --git a/pkg/models/ch_configs.go b/pkg/models/ch_configs.go index 46a901cf7..bf48606bf 100644 --- a/pkg/models/ch_configs.go +++ b/pkg/models/ch_configs.go @@ -30,54 +30,37 @@ const ( allShardsOneReplicaClusterName = "all-sharded" ) +// generateUsersConfig creates data for "users.xml" +func generateUsersConfig(chi *chiv1.ClickHouseInstallation) string { + return generateXMLConfig(chi.Spec.Configuration.Users, configUsers) +} + // generateProfilesConfig creates data for "profiles.xml" func 
generateProfilesConfig(chi *chiv1.ClickHouseInstallation) string {
-	return genConfigXML(chi.Spec.Configuration.Profiles, configProfiles)
+	return generateXMLConfig(chi.Spec.Configuration.Profiles, configProfiles)
 }
 
 // generateQuotasConfig creates data for "quotas.xml"
 func generateQuotasConfig(chi *chiv1.ClickHouseInstallation) string {
-	return genConfigXML(chi.Spec.Configuration.Quotas, configQuotas)
+	return generateXMLConfig(chi.Spec.Configuration.Quotas, configQuotas)
 }
 
+// generateSettingsConfig creates data for "settings.xml"
+func generateSettingsConfig(chi *chiv1.ClickHouseInstallation) string {
+	return generateXMLConfig(chi.Spec.Configuration.Settings, "")
+}
+
-// genConfigXML creates XML using map[string]string definitions
-func genConfigXML(data map[string]interface{}, section string) string {
+// generateXMLConfig creates XML using map[string]string definitions
+func generateXMLConfig(data map[string]interface{}, prefix string) string {
 	if len(data) == 0 {
 		return ""
 	}
 
 	b := &bytes.Buffer{}
-	// <yandex>
-	//     <profiles>
-	fprintf(b, "<%s>\n", xmlTagYandex)
-	fprintf(b, "%4s<%s>\n", " ", section)
-
-	xmlbuilder.GenerateXML(b, data, 4, 4)
-	//     </profiles>
-	// </yandex>
-	fprintf(b, "%4s</%s>\n", " ", section)
-	fprintf(b, "</%s>\n", xmlTagYandex)
-
-	return b.String()
-}
-
-// generateSettingsConfig creates data for "settings.xml"
-func generateSettingsConfig(chi *chiv1.ClickHouseInstallation) string {
-	if len(chi.Spec.Configuration.Settings) == 0 {
-		return ""
-	}
-
-	b := &bytes.Buffer{}
+	xmlbuilder.GenerateXML(b, data, prefix)
 	// <yandex>
 	fprintf(b, "<%s>\n", xmlTagYandex)
-	xmlbuilder.GenerateXML(b, chi.Spec.Configuration.Settings, 0, 4, configUsers, configProfiles, configQuotas)
 	// </yandex>
 	fprintf(b, "</%s>\n", xmlTagYandex)
 
 	return b.String()

From 2c76e9fcfaf3a986f033cb12c9167e275820ffe7 Mon Sep 17 00:00:00 2001
From: Vladislav Klimenko
Date: Mon, 13 May 2019 19:29:25 +0300
Subject: [PATCH 24/32] dev: ensure user config has mandatory fields:
 profile,quota,networks/ip,password|password_sha256_hex

---
 .../03-incorrect-settings-create.yaml |  2 +
 pkg/models/builders/xml/xml.go        | 14 ++-
 pkg/models/normalizers.go             | 92 +++++++++++++++
 3 files changed, 107 insertions(+), 1 deletion(-)

diff --git a/docs/examples-withstand-errors/03-incorrect-settings-create.yaml b/docs/examples-withstand-errors/03-incorrect-settings-create.yaml
index cc7dedfa1..1a48cf36f 100644
--- a/docs/examples-withstand-errors/03-incorrect-settings-create.yaml
+++ b/docs/examples-withstand-errors/03-incorrect-settings-create.yaml
@@ -21,6 +21,8 @@ spec:
       admin/quota: default
       readonly/profile: readonly
       readonly/quota: default
+      support/password: qwerty
+      support_readonly/profile: readonly
     profiles:
       test_profile/max_memory_usage: "1000000000"
       test_profile/readonly: "1"
diff --git a/pkg/models/builders/xml/xml.go b/pkg/models/builders/xml/xml.go
index 66bb2fe4c..161200b84 100644
--- a/pkg/models/builders/xml/xml.go
+++ b/pkg/models/builders/xml/xml.go
@@ -124,7 +124,7 @@ func (n *xmlNode) addChild(tag string) *xmlNode {
 func (n *xmlNode) buildXML(w io.Writer, indent, tabsize uint8) {
 	switch n.value.(type) {
 	case []interface{}:
-		// Value is an array of strings
+		// Value is an array of "what would be a string"
 		// Repeat tag with each value, like:
 		//   <networks>
 		//     ...
+ // + // ::/64 + // 203.0.113.0/24 + // 2001:DB8::/32 + for _, value := range n.value.([]string) { + n.writeTagWithValue(w, value, indent, tabsize) + } case string: // value is a string stringValue := n.value.(string) diff --git a/pkg/models/normalizers.go b/pkg/models/normalizers.go index 6b21bfe03..f4f016a28 100644 --- a/pkg/models/normalizers.go +++ b/pkg/models/normalizers.go @@ -20,6 +20,7 @@ import ( "fmt" chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/config" + "regexp" "sort" "strings" ) @@ -46,6 +47,7 @@ func ChiNormalize(chi *chiv1.ClickHouseInstallation) (*chiv1.ClickHouseInstallat defaultsNormalizeReplicasUseFQDN(&chi.Spec.Defaults) deploymentNormalizeScenario(&chi.Spec.Defaults.Deployment) templatesNormalizeVolumeClaimTemplatesNames(chi.Spec.Templates.VolumeClaimTemplates) + configurationNormalize(&chi.Spec.Configuration) // Normalize all clusters in this CHI chi.WalkClusters(func(cluster *chiv1.ChiCluster) error { @@ -61,6 +63,96 @@ func ChiNormalize(chi *chiv1.ClickHouseInstallation) (*chiv1.ClickHouseInstallat return chi, nil } +func configurationNormalize(conf *chiv1.ChiConfiguration) { + configurationUsersNormalize(&conf.Users) + configurationProfilesNormalize(&conf.Profiles) + configurationQuotasNormalize(&conf.Quotas) + configurationSettingsNormalize(&conf.Settings) +} + +func normalizePath(path string) string { + // Normalize '//' to '/' + re := regexp.MustCompile("//+") + path = re.ReplaceAllString(path, "/") + // Cut all leading and trailing '/' + return strings.Trim(path, "/") +} + +func normalizePaths(conf *map[string]interface{}) { + pathsToNormalize := make([]string, 0, 0) + + // Find entries with paths to normalize + for key := range *conf { + path := normalizePath(key) + if len(path) != len(key) { + // Normalization worked. These paths have to be normalized + pathsToNormalize = append(pathsToNormalize, key) + } + } + + // Add entries with normalized paths + for _, key := range pathsToNormalize { + path := normalizePath(key) + (*conf)[path] = (*conf)[key] + } + + // Delete entries with un-normalized paths + for _, key := range pathsToNormalize { + delete(*conf, key) + } +} + +func configurationUsersNormalize(conf *map[string]interface{}) { + normalizePaths(conf) + + // Extract username from path + usernameMap := make(map[string]bool) + for path := range *conf { + // Split 'admin/password' + tags := strings.Split(path, "/") + username := tags[0] + usernameMap[username] = true + } + + // Ensure "must have" sections are in place, which are + // 1. user/profile + // 2. user/quota + // 3. user/networks/ip + // 4. 
user/password OR user/password_sha256_hex + for username := range usernameMap { + if _, ok := (*conf)[username+"/profile"]; !ok { + // No 'user/profile' section + (*conf)[username+"/profile"] = "default" + } + if _, ok := (*conf)[username+"/quota"]; !ok { + // No 'user/quota' section + (*conf)[username+"/quota"] = "default" + } + if _, ok := (*conf)[username+"/networks/ip"]; !ok { + // No 'user/networks/ip' section + (*conf)[username+"/networks/ip"] = []string{"::/0"} + } + _, okPassword := (*conf)[username+"/password"] + _, okPasswordSHA256 := (*conf)[username+"/password_sha256_hex"] + if !okPassword && !okPasswordSHA256 { + // Neither 'password' nor 'password_sha256_hex' are in place + (*conf)[username+"/password"] = "default" + } + } +} + +func configurationProfilesNormalize(conf *map[string]interface{}) { + normalizePaths(conf) +} + +func configurationQuotasNormalize(conf *map[string]interface{}) { + normalizePaths(conf) +} + +func configurationSettingsNormalize(conf *map[string]interface{}) { + normalizePaths(conf) +} + // clusterNormalize normalizes cluster and returns deployments usage counters for this cluster func clusterNormalize( chi *chiv1.ClickHouseInstallation, From 9b51a2c7397580d0a6998cbd6c7066f2528738fc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 13 May 2019 20:45:48 +0300 Subject: [PATCH 25/32] dev: extract user configuration default values into CHOP config --- pkg/config/chop_config.go | 34 +++++++++++++++++++++++++++++---- pkg/config/types.go | 10 ++++++++++ pkg/controllers/chi/creators.go | 2 +- pkg/models/normalizers.go | 22 ++++++++++----------- 4 files changed, 52 insertions(+), 16 deletions(-) diff --git a/pkg/config/chop_config.go b/pkg/config/chop_config.go index 16043c588..cc06f6979 100644 --- a/pkg/config/chop_config.go +++ b/pkg/config/chop_config.go @@ -28,11 +28,19 @@ import ( ) const ( - // Default update timeout in seconds - defaultStatefulSetUpdateTimeout = 120 - - // Default polling period in seconds + // Default values for update timeout and polling period in seconds + defaultStatefulSetUpdateTimeout = 120 defaultStatefulSetUpdatePollPeriod = 15 + + // Default values for ClickHouse user configuration + // 1. user/profile + // 2. user/quota + // 3. user/networks/ip + // 4. user/password + defaultChConfigUserDefaultProfile = "default" + defaultChConfigUserDefaultQuota = "default" + defaultChConfigUserDefaultNetworksIP = "::/0" + defaultChConfigUserDefaultPassword = "default" ) // GetConfig creates Config object based on current environment @@ -180,6 +188,24 @@ func (config *Config) normalize() error { config.OnStatefulSetUpdateFailureAction = OnStatefulSetUpdateFailureActionRollback } + // Default values for ClickHouse user configuration + // 1. user/profile + // 2. user/quota + // 3. user/networks/ip + // 4. 
user/password + if config.ChConfigUserDefaultProfile == "" { + config.ChConfigUserDefaultProfile = defaultChConfigUserDefaultProfile + } + if config.ChConfigUserDefaultQuota == "" { + config.ChConfigUserDefaultQuota = defaultChConfigUserDefaultQuota + } + if len(config.ChConfigUserDefaultNetworksIP) == 0 { + config.ChConfigUserDefaultNetworksIP = []string{defaultChConfigUserDefaultNetworksIP} + } + if config.ChConfigUserDefaultPassword == "" { + config.ChConfigUserDefaultPassword = defaultChConfigUserDefaultPassword + } + return nil } diff --git a/pkg/config/types.go b/pkg/config/types.go index d8373f61b..e3bc44c85 100644 --- a/pkg/config/types.go +++ b/pkg/config/types.go @@ -56,6 +56,16 @@ type Config struct { OnStatefulSetCreateFailureAction string `yaml:"onStatefulSetCreateFailureAction"` // StatefulSet update behavior - what to do in case StatefulSet can't reach new Generation OnStatefulSetUpdateFailureAction string `yaml:"onStatefulSetUpdateFailureAction"` + + // Default values for ClickHouse user configuration + // 1. user/profile + // 2. user/quota + // 3. user/networks/ip + // 4. user/password + ChConfigUserDefaultProfile string `yaml:"chConfigUserDefaultProfile"` + ChConfigUserDefaultQuota string `yaml:"chConfigUserDefaultQuota"` + ChConfigUserDefaultNetworksIP []string `yaml:"chConfigUserDefaultNetworksIP"` + ChConfigUserDefaultPassword string `yaml:"chConfigUserDefaultPassword"` } const ( diff --git a/pkg/controllers/chi/creators.go b/pkg/controllers/chi/creators.go index 4c9a569c7..95c991e71 100644 --- a/pkg/controllers/chi/creators.go +++ b/pkg/controllers/chi/creators.go @@ -376,7 +376,7 @@ func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chi. return nil, err } - chi, err = chopmodels.ChiNormalize(chi) + chi, err = chopmodels.ChiNormalize(chi, c.chopConfig) if err != nil { return nil, err } diff --git a/pkg/models/normalizers.go b/pkg/models/normalizers.go index f4f016a28..681b25768 100644 --- a/pkg/models/normalizers.go +++ b/pkg/models/normalizers.go @@ -32,22 +32,22 @@ func ChiApplyTemplateAndNormalize( ) (*chiv1.ClickHouseInstallation, error) { if config.ChiTemplate == nil { // No template specified - return ChiNormalize(chi.DeepCopy()) + return ChiNormalize(chi.DeepCopy(), config) } else { base := config.ChiTemplate.DeepCopy() base.MergeFrom(chi) - return ChiNormalize(base) + return ChiNormalize(base, config) } } // ChiNormalize normalizes CHI. 
// Returns NamedNumber of deployments number required to satisfy clusters' infrastructure -func ChiNormalize(chi *chiv1.ClickHouseInstallation) (*chiv1.ClickHouseInstallation, error) { +func ChiNormalize(chi *chiv1.ClickHouseInstallation, config *config.Config) (*chiv1.ClickHouseInstallation, error) { // Set defaults for CHI object properties defaultsNormalizeReplicasUseFQDN(&chi.Spec.Defaults) deploymentNormalizeScenario(&chi.Spec.Defaults.Deployment) templatesNormalizeVolumeClaimTemplatesNames(chi.Spec.Templates.VolumeClaimTemplates) - configurationNormalize(&chi.Spec.Configuration) + configurationNormalize(&chi.Spec.Configuration, config) // Normalize all clusters in this CHI chi.WalkClusters(func(cluster *chiv1.ChiCluster) error { @@ -63,8 +63,8 @@ func ChiNormalize(chi *chiv1.ClickHouseInstallation) (*chiv1.ClickHouseInstallat return chi, nil } -func configurationNormalize(conf *chiv1.ChiConfiguration) { - configurationUsersNormalize(&conf.Users) +func configurationNormalize(conf *chiv1.ChiConfiguration, chopConf *config.Config) { + configurationUsersNormalize(&conf.Users, chopConf) configurationProfilesNormalize(&conf.Profiles) configurationQuotasNormalize(&conf.Quotas) configurationSettingsNormalize(&conf.Settings) @@ -102,7 +102,7 @@ func normalizePaths(conf *map[string]interface{}) { } } -func configurationUsersNormalize(conf *map[string]interface{}) { +func configurationUsersNormalize(conf *map[string]interface{}, chopConf *config.Config) { normalizePaths(conf) // Extract username from path @@ -122,21 +122,21 @@ func configurationUsersNormalize(conf *map[string]interface{}) { for username := range usernameMap { if _, ok := (*conf)[username+"/profile"]; !ok { // No 'user/profile' section - (*conf)[username+"/profile"] = "default" + (*conf)[username+"/profile"] = chopConf.ChConfigUserDefaultProfile } if _, ok := (*conf)[username+"/quota"]; !ok { // No 'user/quota' section - (*conf)[username+"/quota"] = "default" + (*conf)[username+"/quota"] = chopConf.ChConfigUserDefaultQuota } if _, ok := (*conf)[username+"/networks/ip"]; !ok { // No 'user/networks/ip' section - (*conf)[username+"/networks/ip"] = []string{"::/0"} + (*conf)[username+"/networks/ip"] = chopConf.ChConfigUserDefaultNetworksIP } _, okPassword := (*conf)[username+"/password"] _, okPasswordSHA256 := (*conf)[username+"/password_sha256_hex"] if !okPassword && !okPasswordSHA256 { // Neither 'password' nor 'password_sha256_hex' are in place - (*conf)[username+"/password"] = "default" + (*conf)[username+"/password"] = chopConf.ChConfigUserDefaultPassword } } } From 9587107fa094ffb5bdfe8a6c6df52834045d0c5a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 13 May 2019 20:52:34 +0300 Subject: [PATCH 26/32] docs: ClickHouse user settings configuration --- config/config.yaml | 17 +++++++++++++++++ docs/operator_configuration.md | 25 +++++++++++++++++++++---- pkg/config/types.go | 8 ++++---- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/config/config.yaml b/config/config.yaml index 6afaa73d0..9efb92c48 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -49,3 +49,20 @@ onStatefulSetCreateFailureAction: delete # 2. rollback - delete Pod and rollback StatefulSet to previous Generation. # Pod would be recreated by StatefulSet based on rollback-ed configuration onStatefulSetUpdateFailureAction: rollback + +################################################ +## +## ClickHouse Settings Section +## +################################################ + +# Default values for ClickHouse user configuration +# 1. 
user/profile - string +# 2. user/quota - string +# 3. user/networks/ip - multiple strings +# 4. user/password - string +chConfigUserDefaultProfile: default +chConfigUserDefaultQuota: default +chConfigUserDefaultNetworksIP: + - "::/0" +chConfigUserDefaultPassword: "default" diff --git a/docs/operator_configuration.md b/docs/operator_configuration.md index ae887b7e6..e79ad18fb 100644 --- a/docs/operator_configuration.md +++ b/docs/operator_configuration.md @@ -8,11 +8,11 @@ namespaces: - info - onemore -########################################### +################################################ ## ## Additional Configuration Files Section ## -########################################### +################################################ # Path to folder where ClickHouse configuration files common for all instances within CHI are located. chCommonConfigsPath: config.d @@ -28,11 +28,11 @@ chUsersConfigsPath: users.d # Manifests are applied in sorted alpha-numeric order chiTemplatesPath: templates.d -########################################### +################################################ ## ## Cluster Create/Update/Delete Objects Section ## -########################################### +################################################ # How many seconds to wait for created/updated StatefulSet to be Ready statefulSetUpdateTimeout: 50 @@ -52,4 +52,21 @@ onStatefulSetCreateFailureAction: delete # 2. rollback - delete Pod and rollback StatefulSet to previous Generation. # Pod would be recreated by StatefulSet based on rollback-ed configuration onStatefulSetUpdateFailureAction: rollback + +################################################ +## +## ClickHouse Settings Section +## +################################################ + +# Default values for ClickHouse user configuration +# 1. user/profile - string +# 2. user/quota - string +# 3. user/networks/ip - multiple strings +# 4. user/password - string +chConfigUserDefaultProfile: default +chConfigUserDefaultQuota: default +chConfigUserDefaultNetworksIP: + - "::/0" +chConfigUserDefaultPassword: "default" ``` diff --git a/pkg/config/types.go b/pkg/config/types.go index e3bc44c85..4a0d8b1c7 100644 --- a/pkg/config/types.go +++ b/pkg/config/types.go @@ -58,10 +58,10 @@ type Config struct { OnStatefulSetUpdateFailureAction string `yaml:"onStatefulSetUpdateFailureAction"` // Default values for ClickHouse user configuration - // 1. user/profile - // 2. user/quota - // 3. user/networks/ip - // 4. user/password + // 1. user/profile - string + // 2. user/quota - string + // 3. user/networks/ip - multiple strings + // 4. 
user/password - string ChConfigUserDefaultProfile string `yaml:"chConfigUserDefaultProfile"` ChConfigUserDefaultQuota string `yaml:"chConfigUserDefaultQuota"` ChConfigUserDefaultNetworksIP []string `yaml:"chConfigUserDefaultNetworksIP"` From 62bff8409bcaf58eb0ef62ddf58769707a40e39e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 14 May 2019 00:05:05 +0300 Subject: [PATCH 27/32] env: Docker file --- Dockerfile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1ecb4ac2f..3d607aba3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,6 @@ -FROM golang:1.11.5 as builder +# === Builder === + +FROM golang:1.11.5 AS builder WORKDIR $GOPATH/src/github.com/altinity/clickhouse-operator @@ -11,7 +13,9 @@ ADD pkg pkg ADD cmd cmd RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /tmp/clickhouse-operator ./cmd/clickhouse-operator -FROM alpine:3.8 +# === Runner === + +FROM alpine:3.8 AS runner RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/* WORKDIR / COPY --from=builder /tmp/clickhouse-operator . From 0ffeee94d445ea6e4a541e451e139fc7e3ebdd67 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 14 May 2019 00:05:54 +0300 Subject: [PATCH 28/32] dev: increase defaultStatefulSetUpdateTimeout --- pkg/config/chop_config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/config/chop_config.go b/pkg/config/chop_config.go index cc06f6979..ee2ef5b50 100644 --- a/pkg/config/chop_config.go +++ b/pkg/config/chop_config.go @@ -29,7 +29,7 @@ import ( const ( // Default values for update timeout and polling period in seconds - defaultStatefulSetUpdateTimeout = 120 + defaultStatefulSetUpdateTimeout = 300 defaultStatefulSetUpdatePollPeriod = 15 // Default values for ClickHouse user configuration From 1b116f6e062e426371902aa31ffc457f6ee905b0 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 14 May 2019 14:44:07 +0300 Subject: [PATCH 29/32] docs & examples: Zookeeper docs & examples - 1&3 node persistent storage --- docs/zookeeper_setup.md | 77 ++++++--- .../zookeeper-1-node-create.sh | 0 .../zookeeper-1-node-delete.sh | 0 .../zookeeper-1-node.yaml | 2 +- .../zookeeper-3-nodes-create.sh | 0 .../zookeeper-3-nodes-delete.sh | 0 .../zookeeper-3-nodes.yaml | 2 +- .../zookeeper-3-nodes-create.sh | 9 ++ .../zookeeper-3-nodes-delete.sh | 7 + .../zookeeper-3-nodes.yaml | 151 ++++++++++++++++++ 10 files changed, 221 insertions(+), 27 deletions(-) rename manifests/zookeeper/{quick-start-stateless => quick-start-local-emptyDir}/zookeeper-1-node-create.sh (100%) rename manifests/zookeeper/{quick-start-stateless => quick-start-local-emptyDir}/zookeeper-1-node-delete.sh (100%) rename manifests/zookeeper/{quick-start-stateless => quick-start-local-emptyDir}/zookeeper-1-node.yaml (98%) rename manifests/zookeeper/{quick-start-stateless => quick-start-local-emptyDir}/zookeeper-3-nodes-create.sh (100%) rename manifests/zookeeper/{quick-start-stateless => quick-start-local-emptyDir}/zookeeper-3-nodes-delete.sh (100%) rename manifests/zookeeper/{quick-start-stateless => quick-start-local-emptyDir}/zookeeper-3-nodes.yaml (98%) create mode 100755 manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-create.sh create mode 100755 manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-delete.sh create mode 100644 manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes.yaml diff --git a/docs/zookeeper_setup.md b/docs/zookeeper_setup.md index 676a0bd3e..5cca07270 100644 --- 
a/docs/zookeeper_setup.md
+++ b/docs/zookeeper_setup.md
@@ -5,26 +5,36 @@ This document assumes k8s cluster already setup and `kubectl` has access to it.
 
 Zookeeper installation is available in two options:
 1. [Quick start](#quick-start) - just run it quickly and ask no questions
-1. [Advanced setup](#advanced-setup) - configure storage class, replicas number, etc
+1. [Advanced setup](#advanced-setup) - set up internal details, such as storage class, number of replicas, etc
 
-What steps are performed during ZooKeeper installation:
+During ZooKeeper installation the following items are created/configured:
 1. [OPTIONAL] Create separate namespace to run Zookeeper in
-1. Create k8s resoirces:
-   * [Service](https://kubernetes.io/docs/concepts/services-networking/service/) - used to provide central access point to Zookeeper
-   * [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) - used to provide DNS namings
-   * [Disruption Balance](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) - used to specify max number of offline pods
-   * [OPTIONAL] [Storage Class](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) - used to specify storage class to be used by Zookeeper for data storage
-   * [Stateful Set](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - used to manage and scale sets of pods
-
+1. Create k8s resources (optionally, within a namespace):
+   * [Service][k8sdoc_service_main] - used to provide central access point to Zookeeper
+   * [Headless Service][k8sdoc_service_headless] - used to provide DNS namings
+   * [Disruption Balance][k8sdoc_disruption_balance] - used to specify max number of offline pods
+   * [OPTIONAL] [Storage Class][k8sdoc_storage_class] - used to specify storage class to be used by Zookeeper for data storage
+   * [Stateful Set][k8sdoc_statefulset] - used to manage and scale sets of pods
 
 ## Quick start
-Quick start files are are located in [manifests/zookeeper/quick-start](../manifests/zookeeper/quick-start) folder.
-Quick start provides the following installation options:
-1. 1-node Zookeeper cluster (**zookeeper-1-** files) in [quick-start](../manifests/zookeeper/quick-start) folder
-1. 3-node Zookeeper cluster (**zookeeper-3-** files) in [quick-start](../manifests/zookeeper/quick-start) folder
-In this example we'll go with simple 1-node Zookeeper cluster.
-[create](../manifests/zookeeper/quick-start/zookeeper-1-node-create.sh) and [delete](../manifests/zookeeper/quick-start/zookeeper-1-node-delete.sh)
-shell scripts are available for simplification. In case you'd like to deploy Zookeeper manually, the following steps should be performed:
+Quick start is represented in two flavors:
+1. With persistent volume - good for AWS. Files are located in [manifests/zookeeper/quick-start-persistent-volume][quickstart_persistent]
+1. With local [`emptyDir`][k8sdoc_emptydir] storage - good for standalone local run, however has no true persistence.
+Files are located in [manifests/zookeeper/quick-start-local-emptyDir][quickstart_emptydir]
+Each quick start flavor provides the following installation options:
+1. 1-node Zookeeper cluster (**zookeeper-1-** files). No failover provided.
+1. 3-node Zookeeper cluster (**zookeeper-3-** files). Failover provided.
+In case you'd like to test with AWS or any other cloud provider, we recommend going with [manifests/zookeeper/quick-start-persistent-volume][quickstart_persistent] persistent storage.
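
For orientation, the whole persistent-volume quick start boils down to a couple of `kubectl` calls. A minimal sketch, assuming a local checkout of this repository (the `zoo3ns` namespace matches the helper scripts added later in this patch):

```bash
# Create a namespace for Zookeeper and apply the 3-node persistent-volume manifest
kubectl create namespace zoo3ns
kubectl --namespace=zoo3ns apply -f \
    manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes.yaml

# Wait until all three replicas report Ready
kubectl --namespace=zoo3ns rollout status statefulset/zookeeper
```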
+
+In case of a local test, you may prefer to go with [manifests/zookeeper/quick-start-local-emptyDir][quickstart_emptydir] `emptyDir`.
+
+### Script-based Installation
+In this example we'll go with a simple 1-node Zookeeper cluster on AWS and pick [manifests/zookeeper/quick-start-persistent-volume][quickstart_persistent].
+Both [create](../manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-create.sh) and [delete](../manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-delete.sh)
+shell scripts are available for simplification.
+
+### Manual Installation
+In case you'd like to deploy Zookeeper manually, the following steps should be performed:
 
 ### Namespace
 Create **namespace**
@@ -40,14 +50,14 @@ kubectl apply -f zookeeper-1-node.yaml -n zoo1ns
 
 Now Zookeeper should be up and running. Let's [explore Zookeeper cluster](#explore-zookeeper-cluster).
 
-**IMPORTANT** quick-start zookeeper installation are for test purposes only and are stateless - do not have **PersistentVolume**.
-For persistent storage configuration for Zookeeper we need to apply [advanced setup](#advanced-setup)
+**IMPORTANT** quick-start zookeeper installations are for test purposes mainly.
+For fine-tuned Zookeeper setup please refer to [advanced setup](#advanced-setup) options.
 
 ## Advanced setup
 Advanced files are located in [manifests/zookeeper/advanced](../manifests/zookeeper/advanced) folder.
 All resources are separated into different files so it is easy to modify them and setup required options.
-[create](../manifests/zookeeper/advanced/create-zookeeper.sh) and [delete](../manifests/zookeeper/advanced/delete-zookeeper.sh)
+Both [create](../manifests/zookeeper/advanced/create-zookeeper.sh) and [delete](../manifests/zookeeper/advanced/delete-zookeeper.sh)
 shell scripts are available.
 
 Step-by-step explanations:
@@ -91,20 +101,21 @@ poddisruptionbudget.policy/zookeeper-pod-distribution-budget created
 
 ### Storage Class
 This part is not that straightforward and may require communication with k8s instance administrator.
-First of all, we need to decide, whether Zookeeper would use [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) as a storage or just stick to more simple [Volume](https://kubernetes.io/docs/concepts/storage/volumes) (In doc [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) type is used)
+First of all, we need to decide, whether Zookeeper would use [Persistent Volume][k8sdoc_persistent_volume] as a storage or just stick to more simple [Volume][k8sdoc_volume] (In doc [emptyDir][k8sdoc_emptydir] type is used)
 
-In case we'd prefer to stick with simpler solution and go with [Volume of type emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), we are done here and need to adjust [StatefulSet config](../manifests/zookeeper/05-stateful-set.yaml) as described in next [Stateful Set unit](#stateful-set). Just move to [it](#stateful-set).
+In case we'd prefer to stick with simpler solution and go with [Volume of type emptyDir][k8sdoc_emptydir], we are done here and need to adjust [StatefulSet config](../manifests/zookeeper/05-stateful-set.yaml) as described in next [Stateful Set unit](#stateful-set). Just move to [it](#stateful-set).
 
-In case we'd prefer to go with [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) storage, some additional steps have to be done.
+In case we'd prefer to go with [Persistent Volume][k8sdoc_persistent_volume] storage, some additional steps have to be done. -Shortly, [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) is used to bind together [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), -which are created either by k8s admin manually or automatically by [Provisioner](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/). In any case, Persistent Volumes are provided externally to an application to be deployed into k8s. So, this application has to know **Storage Class Name** to ask for from the k8s in application's claim for new persistent volume - [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). +Shortly, [Storage Class][k8sdoc_storage_class] is used to bind together [Persistent Volumes][k8sdoc_persistent_volume], +which are created either by k8s admin manually or automatically by [Provisioner][k8sdocs_dynamic_provisioning]. In any case, Persistent Volumes are provided externally to an application to be deployed into k8s. +So, this application has to know **Storage Class Name** to ask for from the k8s in application's claim for new persistent volume - [Persistent Volume Claim][k8sdoc_persistent_volume_claim]. This **Storage Class Name** should be asked from k8s admin and written as application's **Persistent Volume Claim** `.spec.volumeClaimTemplates.storageClassName` parameter in [05-stateful-set.yaml](../manifests/zookeeper/advanced/05-stateful-set.yaml). ### Stateful Set Edit [05-stateful-set.yaml](../manifests/zookeeper/advanced/05-stateful-set.yaml) according to your Storage Preferences. -In case we'd go with [Volume of type emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), ensure `.spec.template.spec.containers.volumes` is in place and look like the following: +In case we'd go with [Volume of type emptyDir][k8sdoc_emptydir], ensure `.spec.template.spec.containers.volumes` is in place and look like the following: ```yaml volumes: - name: datadir-volume @@ -206,3 +217,19 @@ zookeepers 3/3 10m ``` In case all looks fine Zookeeper cluster is up and running + + + +[k8sdoc_service_main]: https://kubernetes.io/docs/concepts/services-networking/service/ +[k8sdoc_service_headless]: https://kubernetes.io/docs/concepts/services-networking/service/#headless-services +[k8sdoc_disruption_balance]: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +[k8sdoc_storage_class]: https://kubernetes.io/docs/concepts/storage/storage-classes/ +[k8sdoc_statefulset]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ +[k8sdoc_volume]: https://kubernetes.io/docs/concepts/storage/volumes +[k8sdoc_emptydir]: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir +[k8sdoc_persistent_volume]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ +[k8sdoc_persistent_volume_claim]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims +[k8sdocs_dynamic_provisioning]: https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/ + +[quickstart_persistent]: ../manifests/zookeeper/quick-start-persistent-volume +[quickstart_emptydir]: ../manifests/zookeeper/quick-start-local-emptyDir diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh b/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node-create.sh similarity index 100% rename from 
manifests/zookeeper/quick-start-stateless/zookeeper-1-node-create.sh rename to manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node-create.sh diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-1-node-delete.sh b/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node-delete.sh similarity index 100% rename from manifests/zookeeper/quick-start-stateless/zookeeper-1-node-delete.sh rename to manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node-delete.sh diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-1-node.yaml b/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node.yaml similarity index 98% rename from manifests/zookeeper/quick-start-stateless/zookeeper-1-node.yaml rename to manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node.yaml index d8f15cbc4..0344a994b 100644 --- a/manifests/zookeeper/quick-start-stateless/zookeeper-1-node.yaml +++ b/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node.yaml @@ -37,7 +37,7 @@ spec: apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: zookeeper-pod-distribution-budget + name: zookeeper-pod-disruption-budget spec: selector: matchLabels: diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-create.sh b/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes-create.sh similarity index 100% rename from manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-create.sh rename to manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes-create.sh diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-delete.sh b/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes-delete.sh similarity index 100% rename from manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes-delete.sh rename to manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes-delete.sh diff --git a/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes.yaml b/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes.yaml similarity index 98% rename from manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes.yaml rename to manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes.yaml index a98f39d75..d9f371ad5 100644 --- a/manifests/zookeeper/quick-start-stateless/zookeeper-3-nodes.yaml +++ b/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes.yaml @@ -37,7 +37,7 @@ spec: apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: zookeeper-pod-distribution-budget + name: zookeeper-pod-disruption-budget spec: selector: matchLabels: diff --git a/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-create.sh b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-create.sh new file mode 100755 index 000000000..846af9bb0 --- /dev/null +++ b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-create.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +ZK_NAMESPACE="${ZK_NAMESPACE:-zoo3ns}" + +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +kubectl create namespace "${ZK_NAMESPACE}" +kubectl --namespace="${ZK_NAMESPACE}" apply -f "${CUR_DIR}/zookeeper-3-nodes.yaml" + diff --git a/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-delete.sh b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-delete.sh new file mode 100755 index 000000000..56124e380 --- /dev/null +++ b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-delete.sh @@ -0,0 +1,7 @@ +#!/bin/bash + 
+ZK_NAMESPACE="${ZK_NAMESPACE:-zoo3ns}" + +echo "Delete Zookeeper namespace ${ZK_NAMESPACE}" + +kubectl delete namespace "${ZK_NAMESPACE}" diff --git a/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes.yaml b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes.yaml new file mode 100644 index 000000000..42b0c1e2d --- /dev/null +++ b/manifests/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes.yaml @@ -0,0 +1,151 @@ +# Setup Service to provide access to Zookeeper for clients +apiVersion: v1 +kind: Service +metadata: + # DNS would be like zookeeper.zoons + name: zookeeper + labels: + app: zookeeper +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper + what: node +--- +# Setup Headless Service for StatefulSet +apiVersion: v1 +kind: Service +metadata: + # DNS would be like zookeeper-0.zookeepers.etc + name: zookeepers + labels: + app: zookeeper +spec: + ports: + - port: 2888 + name: server + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + what: node +--- +# Setup max number of unavailable pods in StatefulSet +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pod-disruption-budget +spec: + selector: + matchLabels: + app: zookeeper + maxUnavailable: 1 +--- +# Setup Zookeeper StatefulSet +# Possible params: +# 1. replicas +# 2. memory +# 3. cpu +# 4. storage +# 5. user to run app +apiVersion: apps/v1 +kind: StatefulSet +metadata: + # nodes would be named as zookeeper-0, zookeeper-1, zookeeper-2 + name: zookeeper +spec: + selector: + matchLabels: + app: zookeeper + serviceName: zookeepers + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + what: node + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + containers: + - name: kubernetes-zookeeper + imagePullPolicy: Always + image: "k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10" + resources: + requests: + memory: "1Gi" + cpu: "0.5" + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: server + - containerPort: 3888 + name: leader-election + command: + - sh + - -c + - "start-zookeeper \ + --servers=3 \ + --data_dir=/var/lib/zookeeper/data \ + --data_log_dir=/var/lib/zookeeper/data/log \ + --conf_dir=/opt/zookeeper/conf \ + --client_port=2181 \ + --election_port=3888 \ + --server_port=2888 \ + --tick_time=2000 \ + --init_limit=10 \ + --sync_limit=5 \ + --heap=512M \ + --max_client_cnxns=60 \ + --snap_retain_count=3 \ + --purge_interval=12 \ + --max_session_timeout=40000 \ + --min_session_timeout=4000 \ + --log_level=INFO" + readinessProbe: + exec: + command: + - sh + - -c + - "zookeeper-ready 2181" + initialDelaySeconds: 10 + timeoutSeconds: 5 + livenessProbe: + exec: + command: + - sh + - -c + - "zookeeper-ready 2181" + initialDelaySeconds: 10 + timeoutSeconds: 5 + volumeMounts: + - name: datadir-volume + mountPath: /var/lib/zookeeper + # Run as a non-privileged user + securityContext: + runAsUser: 1000 + fsGroup: 1000 + volumeClaimTemplates: + - metadata: + name: datadir-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi From f66544f9e3604fe7919326c5fe964964e075c392 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 14 May 2019 15:05:19 +0300 Subject: [PATCH 30/32] docs: Zookeeper advanced setup 
---
 docs/zookeeper_setup.md                       | 26 ++++++++++++-------
 .../zookeeper-1-node-create.sh                |  0
 .../zookeeper-1-node-delete.sh                |  0
 .../zookeeper-1-node.yaml                     |  0
 .../zookeeper-3-nodes-create.sh               |  0
 .../zookeeper-3-nodes-delete.sh               |  0
 .../zookeeper-3-nodes.yaml                    |  0
 7 files changed, 16 insertions(+), 10 deletions(-)
 rename manifests/zookeeper/{quick-start-local-emptyDir => quick-start-volume-emptyDir}/zookeeper-1-node-create.sh (100%)
 rename manifests/zookeeper/{quick-start-local-emptyDir => quick-start-volume-emptyDir}/zookeeper-1-node-delete.sh (100%)
 rename manifests/zookeeper/{quick-start-local-emptyDir => quick-start-volume-emptyDir}/zookeeper-1-node.yaml (100%)
 rename manifests/zookeeper/{quick-start-local-emptyDir => quick-start-volume-emptyDir}/zookeeper-3-nodes-create.sh (100%)
 rename manifests/zookeeper/{quick-start-local-emptyDir => quick-start-volume-emptyDir}/zookeeper-3-nodes-delete.sh (100%)
 rename manifests/zookeeper/{quick-start-local-emptyDir => quick-start-volume-emptyDir}/zookeeper-3-nodes.yaml (100%)

diff --git a/docs/zookeeper_setup.md b/docs/zookeeper_setup.md
index 5cca07270..59c25a1b3 100644
--- a/docs/zookeeper_setup.md
+++ b/docs/zookeeper_setup.md
@@ -19,14 +19,15 @@ During ZooKeeper installation the following items are created/configured:
 ## Quick start
 Quick start is represented in two flavors:
 1. With persistent volume - good for AWS. Files are located in [manifests/zookeeper/quick-start-persistent-volume][quickstart_persistent]
-1. With local [`emptyDir`][k8sdoc_emptydir] storage - good for standalone local run, however has no true persistence.
-Files are located in [manifests/zookeeper/quick-start-local-emptyDir][quickstart_emptydir]
+1. With local [`emptyDir`][k8sdoc_emptydir] storage - good for standalone local run, however has no true persistence. \
+Files are located in [manifests/zookeeper/quick-start-volume-emptyDir][quickstart_emptydir]
+
 Each quick start flavor provides the following installation options:
 1. 1-node Zookeeper cluster (**zookeeper-1-** files). No failover provided.
 1. 3-node Zookeeper cluster (**zookeeper-3-** files). Failover provided.
-In case you'd like to test with AWS or any other cloud provider, we recommend going with [manifests/zookeeper/quick-start-persistent-volume][quickstart_persistent] persistent storage.
-In case of a local test, you may prefer to go with [manifests/zookeeper/quick-start-local-emptyDir][quickstart_emptydir] `emptyDir`.
+In case you'd like to test with AWS or any other cloud provider, we recommend going with [manifests/zookeeper/quick-start-persistent-volume][quickstart_persistent] persistent storage.
+In case of a local test, you may prefer to go with [manifests/zookeeper/quick-start-volume-emptyDir][quickstart_emptydir] `emptyDir`.
 
 ### Script-based Installation
 In this example we'll go with a simple 1-node Zookeeper cluster on AWS and pick [manifests/zookeeper/quick-start-persistent-volume][quickstart_persistent].
 Both [create](../manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-create.sh) and [delete](../manifests/zookeeper/quick-start-persistent-volume/zookeeper-1-node-delete.sh)
 shell scripts are available for simplification.
@@ -57,8 +58,13 @@ For fine-tuned Zookeeper setup please refer to [advanced setup](#advanced-setup)
 Advanced files are located in [manifests/zookeeper/advanced](../manifests/zookeeper/advanced) folder.
 All resources are separated into different files so it is easy to modify them and setup required options.
-Both [create](../manifests/zookeeper/advanced/create-zookeeper.sh) and [delete](../manifests/zookeeper/advanced/delete-zookeeper.sh)
-shell scripts are available.
+Advanced setup is available in two options:
+1. With [persistent volume][k8sdoc_persistent_volume]
+1. With [emptyDir volume][k8sdoc_emptydir]
+
+Each of these options has both `create` and `delete` scripts provided:
+1. Persistent volume [create](../manifests/zookeeper/advanced/zookeeper-persistent-volume-create.sh) and [delete](../manifests/zookeeper/advanced/zookeeper-persistent-volume-delete.sh) scripts
+1. EmptyDir volume [create](../manifests/zookeeper/advanced/zookeeper-volume-emptyDir-create.sh) and [delete](../manifests/zookeeper/advanced/zookeeper-volume-emptyDir-delete.sh) scripts
 
 Step-by-step explanations:
@@ -103,17 +109,17 @@ This part is not that straightforward and may require communication with k8s ins
 First of all, we need to decide, whether Zookeeper would use [Persistent Volume][k8sdoc_persistent_volume] as a storage or just stick to more simple [Volume][k8sdoc_volume] (In doc [emptyDir][k8sdoc_emptydir] type is used)
 
-In case we'd prefer to stick with simpler solution and go with [Volume of type emptyDir][k8sdoc_emptydir], we are done here and need to adjust [StatefulSet config](../manifests/zookeeper/05-stateful-set.yaml) as described in next [Stateful Set unit](#stateful-set). Just move to [it](#stateful-set).
+In case we'd prefer to stick with simpler solution and go with [Volume of type emptyDir][k8sdoc_emptydir], we need to go with [emptyDir StatefulSet config](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) as described in next [Stateful Set unit](#stateful-set). Just move to [it](#stateful-set).
 
-In case we'd prefer to go with [Persistent Volume][k8sdoc_persistent_volume] storage, some additional steps have to be done.
+In case we'd prefer to go with [Persistent Volume][k8sdoc_persistent_volume] storage, we need to go with [persistent Volume StatefulSet config](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml)
 
 Shortly, [Storage Class][k8sdoc_storage_class] is used to bind together [Persistent Volumes][k8sdoc_persistent_volume],
 which are created either by k8s admin manually or automatically by [Provisioner][k8sdocs_dynamic_provisioning]. In any case, Persistent Volumes are provided externally to an application to be deployed into k8s.
 So, this application has to know **Storage Class Name** to ask for from the k8s in application's claim for new persistent volume - [Persistent Volume Claim][k8sdoc_persistent_volume_claim].
-This **Storage Class Name** should be asked from k8s admin and written as application's **Persistent Volume Claim** `.spec.volumeClaimTemplates.storageClassName` parameter in [05-stateful-set.yaml](../manifests/zookeeper/advanced/05-stateful-set.yaml).
+This **Storage Class Name** should be asked from k8s admin and written as application's **Persistent Volume Claim** `.spec.volumeClaimTemplates.storageClassName` parameter in `StatefulSet` configuration. [StatefulSet manifest with emptyDir](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) and/or [StatefulSet manifest with Persistent Volume](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml).
 
 ### Stateful Set
-Edit [05-stateful-set.yaml](../manifests/zookeeper/advanced/05-stateful-set.yaml) according to your Storage Preferences.
+Edit [StatefulSet manifest with emptyDir](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) and/or [StatefulSet manifest with Persistent Volume](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml). according to your Storage Preferences.
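
Before wiring the persistent-volume manifest to a cluster, it is worth checking what the cluster can actually provision. A hedged sketch, since storage class names are entirely cluster-specific and the `zoo3ns` namespace is just the quick-start default:

```bash
# List storage classes the cluster offers; the one marked "(default)" is used
# when volumeClaimTemplates carry no explicit storageClassName
kubectl get storageclass

# After the StatefulSet is applied, confirm each replica's claim got Bound
kubectl --namespace=zoo3ns get persistentvolumeclaims
```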
In case we'd go with [Volume of type emptyDir][k8sdoc_emptydir], ensure `.spec.template.spec.containers.volumes` is in place and look like the following: ```yaml diff --git a/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node-create.sh b/manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-1-node-create.sh similarity index 100% rename from manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node-create.sh rename to manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-1-node-create.sh diff --git a/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node-delete.sh b/manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-1-node-delete.sh similarity index 100% rename from manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node-delete.sh rename to manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-1-node-delete.sh diff --git a/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node.yaml b/manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-1-node.yaml similarity index 100% rename from manifests/zookeeper/quick-start-local-emptyDir/zookeeper-1-node.yaml rename to manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-1-node.yaml diff --git a/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes-create.sh b/manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-3-nodes-create.sh similarity index 100% rename from manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes-create.sh rename to manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-3-nodes-create.sh diff --git a/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes-delete.sh b/manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-3-nodes-delete.sh similarity index 100% rename from manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes-delete.sh rename to manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-3-nodes-delete.sh diff --git a/manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes.yaml b/manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-3-nodes.yaml similarity index 100% rename from manifests/zookeeper/quick-start-local-emptyDir/zookeeper-3-nodes.yaml rename to manifests/zookeeper/quick-start-volume-emptyDir/zookeeper-3-nodes.yaml From 6f5ecf8a87ee3a33859d43b5cdf603daebcec32f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 14 May 2019 15:12:01 +0300 Subject: [PATCH 31/32] docs: Zookeeper --- docs/zookeeper_setup.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/zookeeper_setup.md b/docs/zookeeper_setup.md index 59c25a1b3..970b0ead5 100644 --- a/docs/zookeeper_setup.md +++ b/docs/zookeeper_setup.md @@ -109,17 +109,17 @@ This part is not that straightforward and may require communication with k8s ins First of all, we need to decide, whether Zookeeper would use [Persistent Volume][k8sdoc_persistent_volume] as a storage or just stick to more simple [Volume][k8sdoc_volume] (In doc [emptyDir][k8sdoc_emptydir] type is used) -In case we'd prefer to stick with simpler solution and go with [Volume of type emptyDir][k8sdoc_emptydir], we need to go with [emptyDir StatefulSet config](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) as described in next [Stateful Set unit](#stateful-set). Just move to [it](#stateful-set). 
+In case we'd prefer to stick with simpler solution and go with [Volume of type emptyDir][k8sdoc_emptydir], we need to go with **emptyDir StatefulSet config** [05-stateful-set-volume-emptyDir.yaml](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) as described in next [Stateful Set unit](#stateful-set). Just move to [it](#stateful-set). -In case we'd prefer to go with [Persistent Volume][k8sdoc_persistent_volume] storage, we need to go with [persistent Volume StatefulSet config](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml) +In case we'd prefer to go with [Persistent Volume][k8sdoc_persistent_volume] storage, we need to go with **Persistent Volume StatefulSet config** [05-stateful-set-persistent-volume.yaml](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml) Shortly, [Storage Class][k8sdoc_storage_class] is used to bind together [Persistent Volumes][k8sdoc_persistent_volume], which are created either by k8s admin manually or automatically by [Provisioner][k8sdocs_dynamic_provisioning]. In any case, Persistent Volumes are provided externally to an application to be deployed into k8s. So, this application has to know **Storage Class Name** to ask for from the k8s in application's claim for new persistent volume - [Persistent Volume Claim][k8sdoc_persistent_volume_claim]. -This **Storage Class Name** should be asked from k8s admin and written as application's **Persistent Volume Claim** `.spec.volumeClaimTemplates.storageClassName` parameter in `StatefulSet` configuration. [StatefulSet manifest with emptyDir](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) and/or [StatefulSet manifest with Persistent Volume](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml). +This **Storage Class Name** should be asked from k8s admin and written as application's **Persistent Volume Claim** `.spec.volumeClaimTemplates.storageClassName` parameter in `StatefulSet` configuration. **StatefulSet manifest with emptyDir** [05-stateful-set-volume-emptyDir.yaml](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) and/or **StatefulSet manifest with Persistent Volume** [05-stateful-set-persistent-volume.yaml](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml). ### Stateful Set -Edit [StatefulSet manifest with emptyDir](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) and/or [StatefulSet manifest with Persistent Volume](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml). according to your Storage Preferences. +Edit **StatefulSet manifest with emptyDir** [05-stateful-set-volume-emptyDir.yaml](../manifests/zookeeper/advanced/05-stateful-set-volume-emptyDir.yaml) and/or **StatefulSet manifest with Persistent Volume** [05-stateful-set-persistent-volume.yaml](../manifests/zookeeper/advanced/05-stateful-set-persistent-volume.yaml) according to your Storage Preferences. In case we'd go with [Volume of type emptyDir][k8sdoc_emptydir], ensure `.spec.template.spec.containers.volumes` is in place and look like the following: ```yaml From 36e540db938bd7071fe39159b2685e953471e6b2 Mon Sep 17 00:00:00 2001 From: alex-zaitsev Date: Wed, 15 May 2019 16:40:50 +0300 Subject: [PATCH 32/32] 0.2.3 --- release | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release b/release index ee1372d33..717903969 100644 --- a/release +++ b/release @@ -1 +1 @@ -0.2.2 +0.2.3
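
With the release bumped to 0.2.3, one way to confirm which operator build a cluster is actually running is to read the image tag off the live Deployment. A sketch only: the `kube-system` namespace and the `clickhouse-operator` deployment name are assumptions based on the default install manifest, so adjust them to your installation:

```bash
# Print the operator image (and therefore the release tag) of the running Deployment
kubectl --namespace=kube-system get deployment clickhouse-operator \
    -o jsonpath='{.spec.template.spec.containers[0].image}'
```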