diff --git a/dev/binary_build.sh b/dev/binary_build.sh
index 7c2722f81..aea581607 100755
--- a/dev/binary_build.sh
+++ b/dev/binary_build.sh
@@ -5,19 +5,20 @@
# Source configuration
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/binary_build_config.sh
+source "${CUR_DIR}/binary_build_config.sh"
REPO="github.com/altinity/clickhouse-operator"
-VERSION=$(cd ${SRC_ROOT}; cat release)
-GIT_SHA=$(cd ${CUR_DIR}; git rev-parse --short HEAD)
+VERSION=$(cd "${SRC_ROOT}"; cat release)
+GIT_SHA=$(cd "${CUR_DIR}"; git rev-parse --short HEAD)
# Build clickhouse-operator install .yaml manifest
-${SRC_ROOT}/manifests/operator/build-clickhouse-operator-yaml.sh
+"${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh"
#CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ${CUR_DIR}/clickhouse-operator ${SRC_ROOT}/cmd/clickhouse-operator
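+# -v prints package names as they are compiled; -a forces rebuilding of packages that are already up to date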
CGO_ENABLED=0 go build \
+ -v -a \
-ldflags "-X ${REPO}/pkg/version.Version=${VERSION} -X ${REPO}/pkg/version.GitSHA=${GIT_SHA}" \
- -o ${OPERATOR_BIN} \
- ${SRC_ROOT}/cmd/manager/main.go
+ -o "${OPERATOR_BIN}" \
+ "${SRC_ROOT}/cmd/manager/main.go"
exit $?
diff --git a/dev/binary_build_config.sh b/dev/binary_build_config.sh
index 9a6af2863..f863a79da 100755
--- a/dev/binary_build_config.sh
+++ b/dev/binary_build_config.sh
@@ -3,8 +3,8 @@
# Build configuration options
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-SRC_ROOT=$(realpath ${CUR_DIR}/..)
+SRC_ROOT="$(realpath "${CUR_DIR}/..")"
# Operator binary name can be specified externally
# Default - put 'clickhouse-operator' into cur dir
-OPERATOR_BIN=${OPERATOR_BIN:-${CUR_DIR}/clickhouse-operator}
+OPERATOR_BIN="${OPERATOR_BIN:-${CUR_DIR}/clickhouse-operator}"
diff --git a/dev/binary_clean.sh b/dev/binary_clean.sh
index 308210aec..326c1a1d0 100755
--- a/dev/binary_clean.sh
+++ b/dev/binary_clean.sh
@@ -5,6 +5,6 @@
# Source configuration
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/binary_build_config.sh
+source "${CUR_DIR}/binary_build_config.sh"
-rm -f ${OPERATOR_BIN}
+rm -f "${OPERATOR_BIN}"
diff --git a/dev/find_unformatted_sources.sh b/dev/find_unformatted_sources.sh
index 6d5f53254..25edae0a8 100755
--- a/dev/find_unformatted_sources.sh
+++ b/dev/find_unformatted_sources.sh
@@ -11,16 +11,16 @@ set -o pipefail
# Source configuration
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/binary_build_config.sh
+source "${CUR_DIR}/binary_build_config.sh"
# Prepare list of all .go files in the project, but exclude all files from /vendor/ folder
-GO_FILES_LIST=$(find ${SRC_ROOT} -name \*.go -not -path "${SRC_ROOT}/vendor/*" -print)
+GO_FILES_LIST=$(find "${SRC_ROOT}" -name \*.go -not -path "${SRC_ROOT}/vendor/*" -print)
# Prepare unformatted files list
UNFORMATTED_FILES_LIST=$(gofmt -l ${GO_FILES_LIST})
if [[ ${UNFORMATTED_FILES_LIST} ]]; then
for FILE in ${UNFORMATTED_FILES_LIST}; do
- echo ${FILE}
+ echo "${FILE}"
done
exit 1
fi
diff --git a/dev/format_unformatted_sources.sh b/dev/format_unformatted_sources.sh
index 80dad0581..f356f2fa7 100755
--- a/dev/format_unformatted_sources.sh
+++ b/dev/format_unformatted_sources.sh
@@ -11,9 +11,9 @@ set -o pipefail
# Source configuration
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/binary_build_config.sh
+source "${CUR_DIR}/binary_build_config.sh"
# Iterate over list of unformatted files and format each of them
-${CUR_DIR}/find_unformatted_sources.sh | while read -r FILE; do
- go fmt ${FILE}
+"${CUR_DIR}/find_unformatted_sources.sh" | while read -r FILE; do
+ go fmt "${FILE}"
done
diff --git a/dev/image_build_altinity.sh b/dev/image_build_altinity.sh
index 7cc84e30e..fcf0aca96 100755
--- a/dev/image_build_altinity.sh
+++ b/dev/image_build_altinity.sh
@@ -4,7 +4,7 @@
# Source configuration
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/binary_build_config.sh
+source "${CUR_DIR}/binary_build_config.sh"
# Externally configurable build-dependent options
TAG="${TAG:-altinity/clickhouse-operator:dev}"
@@ -16,4 +16,4 @@ TAG="${TAG}" \
DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN}" \
DOCKERHUB_PUBLISH="${DOCKERHUB_PUBLISH}" \
MINIKUBE="${MINIKUBE}" \
-${CUR_DIR}/image_build_universal.sh
+"${CUR_DIR}/image_build_universal.sh"
diff --git a/dev/image_build_dev.sh b/dev/image_build_dev.sh
index 41fa54f24..3c0894d11 100755
--- a/dev/image_build_dev.sh
+++ b/dev/image_build_dev.sh
@@ -4,7 +4,7 @@
# Source configuration
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/binary_build_config.sh
+source "${CUR_DIR}/binary_build_config.sh"
# Externally configurable build-dependent options
TAG="${TAG:-sunsingerus/clickhouse-operator:dev}"
@@ -16,4 +16,4 @@ TAG="${TAG}" \
DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN}" \
DOCKERHUB_PUBLISH="${DOCKERHUB_PUBLISH}" \
MINIKUBE="${MINIKUBE}" \
-${CUR_DIR}/image_build_universal.sh
+"${CUR_DIR}/image_build_universal.sh"
diff --git a/dev/image_build_universal.sh b/dev/image_build_universal.sh
index 79a4b44bb..48c2cd861 100755
--- a/dev/image_build_universal.sh
+++ b/dev/image_build_universal.sh
@@ -10,12 +10,12 @@ MINIKUBE="${MINIKUBE:-no}"
# Source-dependent options
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-SRC_ROOT="$(realpath ${CUR_DIR}/..)"
+SRC_ROOT="$(realpath "${CUR_DIR}/..")"
DOCKERFILE_DIR="${SRC_ROOT}"
DOCKERFILE="${DOCKERFILE_DIR}/Dockerfile"
# Build clickhouse-operator install .yaml manifest
-${SRC_ROOT}/manifests/operator/build-clickhouse-operator-yaml.sh
+"${SRC_ROOT}/manifests/operator/build-clickhouse-operator-install-yaml.sh"
# Build image with Docker
if [[ "${MINIKUBE}" == "yes" ]]; then
diff --git a/dev/run_dev.sh b/dev/run_dev.sh
index 35f3c226a..8e76fb7df 100755
--- a/dev/run_dev.sh
+++ b/dev/run_dev.sh
@@ -5,16 +5,16 @@
# Source configuration
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/binary_build_config.sh
-LOG_DIR=${CUR_DIR}/log
+source "${CUR_DIR}/binary_build_config.sh"
+LOG_DIR="${CUR_DIR}/log"
echo -n "Building binary, please wait..."
-if ${CUR_DIR}/binary_build.sh; then
+if "${CUR_DIR}/binary_build.sh"; then
echo "successfully built clickhouse-operator. Starting"
- mkdir -p ${LOG_DIR}
- rm -f ${LOG_DIR}/clickhouse-operator.*.log.*
- ${OPERATOR_BIN} \
+ mkdir -p "${LOG_DIR}"
+ rm -f "${LOG_DIR}"/clickhouse-operator.*.log.*
+ "${OPERATOR_BIN}" \
-alsologtostderr=true \
-log_dir=log \
-v=1
@@ -27,12 +27,12 @@ if ${CUR_DIR}/binary_build.sh; then
# -stderrthreshold=FATAL Log events at or above this severity are logged to standard error as well as to files
# And clean binary after run. It'll be rebuilt next time
- ${CUR_DIR}/binary_clean.sh
+ "${CUR_DIR}/binary_clean.sh"
echo "======================"
echo "=== Logs available ==="
echo "======================"
- ls ${LOG_DIR}/*
+ ls "${LOG_DIR}"/*
else
echo "unable to build clickhouse-operator"
fi
diff --git a/dev/update-codegen.sh b/dev/update-codegen.sh
index 55294405c..4e1232082 100755
--- a/dev/update-codegen.sh
+++ b/dev/update-codegen.sh
@@ -7,10 +7,10 @@ set -o nounset
# Only exit with zero if all commands of the pipeline exit successfully
set -o pipefail
-PROJECT_ROOT=$(dirname ${BASH_SOURCE})/..
-CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${PROJECT_ROOT}; ls -d -1 ${PROJECT_ROOT}/vendor/k8s.io/code-generator 2>/dev/null || echo ${GOPATH}/src/k8s.io/code-generator)}
+PROJECT_ROOT="$(dirname "${BASH_SOURCE}")/.."
+CODEGEN_PKG="${CODEGEN_PKG:-$(cd "${PROJECT_ROOT}"; ls -d -1 "${PROJECT_ROOT}/vendor/k8s.io/code-generator" 2>/dev/null || echo "${GOPATH}/src/k8s.io/code-generator")}"
-${PROJECT_ROOT}/vendor/k8s.io/code-generator/generate-groups.sh all \
+"${PROJECT_ROOT}/vendor/k8s.io/code-generator/generate-groups.sh" all \
github.com/altinity/clickhouse-operator/pkg/client \
github.com/altinity/clickhouse-operator/pkg/apis \
"clickhouse.altinity.com:v1"
diff --git a/docs/examples/02-simple-layout-01-1shard-1repl-simple-persistent-volume.yaml b/docs/examples/02-simple-layout-01-1shard-1repl-simple-persistent-volume.yaml
index b275b8784..613258c96 100644
--- a/docs/examples/02-simple-layout-01-1shard-1repl-simple-persistent-volume.yaml
+++ b/docs/examples/02-simple-layout-01-1shard-1repl-simple-persistent-volume.yaml
@@ -15,6 +15,7 @@ spec:
templates:
volumeClaimTemplates:
- name: volumeclaim-template
+# reclaimPolicy: Retain
spec:
accessModes:
- ReadWriteOnce
diff --git a/docs/examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml b/docs/examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml
new file mode 100644
index 000000000..75215ece7
--- /dev/null
+++ b/docs/examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml
@@ -0,0 +1,75 @@
+#
+# AWS-specific labels, applicable in 'nodeAffinity' statements
+#
+# beta.kubernetes.io/arch=amd64
+# beta.kubernetes.io/instance-type=t2.medium
+# beta.kubernetes.io/os=linux
+#
+# failure-domain.beta.kubernetes.io/region=us-east-1
+# failure-domain.beta.kubernetes.io/zone=us-east-1a
+#
+# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal
+# kubernetes.io/role=node
+# node-role.kubernetes.io/node=
+#
+# kops.k8s.io/instancegroup=nodes2
+#
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+ name: "aws-zones"
+spec:
+ configuration:
+ clusters:
+ - name: zoned-cluster
+ layout:
+ shards:
+ - replicas:
+ - templates:
+ podTemplate: clickhouse-in-zone-us-east-1a
+ - templates:
+ podTemplate: clickhouse-in-zone-us-east-1a
+ - templates:
+ podTemplate: clickhouse-in-zone-us-east-1a
+ - templates:
+ podTemplate: clickhouse-in-zone-us-east-1b
+ - templates:
+ podTemplate: clickhouse-in-zone-us-east-1b
+ - templates:
+ podTemplate: clickhouse-in-zone-us-east-1b
+
+ templates:
+ podTemplates:
+ # Specify Pod Templates with affinity
+
+ - name: clickhouse-in-zone-us-east-1a
+ zone:
+ values:
+ - "us-east-1a"
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: yandex/clickhouse-server:19.3.7
+ ports:
+ - name: http
+ containerPort: 8123
+ - name: client
+ containerPort: 9000
+ - name: interserver
+ containerPort: 9009
+
+ - name: clickhouse-in-zone-us-east-1b
+ zone:
+ values:
+ - "us-east-1b"
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: yandex/clickhouse-server:19.3.7
+ ports:
+ - name: http
+ containerPort: 8123
+ - name: client
+ containerPort: 9000
+ - name: interserver
+ containerPort: 9009
diff --git a/docs/examples/10-zones-01-simple-02-aws-pod-per-host.yaml b/docs/examples/10-zones-01-simple-02-aws-pod-per-host.yaml
new file mode 100644
index 000000000..c91f45389
--- /dev/null
+++ b/docs/examples/10-zones-01-simple-02-aws-pod-per-host.yaml
@@ -0,0 +1,49 @@
+#
+# AWS-specific labels, applicable in 'nodeAffinity' statements
+#
+# beta.kubernetes.io/arch=amd64
+# beta.kubernetes.io/instance-type=t2.medium
+# beta.kubernetes.io/os=linux
+#
+# failure-domain.beta.kubernetes.io/region=us-east-1
+# failure-domain.beta.kubernetes.io/zone=us-east-1a
+#
+# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal
+# kubernetes.io/role=node
+# node-role.kubernetes.io/node=
+#
+# kops.k8s.io/instancegroup=nodes2
+#
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+ name: "zones-pod-host"
+spec:
+ defaults:
+ templates:
+ podTemplate: clickhouse-per-host-in-zone-us-east-1a
+ configuration:
+ clusters:
+ - name: zoned-cluster
+ layout:
+ shardsCount: 3
+
+ templates:
+ podTemplates:
+ # Specify Pod Templates with affinity
+ - name: clickhouse-per-host-in-zone-us-east-1a
+ zone:
+ values:
+ - "us-east-1a"
+ distribution: "OnePerHost"
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: yandex/clickhouse-server:19.3.7
+ ports:
+ - name: http
+ containerPort: 8123
+ - name: client
+ containerPort: 9000
+ - name: interserver
+ containerPort: 9009
diff --git a/docs/examples/10-zones-aws-01-pods-in-availability-zones.yaml b/docs/examples/10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml
similarity index 100%
rename from docs/examples/10-zones-aws-01-pods-in-availability-zones.yaml
rename to docs/examples/10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml
diff --git a/docs/examples/10-zones-aws-02-pod-per-host.yaml b/docs/examples/10-zones-02-advanced-02-aws-pod-per-host.yaml
similarity index 99%
rename from docs/examples/10-zones-aws-02-pod-per-host.yaml
rename to docs/examples/10-zones-02-advanced-02-aws-pod-per-host.yaml
index 3d6d94b2c..dd84ba182 100644
--- a/docs/examples/10-zones-aws-02-pod-per-host.yaml
+++ b/docs/examples/10-zones-02-advanced-02-aws-pod-per-host.yaml
@@ -31,7 +31,6 @@ spec:
templates:
podTemplates:
# Specify Pod Templates with affinity
-
- name: clickhouse-per-host-in-zone-us-east-1a
spec:
affinity:
diff --git a/docs/examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml b/docs/examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml
new file mode 100644
index 000000000..3a6e34761
--- /dev/null
+++ b/docs/examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml
@@ -0,0 +1,73 @@
+#
+# AWS-specific labels, applicable in 'nodeAffinity' statements
+#
+# beta.kubernetes.io/arch=amd64
+# beta.kubernetes.io/instance-type=t2.medium
+# beta.kubernetes.io/os=linux
+#
+# failure-domain.beta.kubernetes.io/region=us-east-1
+# failure-domain.beta.kubernetes.io/zone=us-east-1a
+#
+# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal
+# kubernetes.io/role=node
+# node-role.kubernetes.io/node=
+#
+# kops.k8s.io/instancegroup=nodes2
+#
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+ name: "ch-per-host-pvc"
+spec:
+ defaults:
+ templates:
+ podTemplate: clickhouse-per-host
+ volumeClaimTemplate: storage-vc-template
+ configuration:
+ templates:
+ serviceTemplate: ch-service
+ clusters:
+ - name: zoned
+ layout:
+ shardsCount: 2
+ templates:
+ serviceTemplates:
+ - name: ch-service
+ generateName: chendpoint
+ spec:
+ ports:
+ - name: http
+ port: 8123
+ - name: client
+ port: 9000
+ type: LoadBalancer
+ podTemplates:
+ # Specify Pod Templates with affinity
+ - name: clickhouse-per-host
+ zone:
+ key: "clickhouse"
+ values:
+ - "allow"
+ distribution: "OnePerHost"
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: yandex/clickhouse-server:19.3.7
+ ports:
+ - name: http
+ containerPort: 8123
+ - name: client
+ containerPort: 9000
+ - name: interserver
+ containerPort: 9009
+ volumeMounts:
+ - name: storage-vc-template
+ mountPath: /var/lib/clickhouse
+ volumeClaimTemplates:
+ - name: storage-vc-template
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 10Gi
diff --git a/docs/examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml b/docs/examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml
new file mode 100644
index 000000000..cf42af7f1
--- /dev/null
+++ b/docs/examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml
@@ -0,0 +1,70 @@
+#
+# AWS-specific labels, applicable in 'nodeAffinity' statements
+#
+# beta.kubernetes.io/arch=amd64
+# beta.kubernetes.io/instance-type=t2.medium
+# beta.kubernetes.io/os=linux
+#
+# failure-domain.beta.kubernetes.io/region=us-east-1
+# failure-domain.beta.kubernetes.io/zone=us-east-1a
+#
+# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal
+# kubernetes.io/role=node
+# node-role.kubernetes.io/node=
+#
+# kops.k8s.io/instancegroup=nodes2
+#
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+ name: "ch-per-host-localstorage"
+spec:
+ defaults:
+ templates:
+ podTemplate: clickhouse-per-host-localstorage
+ configuration:
+ templates:
+ serviceTemplate: ch-service
+ clusters:
+ - name: ch-localstorage
+ layout:
+ shardsCount: 2
+ templates:
+ serviceTemplates:
+ - name: ch-service
+ generateName: chendpoint
+ spec:
+ ports:
+ - name: http
+ port: 8123
+ - name: client
+ port: 9000
+ type: LoadBalancer
+ podTemplates:
+ # Specify Pod Templates with affinity
+ - name: clickhouse-per-host-localstorage
+ zone:
+ key: "clickhouse"
+ values:
+ - "allow"
+ distribution: "OnePerHost"
+ spec:
+ volumes:
+ # Specify volume as a path on the local filesystem; the directory will be created if needed
+ - name: local-path
+ hostPath:
+ path: /mnt/data/clickhouse-test
+ type: DirectoryOrCreate
+ containers:
+ - name: clickhouse-pod
+ image: yandex/clickhouse-server:19.3.7
+ ports:
+ - name: http
+ containerPort: 8123
+ - name: client
+ containerPort: 9000
+ - name: interserver
+ containerPort: 9009
+ volumeMounts:
+ - name: local-path
+ mountPath: /var/lib/clickhouse
diff --git a/docs/examples/11-local-storage-01-simple-host-path.yaml b/docs/examples/11-local-storage-01-simple-host-path.yaml
new file mode 100644
index 000000000..9265d8152
--- /dev/null
+++ b/docs/examples/11-local-storage-01-simple-host-path.yaml
@@ -0,0 +1,61 @@
+#
+# AWS-specific labels, applicable in 'nodeAffinity' statements
+#
+# beta.kubernetes.io/arch=amd64
+# beta.kubernetes.io/instance-type=t2.medium
+# beta.kubernetes.io/os=linux
+#
+# failure-domain.beta.kubernetes.io/region=us-east-1
+# failure-domain.beta.kubernetes.io/zone=us-east-1a
+#
+# kubernetes.io/hostname=ip-172-20-37-97.ec2.internal
+# kubernetes.io/role=node
+# node-role.kubernetes.io/node=
+#
+# kops.k8s.io/instancegroup=nodes2
+#
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+ name: "volume-hostpath"
+spec:
+ defaults:
+ templates:
+ podTemplate: clickhouse-per-host-on-servers-with-ssd
+ configuration:
+ clusters:
+ - name: local-storage
+ layout:
+ shardsCount: 3
+
+ templates:
+ podTemplates:
+ # Specify Pod Templates with affinity
+
+ - name: clickhouse-per-host-on-servers-with-ssd
+ zone:
+ key: "disktype"
+ values:
+ - "ssd"
+ distribution: "OnePerHost"
+ spec:
+ volumes:
+ # Specify volume as a path on the local filesystem; the directory will be created if needed
+ - name: local-path
+ hostPath:
+ path: /mnt/podvolume
+ type: DirectoryOrCreate
+ containers:
+ - name: clickhouse-pod
+ image: yandex/clickhouse-server:19.3.7
+ volumeMounts:
+ # Specify reference to volume on local filesystem
+ - name: local-path
+ mountPath: /var/lib/clickhouse
+ ports:
+ - name: http
+ containerPort: 8123
+ - name: client
+ containerPort: 9000
+ - name: interserver
+ containerPort: 9009
diff --git a/docs/examples/11-local-storage-01-host-path.yaml b/docs/examples/11-local-storage-02-advanced-host-path.yaml
similarity index 100%
rename from docs/examples/11-local-storage-01-host-path.yaml
rename to docs/examples/11-local-storage-02-advanced-host-path.yaml
diff --git a/docs/examples/99-clickhouseinstallation-max.yaml b/docs/examples/99-clickhouseinstallation-max.yaml
index 1d198739d..fbbe9ea41 100644
--- a/docs/examples/99-clickhouseinstallation-max.yaml
+++ b/docs/examples/99-clickhouseinstallation-max.yaml
@@ -6,7 +6,7 @@ metadata:
spec:
defaults:
- replicasUseFQDN: 0 # 0 - by default, 1 - enabled
+ replicasUseFQDN: "no"
distributedDDL:
profile: default
templates:
@@ -14,13 +14,15 @@ spec:
volumeClaimTemplate: default-volume-claim
configuration:
+ templates:
+ serviceTemplate: chi-service-template
zookeeper:
nodes:
- - host: zk-statefulset-0.zk-service.default.svc.cluster.local
+ - host: zookeeper-0.zookeepers.zoo3ns.svc.cluster.local
port: 2181
- - host: zk-statefulset-1.zk-service.default.svc.cluster.local
+ - host: zookeeper-1.zookeepers.zoo3ns.svc.cluster.local
port: 2181
- - host: zk-statefulset-2.zk-service.default.svc.cluster.local
+ - host: zookeeper-2.zookeepers.zoo3ns.svc.cluster.local
port: 2181
users:
readonly/profile: readonly
@@ -69,7 +71,7 @@ spec:
clusters:
- - name: sharded-replicated
+ - name: all-counts
templates:
podTemplate: clickhouse-v18.16.1
volumeClaimTemplate: default-volume-claim
@@ -77,7 +79,7 @@ spec:
shardsCount: 3
replicasCount: 2
- - name: sharded-non-replicated
+ - name: shards-only
templates:
podTemplate: clickhouse-v18.16.1
volumeClaimTemplate: default-volume-claim
@@ -85,13 +87,13 @@ spec:
shardsCount: 3
# replicasCount not specified, assumed = 1, by default
- - name: replicated
+ - name: replicas-only
templates:
podTemplate: clickhouse-v18.16.1
volumeClaimTemplate: default-volume-claim
layout:
# shardsCount not specified, assumed = 1, by default
- replicasCount: 4
+ replicasCount: 3
- name: customized
templates:
@@ -121,14 +123,42 @@ spec:
templates:
podTemplate: clickhouse-v18.16.1
volumeClaimTemplate: default-volume-claim
+ serviceTemplate: replica-service-template
replicas:
- name: replica0
port: 9000
templates:
podTemplate: clickhouse-v18.16.2
volumeClaimTemplate: default-volume-claim
+ serviceTemplate: replica-service-template
templates:
+ serviceTemplates:
+ - name: chi-service-template
+ generateName: "service-{chi}"
+ # type ServiceSpec struct from k8s.io/core/v1
+ spec:
+ ports:
+ - name: http
+ port: 8123
+ - name: client
+ port: 9000
+ type: LoadBalancer
+
+ - name: replica-service-template
+ # type ServiceSpec struct from k8s.io/core/v1
+ spec:
+ ports:
+ - name: http
+ port: 8123
+ - name: client
+ port: 9000
+ - name: interserver
+ port: 9009
+ type: ClusterIP
+ clusterIP: None
+
+
volumeClaimTemplates:
- name: default-volume-claim
# type PersistentVolumeClaimSpec struct from k8s.io/core/v1
diff --git a/docs/quick-start.md b/docs/quick-start.md
index 7231fafb5..2e7cdddb4 100644
--- a/docs/quick-start.md
+++ b/docs/quick-start.md
@@ -13,13 +13,23 @@
# Prerequisites
1. Operational Kubernetes instance
1. Properly configured `kubectl`
+1. `curl`
# ClickHouse Operator Installation
-Apply `clickhouse-operator` installation manifest. The simplest way - directly from github
+Apply the `clickhouse-operator` installation manifest. The simplest way is to install it directly from GitHub.
+Please `cd` into a writable folder first, because the install script downloads the config files it needs to build the `.yaml` manifest.
```bash
-kubectl apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/operator/clickhouse-operator-install.yaml
+cd ~
+curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/dev/clickhouse-operator-install.sh | CHOPERATOR_NAMESPACE=test-clickhouse-operator bash
```
+Note the explicitly specified namespace:
+```bash
+CHOPERATOR_NAMESPACE=test-clickhouse-operator
+```
+This namespace will be created and `clickhouse-operator` will be installed into it.
+The install script downloads the required `.yaml` and `.xml` files and installs `clickhouse-operator` into the specified namespace.
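+The operator image can also be overridden via the `CHOPERATOR_IMAGE` variable, which the install script reads; for example (illustrative image tag):
+```bash
+curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/manifests/dev/clickhouse-operator-install.sh | CHOPERATOR_NAMESPACE=test-clickhouse-operator CHOPERATOR_IMAGE=altinity/clickhouse-operator:latest bash
+```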
+
Operator installation process
```text
customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com created
@@ -31,7 +41,7 @@ service/clickhouse-operator-metrics created
Check `clickhouse-operator` is running:
```bash
-kubectl get pods -n kube-system
+kubectl get pods -n test-clickhouse-operator
```
```text
NAME READY STATUS RESTARTS AGE
@@ -62,7 +72,7 @@ This is the trivial [1 shard 1 replica](./examples/01-standard-layout-01-1shard-
**WARNING**: Do not use it for anything other than 'Hello, world!', it does not have persistent storage!
```bash
-kubectl apply -n test -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/docs/examples/01-standard-layout-01-1shard-1repl.yaml
+kubectl apply -n test-clickhouse-operator -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/docs/examples/01-standard-layout-01-1shard-1repl.yaml
```
```text
clickhouseinstallation.clickhouse.altinity.com/example-01 created
@@ -86,7 +96,7 @@ spec:
Once cluster is created, there are two checks to be made.
```bash
-kubectl get pods -n test
+kubectl get pods -n test-clickhouse-operator
```
```text
NAME READY STATUS RESTARTS AGE
@@ -96,7 +106,7 @@ chi-b3d29f-a242-0-0-0 1/1 Running 0 10m
Watch out for 'Running' status. Also check services created by an operator:
```bash
-kubectl get service -n test
+kubectl get service -n test-clickhouse-operator
```
```text
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
@@ -121,7 +131,7 @@ Connected to ClickHouse server version 19.4.3 revision 54416.
```
1. In case there is not **EXTERNAL-IP** available, we can access ClickHouse from inside Kubernetes cluster
```bash
-kubectl -n test exec -it chi-b3d29f-a242-0-0-0 -- clickhouse-client
+kubectl -n test-clickhouse-operator exec -it chi-b3d29f-a242-0-0-0 -- clickhouse-client
```
```text
ClickHouse client version 19.4.3.11.
diff --git a/manifests/dev/cat-clickhouse-operator-install-yaml.sh b/manifests/dev/cat-clickhouse-operator-install-yaml.sh
new file mode 100755
index 000000000..45731281b
--- /dev/null
+++ b/manifests/dev/cat-clickhouse-operator-install-yaml.sh
@@ -0,0 +1,264 @@
+#!/bin/bash
+
+# Compose clickhouse-operator .yaml manifest from components
+
+# Paths
+CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+PROJECT_ROOT="$(realpath "${CUR_DIR}/../..")"
+
+##########################################
+##
+## clickhouse-operator .yaml configuration
+##
+##########################################
+
+# Namespace to install operator
+CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-kube-system}"
+
+# Operator's docker image
+CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:latest}"
+
+# Local path to operator's config file to be injected into .yaml
+CHOPERATOR_CONFIG_FILE="${PROJECT_ROOT}/config/config.yaml"
+
+# Local path to folder with ClickHouse's .xml configuration files which will be injected into .yaml
+# as content of /etc/clickhouse-server/conf.d folder
+CHOPERATOR_CONFD_FOLDER="${PROJECT_ROOT}/config/conf.d"
+
+# Local path to folder with ClickHouse's .xml configuration files which will be injected into .yaml
+# as content of /etc/clickhouse-server/config.d folder
+CHOPERATOR_CONFIGD_FOLDER="${PROJECT_ROOT}/config/config.d"
+
+# Local path to folder with ClickHouse's .xml configuration files which will be injected into .yaml
+# as content of /etc/clickhouse-server/users.d folder
+CHOPERATOR_USERSD_FOLDER="${PROJECT_ROOT}/config/users.d"
+
+# Local path to folder with operator's .yaml template files which will be injected into .yaml
+# as content of /etc/clickhouse-server/templates.d folder
+CHOPERATOR_TEMPLATESD_FOLDER="${PROJECT_ROOT}/config/templates.d"
+
+
+##
+## .yaml manifest sections to be rendered
+##
+
+# Render operator's CRD
+MANIFEST_PRINT_CRD="${MANIFEST_PRINT_CRD:-yes}"
+
+# Render operator's RBAC and other parts needed during operator's install procedure
+MANIFEST_PRINT_RBAC="${MANIFEST_PRINT_RBAC:-yes}"
+
+# Render operator's Deployment section. May not be required for a dev localhost run
+MANIFEST_PRINT_DEPLOYMENT="${MANIFEST_PRINT_DEPLOYMENT:-yes}"
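+
+# Example (illustrative): render the manifest without the Deployment section for a custom namespace:
+#   CHOPERATOR_NAMESPACE=test-clickhouse-operator MANIFEST_PRINT_DEPLOYMENT=no ./cat-clickhouse-operator-install-yaml.sh > clickhouse-operator-install.yaml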
+
+##################################
+##
+## File handler
+##
+##################################
+
+function ensure_file() {
+ # Params
+ local LOCAL_DIR="$1"
+ local FILE="$2"
+ local REPO_DIR="$3"
+
+ local LOCAL_FILE="${LOCAL_DIR}/${FILE}"
+
+ if [[ -f "${LOCAL_FILE}" ]]; then
+ # File found, all is ok
+ :
+ else
+ download_file "${LOCAL_DIR}" "${FILE}" "${REPO_DIR}"
+ fi
+
+ if [[ -f "${LOCAL_FILE}" ]]; then
+ # File found, all is ok
+ :
+ else
+ # File not found
+ echo "Unable to get ${FILE}"
+ exit 1
+ fi
+}
+
+function download_file() {
+ # Params
+ local LOCAL_DIR="$1"
+ local FILE="$2"
+ local REPO_DIR="$3"
+
+ local LOCAL_FILE="${LOCAL_DIR}/${FILE}"
+
+ REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator"
+ #BRANCH="dev-vladislav"
+ BRANCH="master"
+ FILE_URL="${REPO_URL}/${BRANCH}/${REPO_DIR}/${FILE}"
+
+ # Check curl is in place
+ if ! curl --version > /dev/null; then
+ echo "curl is not available, can not continue"
+ exit 1
+ fi
+
+ # Download file
+ if ! curl --silent "${FILE_URL}" --output "${LOCAL_FILE}"; then
+ echo "curl call to download ${FILE_URL} failed, can not continue"
+ exit 1
+ fi
+
+ # Check file is in place
+ if [[ -f "${LOCAL_FILE}" ]]; then
+ # File found, all is ok
+ :
+ else
+ # File not found
+ echo "Unable to download ${FILE_URL}"
+ exit 1
+ fi
+}
+##################################
+##
+## Render .yaml manifest
+##
+##################################
+
+
+# Render CRD section
+if [[ "${MANIFEST_PRINT_CRD}" == "yes" ]]; then
+ ensure_file "${CUR_DIR}" "clickhouse-operator-template-01-section-crd.yaml" "manifests/dev"
+ cat "${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml" | \
+ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst
+fi
+
+# Render RBAC section
+if [[ "${MANIFEST_PRINT_RBAC}" == "yes" ]]; then
+ echo "---"
+ ensure_file "${CUR_DIR}" "clickhouse-operator-template-02-section-rbac-and-service.yaml" "manifests/dev"
+ cat "${CUR_DIR}/clickhouse-operator-template-02-section-rbac-and-service.yaml" | \
+ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst
+fi
+
+# Render header/beginning of ConfigMap yaml specification:
+# apiVersion: v1
+# kind: ConfigMap
+# metadata:
+# name: ${CONFIGMAP_NAME}
+# namespace: ${CHOPERATOR_NAMESPACE}
+# data:
+function render_configmap_header() {
+ # ConfigMap name
+ CM_NAME="$1"
+ # Template file with ConfigMap header/beginning
+
+ ensure_file "${CUR_DIR}" "clickhouse-operator-template-03-section-configmap-header.yaml" "manifests/dev"
+ # Render ConfigMap header template with vars substitution
+ cat "${CUR_DIR}/clickhouse-operator-template-03-section-configmap-header.yaml" | \
+ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" CONFIGMAP_NAME="${CM_NAME}" envsubst
+}
+
+# Render one file section in ConfigMap yaml specification:
+# apiVersion: v1
+# kind: ConfigMap
+# metadata:
+# name: game-config
+# data:
+# game.properties: |
+# enemies=aliens
+# lives=3
+#
+# ui.properties: |
+# color.good=purple
+function render_configmap_data_section_file() {
+ FILE_PATH="$1"
+
+ # ConfigMap .data section looks like
+ # config.yaml: |
+ # line 1
+ # line 2
+ # etc
+ FILE_NAME="$(basename "${FILE_PATH}")"
+ echo " ${FILE_NAME}: |"
+ cat "${FILE_PATH}" | sed 's/^/ /'
+ echo ""
+}
+
+# Render Deployment and ConfigMap sections
+if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then
+ if [[ -z "${CHOPERATOR_CONFIG_FILE}" ]]; then
+ # No config file specified, render simple deployment
+ echo "---"
+ ensure_file "${CUR_DIR}" "clickhouse-operator-template-04-section-deployment.yaml" "manifests/dev"
+ cat "${CUR_DIR}/clickhouse-operator-template-04-section-deployment.yaml" | \
+ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst
+ else
+ # Config file specified, render all ConfigMaps and then render deployment
+
+ echo "---"
+ render_configmap_header "etc-clickhouse-operator-files"
+ if [[ -f "${PROJECT_ROOT}/config/config.yaml" ]]; then
+ # Render clickhouse-operator config file
+ render_configmap_data_section_file "${PROJECT_ROOT}/config/config.yaml"
+ else
+ # Fetch from github and apply
+ # config/config.yaml
+ download_file "${CUR_DIR}" "config.yaml" "config"
+ render_configmap_data_section_file "${CUR_DIR}/config.yaml"
+ fi
+
+ # Render confd.d files
+ echo "---"
+ render_configmap_header "etc-clickhouse-operator-confd-files"
+ if [[ ! -z "${CHOPERATOR_CONFD_FOLDER}" ]] && [[ -d "${CHOPERATOR_CONFD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_CONFD_FOLDER}")" ]]; then
+ for FILE in "${CHOPERATOR_CONFD_FOLDER}"/*; do
+ render_configmap_data_section_file "${FILE}"
+ done
+ fi
+
+ # Render configd.d files
+ echo "---"
+ render_configmap_header "etc-clickhouse-operator-configd-files"
+ if [[ ! -z "${CHOPERATOR_CONFIGD_FOLDER}" ]] && [[ -d "${CHOPERATOR_CONFIGD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_CONFIGD_FOLDER}")" ]]; then
+ for FILE in "${CHOPERATOR_CONFIGD_FOLDER}"/*; do
+ render_configmap_data_section_file "${FILE}"
+ done
+ else
+ # Fetch from github and apply
+ # config/config.d/01-clickhouse-operator-listen.xml
+ # config/config.d/02-clickhouse-operator-logger.xml
+ download_file "${CUR_DIR}" "01-clickhouse-operator-listen.xml" "config/config.d"
+ download_file "${CUR_DIR}" "02-clickhouse-operator-logger.xml" "config/config.d"
+ render_configmap_data_section_file "${CUR_DIR}/01-clickhouse-operator-listen.xml"
+ render_configmap_data_section_file "${CUR_DIR}/02-clickhouse-operator-logger.xml"
+ fi
+
+ # Render templates.d files
+ echo "---"
+ render_configmap_header "etc-clickhouse-operator-templatesd-files"
+ if [[ ! -z "${CHOPERATOR_TEMPLATESD_FOLDER}" ]] && [[ -d "${CHOPERATOR_TEMPLATESD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_TEMPLATESD_FOLDER}")" ]]; then
+ for FILE in "${CHOPERATOR_TEMPLATESD_FOLDER}"/*; do
+ render_configmap_data_section_file "${FILE}"
+ done
+ fi
+
+ # Render users.d files
+ echo "---"
+ render_configmap_header "etc-clickhouse-operator-usersd-files"
+ if [[ ! -z "${CHOPERATOR_USERSD_FOLDER}" ]] && [[ -d "${CHOPERATOR_USERSD_FOLDER}" ]] && [[ ! -z "$(ls "${CHOPERATOR_USERSD_FOLDER}")" ]]; then
+ for FILE in "${CHOPERATOR_USERSD_FOLDER}"/*; do
+ render_configmap_data_section_file "${FILE}"
+ done
+ else
+ # Fetch from github and apply
+ # config/users.d/01-clickhouse-operator-user.xml
+ download_file "${CUR_DIR}" "01-clickhouse-operator-user.xml" "config/users.d"
+ render_configmap_data_section_file "${CUR_DIR}/01-clickhouse-operator-user.xml"
+ fi
+
+ # Render Deployment
+ echo "---"
+ ensure_file "${CUR_DIR}" "clickhouse-operator-template-04-section-deployment-with-configmap.yaml" "manifests/dev"
+ cat "${CUR_DIR}/clickhouse-operator-template-04-section-deployment-with-configmap.yaml" | \
+ CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst
+ fi
+fi
diff --git a/manifests/dev/cat-clickhouse-operator-yaml.sh b/manifests/dev/cat-clickhouse-operator-yaml.sh
deleted file mode 100755
index 1f9e09a88..000000000
--- a/manifests/dev/cat-clickhouse-operator-yaml.sh
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/bin/bash
-
-# Compose clickhouse-operator .yaml manifest from components
-
-# Paths
-CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-MANIFEST_ROOT=$(realpath ${CUR_DIR}/..)
-PROJECT_ROOT=$(realpath ${CUR_DIR}/../..)
-
-# clickhouse-operator details
-CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-kube-system}"
-CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:latest}"
-CHOPERATOR_CONFIG_FILE="${PROJECT_ROOT}/config/config.yaml"
-CHOPERATOR_CONFD_FOLDER="${PROJECT_ROOT}/config/conf.d"
-CHOPERATOR_CONFIGD_FOLDER="${PROJECT_ROOT}/config/config.d"
-CHOPERATOR_TEMPLATESD_FOLDER="${PROJECT_ROOT}/config/templates.d"
-CHOPERATOR_USERSD_FOLDER="${PROJECT_ROOT}/config/users.d"
-
-# .yaml manifest sections to be rendered
-MANIFEST_PRINT_CRD="${MANIFEST_PRINT_CRD:-yes}"
-MANIFEST_PRINT_RBAC="${MANIFEST_PRINT_RBAC:-yes}"
-MANIFEST_PRINT_DEPLOYMENT="${MANIFEST_PRINT_DEPLOYMENT:-yes}"
-
-# Render CRD section
-if [[ "${MANIFEST_PRINT_CRD}" == "yes" ]]; then
- cat ${CUR_DIR}/clickhouse-operator-template-01-section-crd.yaml | \
- CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst
-fi
-
-# Render RBAC section
-if [[ "${MANIFEST_PRINT_RBAC}" == "yes" ]]; then
- echo "---"
- cat ${CUR_DIR}/clickhouse-operator-template-02-section-rbac-and-service.yaml | \
- CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst
-fi
-
-# Render header/beginning of ConfigMap yaml specification:
-# apiVersion: v1
-# kind: ConfigMap
-# metadata:
-# name: ${CONFIGMAP_NAME}
-# namespace: ${CHOPERATOR_NAMESPACE}
-# data:
-function render_configmap_header() {
- # ConfigMap name
- CM_NAME="$1"
- # Template file with ConfigMap header/beginning
- CM_HEADER_FILE="${CUR_DIR}/clickhouse-operator-template-03-section-configmap-header.yaml"
-
- # Render ConfigMap header template with vars substitution
- cat ${CM_HEADER_FILE} | \
- CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" CONFIGMAP_NAME="${CM_NAME}" envsubst
-}
-
-# Render one file section in ConfigMap yaml specification:
-# apiVersion: v1
-# kind: ConfigMap
-# metadata:
-# name: game-config
-# data:
-# game.properties: |
-# enemies=aliens
-# lives=3
-#
-# ui.properties: |
-# color.good=purple
-function render_configmap_data_section_file() {
- FILE_PATH=$1
- # ConfigMap .data section looks like
- # config.yaml: |
- # line 1
- # line 2
- # etc
- FILE_NAME=$(basename "${FILE_PATH}")
- echo " ${FILE_NAME}: |"
- cat ${FILE_PATH} | sed 's/^/ /'
- echo ""
-}
-
-# Render Deployment and ConfigMap sections
-if [[ "${MANIFEST_PRINT_DEPLOYMENT}" == "yes" ]]; then
- if [[ -z "${CHOPERATOR_CONFIG_FILE}" ]]; then
- # No config file specified, render simple deployment
- echo "---"
- cat ${CUR_DIR}/clickhouse-operator-template-04-section-deployment.yaml | \
- CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst
- else
- # Config file specified, render all ConfigMaps and then render deployment
-
- # Render clickhouse-operator config file
- echo "---"
- render_configmap_header "etc-clickhouse-operator-files"
- render_configmap_data_section_file "${PROJECT_ROOT}/config/config.yaml"
-
- # Render confd.d files
- echo "---"
- render_configmap_header "etc-clickhouse-operator-confd-files"
- if [[ ! -z "${CHOPERATOR_CONFD_FOLDER}" ]] && [[ ! -z "$(ls ${CHOPERATOR_CONFD_FOLDER})" ]]; then
- for FILE in ${CHOPERATOR_CONFD_FOLDER}/*; do
- render_configmap_data_section_file "${FILE}"
- done
- fi
-
- # Render configd.d files
- echo "---"
- render_configmap_header "etc-clickhouse-operator-configd-files"
- if [[ ! -z "${CHOPERATOR_CONFIGD_FOLDER}" ]] && [[ ! -z "$(ls ${CHOPERATOR_CONFIGD_FOLDER})" ]]; then
- for FILE in ${CHOPERATOR_CONFIGD_FOLDER}/*; do
- render_configmap_data_section_file "${FILE}"
- done
- fi
-
- # Render templates.d files
- echo "---"
- render_configmap_header "etc-clickhouse-operator-templatesd-files"
- if [[ ! -z "${CHOPERATOR_TEMPLATESD_FOLDER}" ]] && [[ ! -z "$(ls ${CHOPERATOR_TEMPLATESD_FOLDER})" ]]; then
- for FILE in ${CHOPERATOR_TEMPLATESD_FOLDER}/*; do
- render_configmap_data_section_file "${FILE}"
- done
- fi
-
- # Render users.d files
- echo "---"
- render_configmap_header "etc-clickhouse-operator-usersd-files"
- if [[ ! -z "${CHOPERATOR_USERSD_FOLDER}" ]] && [[ ! -z "$(ls ${CHOPERATOR_USERSD_FOLDER})" ]]; then
- for FILE in ${CHOPERATOR_USERSD_FOLDER}/*; do
- render_configmap_data_section_file "${FILE}"
- done
- fi
-
- # Render Deployment
- echo "---"
- cat ${CUR_DIR}/clickhouse-operator-template-04-section-deployment-with-configmap.yaml | \
- CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" envsubst
- fi
-fi
diff --git a/manifests/dev/clickhouse-operator-delete.sh b/manifests/dev/clickhouse-operator-delete.sh
index f9764a285..622c126cf 100755
--- a/manifests/dev/clickhouse-operator-delete.sh
+++ b/manifests/dev/clickhouse-operator-delete.sh
@@ -4,12 +4,12 @@ CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-dev}"
CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:dev}"
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-MANIFEST_ROOT=$(realpath ${CUR_DIR}/..)
+MANIFEST_ROOT="$(realpath "${CUR_DIR}/..")"
if [[ "${CHOPERATOR_NAMESPACE}" == "kube-system" ]]; then
echo "Default k8s namespace 'kube-system' must not be deleted"
echo "Delete components only"
- kubectl delete --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh)
+ kubectl delete --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh")
else
echo "Delete ClickHouse Operator namespace ${CHOPERATOR_NAMESPACE}"
kubectl delete namespace "${CHOPERATOR_NAMESPACE}"
diff --git a/manifests/dev/clickhouse-operator-install.sh b/manifests/dev/clickhouse-operator-install.sh
index 76af802bc..9d0c218f3 100755
--- a/manifests/dev/clickhouse-operator-install.sh
+++ b/manifests/dev/clickhouse-operator-install.sh
@@ -4,7 +4,77 @@ CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-dev}"
CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:dev}"
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-MANIFEST_ROOT=$(realpath ${CUR_DIR}/..)
+
+function ensure_kubectl() {
+ if ! kubectl version > /dev/null; then
+ echo "kubectl failed, can not continue"
+ exit 1
+ fi
+}
+
+function ensure_file() {
+ # Params
+ local LOCAL_DIR="$1"
+ local FILE="$2"
+ local REPO_DIR="$3"
+
+ local LOCAL_FILE="${LOCAL_DIR}/${FILE}"
+
+ if [[ -f "${LOCAL_FILE}" ]]; then
+ # File found, all is ok
+ :
+ else
+ download_file "${LOCAL_DIR}" "${FILE}" "${REPO_DIR}"
+ fi
+
+ if [[ -f "${LOCAL_FILE}" ]]; then
+ # File found, all is ok
+ :
+ else
+ # File not found
+ echo "Unable to get ${FILE}"
+ exit 1
+ fi
+}
+
+function download_file() {
+ # Params
+ local LOCAL_DIR="$1"
+ local FILE="$2"
+ local REPO_DIR="$3"
+
+ local LOCAL_FILE="${LOCAL_DIR}/${FILE}"
+
+ REPO_URL="https://raw.githubusercontent.com/Altinity/clickhouse-operator"
+ #BRANCH="dev-vladislav"
+ BRANCH="master"
+ FILE_URL="${REPO_URL}/${BRANCH}/${REPO_DIR}/${FILE}"
+
+ # Check curl is in place
+ if ! curl --version > /dev/null; then
+ echo "curl is not available, can not continue"
+ exit 1
+ fi
+
+ # Download file
+ if ! curl --silent "${FILE_URL}" --output "${LOCAL_FILE}"; then
+ echo "curl call to download ${FILE_URL} failed, can not continue"
+ exit 1
+ fi
+
+ # Check file is in place
+ if [[ -f "${LOCAL_FILE}" ]]; then
+ # File found, all is ok
+ :
+ else
+ # File not found
+ echo "Unable to download ${FILE_URL}"
+ exit 1
+ fi
+}
+
+ensure_kubectl
+ensure_file "${CUR_DIR}" "cat-clickhouse-operator-install-yaml.sh" "manifests/dev"
echo "Setup ClickHouse Operator into ${CHOPERATOR_NAMESPACE} namespace"
@@ -12,4 +82,4 @@ echo "Setup ClickHouse Operator into ${CHOPERATOR_NAMESPACE} namespace"
kubectl create namespace "${CHOPERATOR_NAMESPACE}"
# Setup into dedicated namespace
-kubectl apply --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh)
+kubectl apply --namespace="${CHOPERATOR_NAMESPACE}" -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" /bin/bash "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh")
diff --git a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml
index a0706ce53..5ae07ac33 100644
--- a/manifests/dev/clickhouse-operator-template-01-section-crd.yaml
+++ b/manifests/dev/clickhouse-operator-template-01-section-crd.yaml
@@ -60,9 +60,20 @@ spec:
type: string
volumeClaimTemplate:
type: string
+ serviceTemplate:
+ type: string
configuration:
type: object
properties:
+ templates:
+ type: object
+ properties:
+ podTemplate:
+ type: string
+ volumeClaimTemplate:
+ type: string
+ serviceTemplate:
+ type: string
zookeeper:
type: object
properties:
@@ -109,6 +120,8 @@ spec:
type: string
volumeClaimTemplate:
type: string
+ serviceTemplate:
+ type: string
layout:
type: object
properties:
@@ -145,6 +158,8 @@ spec:
type: string
volumeClaimTemplate:
type: string
+ serviceTemplate:
+ type: string
replicas:
type: array
items:
@@ -163,9 +178,39 @@ spec:
type: string
volumeClaimTemplate:
type: string
+ serviceTemplate:
+ type: string
templates:
type: object
properties:
+ podTemplates:
+ type: array
+ items:
+ type: object
+ required:
+ - name
+ - spec
+ properties:
+ name:
+ type: string
+ zone:
+ type: object
+ required:
+ - values
+ properties:
+ key:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ distribution:
+ type: string
+ enum:
+ - OnePerHost
+ spec:
+ # TODO specify PodSpec
+ type: object
volumeClaimTemplates:
type: array
items:
@@ -176,19 +221,26 @@ spec:
properties:
name:
type: string
+ reclaimPolicy:
+ type: string
+ enum:
+ - Retain
+ - Delete
spec:
# TODO specify PersistentVolumeClaimSpec
type: object
- podTemplates:
+ serviceTemplates:
type: array
items:
type: object
required:
- name
- spec
- properies:
+ properties:
name:
type: string
+ generateName:
+ type: string
spec:
- # TODO specify PodSpec
+ # TODO specify ServiceSpec
type: object
diff --git a/manifests/dev/dev-delete.sh b/manifests/dev/dev-delete.sh
index b8ec5042a..53d9158d5 100755
--- a/manifests/dev/dev-delete.sh
+++ b/manifests/dev/dev-delete.sh
@@ -2,7 +2,7 @@
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/dev-config.sh
+source "${CUR_DIR}/dev-config.sh"
if [[ "${CHOPERATOR_NAMESPACE}" == "kube-system" ]]; then
echo "Default k8s namespace 'kube-system' must not be deleted"
diff --git a/manifests/dev/dev-install.sh b/manifests/dev/dev-install.sh
index 62717dfd8..e3613ea92 100755
--- a/manifests/dev/dev-install.sh
+++ b/manifests/dev/dev-install.sh
@@ -2,13 +2,13 @@
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/dev-config.sh
+source "${CUR_DIR}/dev-config.sh"
echo "Create ${CHOPERATOR_NAMESPACE} namespace"
kubectl create namespace "${CHOPERATOR_NAMESPACE}"
if [[ "${INSTALL_FROM_ALTINITY_RELEASE_DOCKERHUB}" == "yes" ]]; then
- kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh)
+ kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh")
# Installation done
exit $?
@@ -17,10 +17,10 @@ else
echo "CHOPERATOR_NAMESPACE=${CHOPERATOR_NAMESPACE}"
echo "CHOPERATOR_IMAGE=${CHOPERATOR_IMAGE}"
- kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_DEPLOYMENT="no" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh)
+ kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_DEPLOYMENT="no" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh")
if [[ "${INSTALL_FROM_DEPLOYMENT_MANIFEST}" == "yes" ]]; then
# Install operator from Docker Registry (dockerhub or whatever)
- kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_CRD="no" MANIFEST_PRINT_RBAC="no" ${CUR_DIR}/cat-clickhouse-operator-yaml.sh)
+ kubectl -n "${CHOPERATOR_NAMESPACE}" apply -f <(CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" MANIFEST_PRINT_CRD="no" MANIFEST_PRINT_RBAC="no" "${CUR_DIR}/cat-clickhouse-operator-install-yaml.sh")
fi
fi
diff --git a/manifests/dev/dev-reset.sh b/manifests/dev/dev-reset.sh
index 1b9748230..9cd0138c0 100755
--- a/manifests/dev/dev-reset.sh
+++ b/manifests/dev/dev-reset.sh
@@ -2,7 +2,7 @@
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/dev-config.sh
+source "${CUR_DIR}/dev-config.sh"
echo "Reset namespace: ${CHOPERATOR_NAMESPACE}"
-${CUR_DIR}/dev-delete.sh && ${CUR_DIR}/dev-install.sh
+"${CUR_DIR}/dev-delete.sh" && "${CUR_DIR}/dev-install.sh"
diff --git a/manifests/dev/dev-show.sh b/manifests/dev/dev-show.sh
index c78493426..016d06289 100755
--- a/manifests/dev/dev-show.sh
+++ b/manifests/dev/dev-show.sh
@@ -2,7 +2,7 @@
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/dev-config.sh
+source "${CUR_DIR}/dev-config.sh"
echo "=== Pod ==="
kubectl -n "${CHOPERATOR_NAMESPACE}" -o wide get pod
diff --git a/manifests/dev/dev-watch.sh b/manifests/dev/dev-watch.sh
index 10db80efb..bb7583ff7 100755
--- a/manifests/dev/dev-watch.sh
+++ b/manifests/dev/dev-watch.sh
@@ -2,6 +2,6 @@
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source ${CUR_DIR}/dev-config.sh
+source "${CUR_DIR}/dev-config.sh"
-watch -n1 "kubectl -n ${CHOPERATOR_NAMESPACE} get all,configmap,endpoints"
+watch -n1 "kubectl -n ${CHOPERATOR_NAMESPACE} get all,configmap,endpoints,pv,pvc"
diff --git a/manifests/operator/build-clickhouse-operator-yaml.sh b/manifests/operator/build-clickhouse-operator-install-yaml.sh
similarity index 52%
rename from manifests/operator/build-clickhouse-operator-yaml.sh
rename to manifests/operator/build-clickhouse-operator-install-yaml.sh
index b38dd5490..7763c43d7 100755
--- a/manifests/operator/build-clickhouse-operator-yaml.sh
+++ b/manifests/operator/build-clickhouse-operator-install-yaml.sh
@@ -1,5 +1,8 @@
#!/bin/bash
+# For the full list of available vars, check the ${MANIFEST_ROOT}/dev/cat-clickhouse-operator-install-yaml.sh file
+
+# Here we just build the production, all-sections-included .yaml manifest with namespace and image parameters
CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE:-kube-system}"
CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE:-altinity/clickhouse-operator:latest}"
@@ -8,4 +11,4 @@ MANIFEST_ROOT=$(realpath ${CUR_DIR}/..)
CHOPERATOR_IMAGE="${CHOPERATOR_IMAGE}" \
CHOPERATOR_NAMESPACE="${CHOPERATOR_NAMESPACE}" \
-${MANIFEST_ROOT}/dev/cat-clickhouse-operator-yaml.sh > ${CUR_DIR}/clickhouse-operator-install.yaml
+${MANIFEST_ROOT}/dev/cat-clickhouse-operator-install-yaml.sh > ${CUR_DIR}/clickhouse-operator-install.yaml
diff --git a/manifests/operator/clickhouse-operator-install.yaml b/manifests/operator/clickhouse-operator-install.yaml
index d9d1912f1..20b69c806 100644
--- a/manifests/operator/clickhouse-operator-install.yaml
+++ b/manifests/operator/clickhouse-operator-install.yaml
@@ -60,9 +60,20 @@ spec:
type: string
volumeClaimTemplate:
type: string
+ serviceTemplate:
+ type: string
configuration:
type: object
properties:
+ templates:
+ type: object
+ properties:
+ podTemplate:
+ type: string
+ volumeClaimTemplate:
+ type: string
+ serviceTemplate:
+ type: string
zookeeper:
type: object
properties:
@@ -109,6 +120,8 @@ spec:
type: string
volumeClaimTemplate:
type: string
+ serviceTemplate:
+ type: string
layout:
type: object
properties:
@@ -145,6 +158,8 @@ spec:
type: string
volumeClaimTemplate:
type: string
+ serviceTemplate:
+ type: string
replicas:
type: array
items:
@@ -163,9 +178,39 @@ spec:
type: string
volumeClaimTemplate:
type: string
+ serviceTemplate:
+ type: string
templates:
type: object
properties:
+ podTemplates:
+ type: array
+ items:
+ type: object
+ required:
+ - name
+ - spec
+ properties:
+ name:
+ type: string
+ zone:
+ type: object
+ required:
+ - values
+ properties:
+ key:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ distribution:
+ type: string
+ enum:
+ - OnePerHost
+ spec:
+ # TODO specify PodSpec
+ type: object
volumeClaimTemplates:
type: array
items:
@@ -176,21 +221,28 @@ spec:
properties:
name:
type: string
+ reclaimPolicy:
+ type: string
+ enum:
+ - Retain
+ - Delete
spec:
# TODO specify PersistentVolumeClaimSpec
type: object
- podTemplates:
+ serviceTemplates:
type: array
items:
type: object
required:
- name
- spec
- properies:
+ properties:
name:
type: string
+ generateName:
+ type: string
spec:
- # TODO specify PodSpec
+ # TODO specify ServiceSpec
type: object
---
# Possible Template Parameters:
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
index 27242276b..3aa5fd594 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
@@ -35,12 +35,12 @@ func (chi *ClickHouseInstallation) StatusFill(endpoint string, pods []string) {
chi.Status.Endpoint = endpoint
}
-func (chi *ClickHouseInstallation) IsFilled() bool {
+func (chi *ClickHouseInstallation) IsNormalized() bool {
filled := true
clusters := 0
chi.WalkClusters(func(cluster *ChiCluster) error {
clusters++
- if cluster.Address.Namespace == "" {
+ if cluster.Chi == nil {
filled = false
}
return nil
@@ -90,6 +90,25 @@ func (chi *ClickHouseInstallation) FillAddressInfo() int {
return replicasCount
}
+func (chi *ClickHouseInstallation) FillChiPointer() {
+
+ replicaProcessor := func(
+ chi *ClickHouseInstallation,
+ clusterIndex int,
+ cluster *ChiCluster,
+ shardIndex int,
+ shard *ChiShard,
+ replicaIndex int,
+ replica *ChiReplica,
+ ) error {
+ cluster.Chi = chi
+ shard.Chi = chi
+ replica.Chi = chi
+ return nil
+ }
+ chi.WalkReplicasFullPath(replicaProcessor)
+}
+
func (chi *ClickHouseInstallation) WalkClustersFullPath(
f func(chi *ClickHouseInstallation, clusterIndex int, cluster *ChiCluster) error,
) []error {
@@ -208,6 +227,58 @@ func (chi *ClickHouseInstallation) WalkReplicas(
return res
}
+func (chi *ClickHouseInstallation) WalkReplicasTillError(
+ f func(replica *ChiReplica) error,
+) error {
+ for clusterIndex := range chi.Spec.Configuration.Clusters {
+ cluster := &chi.Spec.Configuration.Clusters[clusterIndex]
+ for shardIndex := range cluster.Layout.Shards {
+ shard := &cluster.Layout.Shards[shardIndex]
+ for replicaIndex := range shard.Replicas {
+ replica := &shard.Replicas[replicaIndex]
+ if err := f(replica); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (chi *ClickHouseInstallation) WalkClusterTillError(
+ fChi func(chi *ClickHouseInstallation) error,
+ fCluster func(cluster *ChiCluster) error,
+ fShard func(shard *ChiShard) error,
+ fReplica func(replica *ChiReplica) error,
+) error {
+
+ if err := fChi(chi); err != nil {
+ return err
+ }
+
+ for clusterIndex := range chi.Spec.Configuration.Clusters {
+ cluster := &chi.Spec.Configuration.Clusters[clusterIndex]
+ if err := fCluster(cluster); err != nil {
+ return err
+ }
+ for shardIndex := range cluster.Layout.Shards {
+ shard := &cluster.Layout.Shards[shardIndex]
+ if err := fShard(shard); err != nil {
+ return err
+ }
+ for replicaIndex := range shard.Replicas {
+ replica := &shard.Replicas[replicaIndex]
+ if err := fReplica(replica); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
func (chi *ClickHouseInstallation) MergeFrom(from *ClickHouseInstallation) {
if from == nil {
return
@@ -249,3 +320,40 @@ func (chi *ClickHouseInstallation) ReplicasCount() int {
})
return count
}
+
+// GetPodTemplate gets ChiPodTemplate by name
+func (chi *ClickHouseInstallation) GetPodTemplate(name string) (*ChiPodTemplate, bool) {
+ if chi.Spec.Templates.PodTemplatesIndex == nil {
+ return nil, false
+ } else {
+ template, ok := chi.Spec.Templates.PodTemplatesIndex[name]
+ return template, ok
+ }
+}
+
+// GetVolumeClaimTemplate gets ChiVolumeClaimTemplate by name
+func (chi *ClickHouseInstallation) GetVolumeClaimTemplate(name string) (*ChiVolumeClaimTemplate, bool) {
+ if chi.Spec.Templates.VolumeClaimTemplatesIndex == nil {
+ return nil, false
+ } else {
+ template, ok := chi.Spec.Templates.VolumeClaimTemplatesIndex[name]
+ return template, ok
+ }
+}
+
+// GetServiceTemplate gets ChiServiceTemplate by name
+func (chi *ClickHouseInstallation) GetServiceTemplate(name string) (*ChiServiceTemplate, bool) {
+ if chi.Spec.Templates.ServiceTemplatesIndex == nil {
+ return nil, false
+ } else {
+ template, ok := chi.Spec.Templates.ServiceTemplatesIndex[name]
+ return template, ok
+ }
+}
+
+// GetOwnServiceTemplate gets own ChiServiceTemplate
+func (chi *ClickHouseInstallation) GetOwnServiceTemplate() (*ChiServiceTemplate, bool) {
+ name := chi.Spec.Configuration.Templates.ServiceTemplate
+ template, ok := chi.GetServiceTemplate(name)
+ return template, ok
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
index 019755c6b..ed6ffcecb 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
@@ -15,12 +15,13 @@
package v1
func (cluster *ChiCluster) InheritTemplates(chi *ClickHouseInstallation) {
- if cluster.Templates.PodTemplate == "" {
- cluster.Templates.PodTemplate = chi.Spec.Defaults.Templates.PodTemplate
- }
- if cluster.Templates.VolumeClaimTemplate == "" {
- cluster.Templates.VolumeClaimTemplate = chi.Spec.Defaults.Templates.VolumeClaimTemplate
- }
+ (&cluster.Templates).MergeFrom(&chi.Spec.Defaults.Templates)
+}
+
+func (cluster *ChiCluster) GetServiceTemplate() (*ChiServiceTemplate, bool) {
+ name := cluster.Templates.ServiceTemplate
+ template, ok := cluster.Chi.GetServiceTemplate(name)
+ return template, ok
}
func (cluster *ChiCluster) WalkShards(
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go b/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go
index 3a9a86610..0da3aeec9 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go
@@ -23,12 +23,5 @@ func (defaults *ChiDefaults) MergeFrom(from *ChiDefaults) {
defaults.ReplicasUseFQDN = from.ReplicasUseFQDN
}
(&defaults.DistributedDDL).MergeFrom(&from.DistributedDDL)
-
- if defaults.Templates.PodTemplate == "" {
- defaults.Templates.PodTemplate = from.Templates.PodTemplate
- }
-
- if defaults.Templates.VolumeClaimTemplate == "" {
- defaults.Templates.VolumeClaimTemplate = from.Templates.VolumeClaimTemplate
- }
+ (&defaults.Templates).MergeFrom(&from.Templates)
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
index dad7d5f92..1520fef89 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
@@ -15,10 +15,23 @@
package v1
func (replica *ChiReplica) InheritTemplates(shard *ChiShard) {
- if replica.Templates.PodTemplate == "" {
- replica.Templates.PodTemplate = shard.Templates.PodTemplate
- }
- if replica.Templates.VolumeClaimTemplate == "" {
- replica.Templates.VolumeClaimTemplate = shard.Templates.VolumeClaimTemplate
- }
+ (&replica.Templates).MergeFrom(&shard.Templates)
+}
+
+func (replica *ChiReplica) GetPodTemplate() (*ChiPodTemplate, bool) {
+ name := replica.Templates.PodTemplate
+ template, ok := replica.Chi.GetPodTemplate(name)
+ return template, ok
+}
+
+func (replica *ChiReplica) GetVolumeClaimTemplate() (*ChiVolumeClaimTemplate, bool) {
+ name := replica.Templates.VolumeClaimTemplate
+ template, ok := replica.Chi.GetVolumeClaimTemplate(name)
+ return template, ok
+}
+
+func (replica *ChiReplica) GetServiceTemplate() (*ChiServiceTemplate, bool) {
+ name := replica.Templates.ServiceTemplate
+ template, ok := replica.Chi.GetServiceTemplate(name)
+ return template, ok
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
index 8787100c0..546f32be4 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
@@ -15,12 +15,13 @@
package v1
func (shard *ChiShard) InheritTemplates(cluster *ChiCluster) {
- if shard.Templates.PodTemplate == "" {
- shard.Templates.PodTemplate = cluster.Templates.PodTemplate
- }
- if shard.Templates.VolumeClaimTemplate == "" {
- shard.Templates.VolumeClaimTemplate = cluster.Templates.VolumeClaimTemplate
- }
+ (&shard.Templates).MergeFrom(&cluster.Templates)
+}
+
+func (shard *ChiShard) GetServiceTemplate() (*ChiServiceTemplate, bool) {
+ name := shard.Templates.ServiceTemplate
+ template, ok := shard.Chi.GetServiceTemplate(name)
+ return template, ok
}
func (shard *ChiShard) WalkReplicas(
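
With these changes the template names cascade down one level at a time: chi defaults feed clusters, clusters feed shards, shards feed replicas, all via ChiTemplateNames.MergeFrom (shown later in this patch). A rough sketch of the cascade, assuming the clusters slice lives at .spec.configuration.clusters and that the normalizer drives the walk:

package example

import chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"

// propagateTemplateNames is an illustrative walk; the real call sites are in
// the normalizer, which is outside this hunk.
func propagateTemplateNames(chi *chiv1.ClickHouseInstallation) {
	for c := range chi.Spec.Configuration.Clusters {
		cluster := &chi.Spec.Configuration.Clusters[c]
		cluster.InheritTemplates(chi) // pull names from .spec.defaults.templates
		for s := range cluster.Layout.Shards {
			shard := &cluster.Layout.Shards[s]
			shard.InheritTemplates(cluster) // pull cluster-level names
			for r := range shard.Replicas {
				replica := &shard.Replicas[r]
				replica.InheritTemplates(shard) // pull shard-level names
			}
		}
	}
}
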
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
index 25bda5be7..e9d1efb29 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
@@ -27,13 +27,13 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates) {
}
// Loop over all 'from' templates and copy it in case no such template in receiver
for fromIndex := range from.PodTemplates {
- fromPodTemplate := &from.PodTemplates[fromIndex]
+ fromTemplate := &from.PodTemplates[fromIndex]
// Try to find equal entry among local templates in receiver
equalFound := false
for toIndex := range templates.PodTemplates {
- toPodTemplate := &templates.PodTemplates[toIndex]
- if toPodTemplate.Name == fromPodTemplate.Name {
+ toTemplate := &templates.PodTemplates[toIndex]
+ if toTemplate.Name == fromTemplate.Name {
// Receiver already have such a template
equalFound = true
break
@@ -43,7 +43,7 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates) {
if !equalFound {
// Receiver has not such template
// Append template from `from`
- templates.PodTemplates = append(templates.PodTemplates, *fromPodTemplate.DeepCopy())
+ templates.PodTemplates = append(templates.PodTemplates, *fromTemplate.DeepCopy())
}
}
}
@@ -56,13 +56,13 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates) {
}
// Loop over all 'from' templates and copy it in case no such template in receiver
for fromIndex := range from.VolumeClaimTemplates {
- fromVolumeClaimTemplate := &from.VolumeClaimTemplates[fromIndex]
+ fromTemplate := &from.VolumeClaimTemplates[fromIndex]
// Try to find equal entry among local templates in receiver
equalFound := false
for toIndex := range templates.VolumeClaimTemplates {
- toVolumeClaimTemplate := &templates.VolumeClaimTemplates[toIndex]
- if toVolumeClaimTemplate.Name == fromVolumeClaimTemplate.Name {
+ toTemplate := &templates.VolumeClaimTemplates[toIndex]
+ if toTemplate.Name == fromTemplate.Name {
// Received already have such a node
equalFound = true
break
@@ -72,7 +72,36 @@ func (templates *ChiTemplates) MergeFrom(from *ChiTemplates) {
if !equalFound {
// Receiver has not such template
// Append Node from `from`
- templates.VolumeClaimTemplates = append(templates.VolumeClaimTemplates, *fromVolumeClaimTemplate.DeepCopy())
+ templates.VolumeClaimTemplates = append(templates.VolumeClaimTemplates, *fromTemplate.DeepCopy())
+ }
+ }
+ }
+
+ if len(from.ServiceTemplates) > 0 {
+ // We have templates to copy from
+ // Append ServiceTemplates from `from` to receiver
+ if templates.ServiceTemplates == nil {
+ templates.ServiceTemplates = make([]ChiServiceTemplate, 0)
+ }
+ // Loop over all 'from' templates and copy it in case no such template in receiver
+ for fromIndex := range from.ServiceTemplates {
+ fromTemplate := &from.ServiceTemplates[fromIndex]
+
+ // Try to find equal entry among local templates in receiver
+ equalFound := false
+ for toIndex := range templates.ServiceTemplates {
+ toTemplate := &templates.ServiceTemplates[toIndex]
+ if toTemplate.Name == fromTemplate.Name {
+				// Receiver already has such a template
+ equalFound = true
+ break
+ }
+ }
+
+ if !equalFound {
+				// Receiver does not have such a template
+				// Append template from `from`
+ templates.ServiceTemplates = append(templates.ServiceTemplates, *fromTemplate.DeepCopy())
}
}
}
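
The merge is keyed by template name and non-destructive: entries already present in the receiver win, and only missing names are copied over. A small illustration with made-up template names:

package main

import (
	"fmt"

	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

func main() {
	to := chiv1.ChiTemplates{
		ServiceTemplates: []chiv1.ChiServiceTemplate{{Name: "svc-lb"}},
	}
	from := chiv1.ChiTemplates{
		ServiceTemplates: []chiv1.ChiServiceTemplate{{Name: "svc-lb"}, {Name: "svc-internal"}},
	}
	to.MergeFrom(&from)
	// Prints "svc-lb" (kept from the receiver) and "svc-internal" (copied from `from`)
	for _, t := range to.ServiceTemplates {
		fmt.Println(t.Name)
	}
}
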
diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go
index 4a0abd08e..54fd98a40 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/types.go
@@ -52,17 +52,31 @@ type ChiStatus struct {
type ChiDefaults struct {
ReplicasUseFQDN string `json:"replicasUseFQDN,omitempty" yaml:"replicasUseFQDN"`
DistributedDDL ChiDistributedDDL `json:"distributedDDL,omitempty" yaml:"distributedDDL"`
- Templates ChiTemplateNames `json:"templates" yaml:"templates"`
+ Templates ChiTemplateNames `json:"templates,omitempty" yaml:"templates"`
}
// ChiTemplateNames defines references to .spec.templates to be used on current level of cluster
type ChiTemplateNames struct {
- PodTemplate string `json:"podTemplate,omitempty" yaml:"podTemplate"`
+ PodTemplate string `json:"podTemplate,omitempty" yaml:"podTemplate"`
VolumeClaimTemplate string `json:"volumeClaimTemplate,omitempty" yaml:"volumeClaimTemplate"`
+ ServiceTemplate string `json:"serviceTemplate,omitempty" yaml:"serviceTemplate"`
+}
+
+func (templates *ChiTemplateNames) MergeFrom(from *ChiTemplateNames) {
+ if templates.PodTemplate == "" {
+ templates.PodTemplate = from.PodTemplate
+ }
+ if templates.VolumeClaimTemplate == "" {
+ templates.VolumeClaimTemplate = from.VolumeClaimTemplate
+ }
+ if templates.ServiceTemplate == "" {
+ templates.ServiceTemplate = from.ServiceTemplate
+ }
}
// ChiConfiguration defines configuration section of .spec
type ChiConfiguration struct {
+ Templates ChiTemplateNames `json:"templates" yaml:"templates"`
Zookeeper ChiZookeeperConfig `json:"zookeeper,omitempty" yaml:"zookeeper"`
Users map[string]interface{} `json:"users,omitempty" yaml:"users"`
Profiles map[string]interface{} `json:"profiles,omitempty" yaml:"profiles"`
@@ -78,7 +92,9 @@ type ChiCluster struct {
Layout ChiLayout `json:"layout"`
Templates ChiTemplateNames `json:"templates,omitempty"`
- Address ChiClusterAddress `json:"address"`
+ // Internal data
+ Address ChiClusterAddress `json:"address"`
+ Chi *ClickHouseInstallation `json:"-"`
}
// ChiClusterAddress defines address of a cluster within ClickHouseInstallation
@@ -92,10 +108,11 @@ type ChiClusterAddress struct {
// ChiLayout defines layout section of .spec.configuration.clusters
type ChiLayout struct {
// DEPRECATED - to be removed soon
- Type string `json:"type"`
- ShardsCount int `json:"shardsCount,omitempty"`
- ReplicasCount int `json:"replicasCount,omitempty"`
- Shards []ChiShard `json:"shards,omitempty"`
+ Type string `json:"type"`
+ ShardsCount int `json:"shardsCount,omitempty"`
+ ReplicasCount int `json:"replicasCount,omitempty"`
+ // TODO refactor into map[string]ChiShard
+ Shards []ChiShard `json:"shards,omitempty"`
}
// ChiShard defines item of a shard section of .spec.configuration.clusters[n].shards
@@ -107,9 +124,12 @@ type ChiShard struct {
InternalReplication string `json:"internalReplication,omitempty"`
Templates ChiTemplateNames `json:"templates,omitempty"`
ReplicasCount int `json:"replicasCount,omitempty"`
- Replicas []ChiReplica `json:"replicas,omitempty"`
+ // TODO refactor into map[string]ChiReplica
+ Replicas []ChiReplica `json:"replicas,omitempty"`
- Address ChiShardAddress `json:"address"`
+ // Internal data
+ Address ChiShardAddress `json:"address"`
+ Chi *ClickHouseInstallation `json:"-"`
}
// ChiShardAddress defines address of a shard within ClickHouseInstallation
@@ -128,8 +148,10 @@ type ChiReplica struct {
Port int32 `json:"port,omitempty"`
Templates ChiTemplateNames `json:"templates,omitempty"`
- Address ChiReplicaAddress `json:"address"`
- Config ChiReplicaConfig `json:"config"`
+ // Internal data
+ Address ChiReplicaAddress `json:"address"`
+ Config ChiReplicaConfig `json:"config"`
+ Chi *ClickHouseInstallation `json:"-"`
}
// ChiReplicaAddress defines address of a replica within ClickHouseInstallation
@@ -152,22 +174,59 @@ type ChiReplicaConfig struct {
// ChiTemplates defines templates section of .spec
type ChiTemplates struct {
- // TODO refactor into [string]ChiPodTemplate
- PodTemplates []ChiPodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates"`
- // TODO refactor into [string]ChiVolumeClaimTemplate
+ // Templates
+ PodTemplates []ChiPodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates"`
VolumeClaimTemplates []ChiVolumeClaimTemplate `json:"volumeClaimTemplates,omitempty" yaml:"volumeClaimTemplates"`
+ ServiceTemplates []ChiServiceTemplate `json:"serviceTemplates,omitempty" yaml:"serviceTemplates"`
+
+ // Index maps template name to template itself
+ PodTemplatesIndex map[string]*ChiPodTemplate
+ VolumeClaimTemplatesIndex map[string]*ChiVolumeClaimTemplate
+ ServiceTemplatesIndex map[string]*ChiServiceTemplate
}
// ChiPodTemplate defines full Pod Template, directly used by StatefulSet
type ChiPodTemplate struct {
- Name string `json:"name" yaml:"name"`
- Spec corev1.PodSpec `json:"spec" yaml:"spec"`
+ Name string `json:"name" yaml:"name"`
+	Zone         ChiPodTemplateZone `json:"zone" yaml:"zone"`
+ Distribution string `json:"distribution" yaml:"distribution"`
+ Spec corev1.PodSpec `json:"spec" yaml:"spec"`
+}
+
+type ChiPodTemplateZone struct {
+ Key string `json:"key" yaml:"key"`
+ Values []string `json:"values" yaml:"values"`
}
// ChiVolumeClaimTemplate defines PersistentVolumeClaim Template, directly used by StatefulSet
type ChiVolumeClaimTemplate struct {
- Name string `json:"name" yaml:"name"`
- Spec corev1.PersistentVolumeClaimSpec `json:"spec" yaml:"spec"`
+ Name string `json:"name" yaml:"name"`
+ PVCReclaimPolicy PVCReclaimPolicy `json:"reclaimPolicy" yaml:"reclaimPolicy"`
+ Spec corev1.PersistentVolumeClaimSpec `json:"spec" yaml:"spec"`
+}
+
+type PVCReclaimPolicy string
+
+const (
+ PVCReclaimPolicyRetain PVCReclaimPolicy = "Retain"
+ PVCReclaimPolicyDelete PVCReclaimPolicy = "Delete"
+)
+
+// IsValid checks whether PVCReclaimPolicy is valid
+func (v PVCReclaimPolicy) IsValid() bool {
+ switch v {
+ case PVCReclaimPolicyRetain:
+ return true
+ case PVCReclaimPolicyDelete:
+ return true
+ }
+ return false
+}
+
+type ChiServiceTemplate struct {
+ Name string `json:"name" yaml:"name"`
+ GenerateName string `json:"generateName" yaml:"generateName"`
+ Spec corev1.ServiceSpec `json:"spec" yaml:"spec"`
}
// ChiDistributedDDL defines distributedDDL section of .spec.defaults
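
The three *Index maps carry no JSON tags, so they never appear in the manifest; presumably they are filled during normalization after the template slices are parsed. A minimal sketch of how such indexes could be built (buildTemplateIndexes is hypothetical, not part of this patch):

package example

import chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"

// buildTemplateIndexes populates the name->template maps consumed by
// GetPodTemplate(), GetVolumeClaimTemplate() and GetServiceTemplate().
func buildTemplateIndexes(t *chiv1.ChiTemplates) {
	t.PodTemplatesIndex = make(map[string]*chiv1.ChiPodTemplate, len(t.PodTemplates))
	for i := range t.PodTemplates {
		t.PodTemplatesIndex[t.PodTemplates[i].Name] = &t.PodTemplates[i]
	}
	t.VolumeClaimTemplatesIndex = make(map[string]*chiv1.ChiVolumeClaimTemplate, len(t.VolumeClaimTemplates))
	for i := range t.VolumeClaimTemplates {
		t.VolumeClaimTemplatesIndex[t.VolumeClaimTemplates[i].Name] = &t.VolumeClaimTemplates[i]
	}
	t.ServiceTemplatesIndex = make(map[string]*chiv1.ChiServiceTemplate, len(t.ServiceTemplates))
	for i := range t.ServiceTemplates {
		t.ServiceTemplatesIndex[t.ServiceTemplates[i].Name] = &t.ServiceTemplates[i]
	}
}
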
diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
index 2a13f320c..b1e509c7d 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
@@ -30,6 +30,15 @@ func (in *ChiCluster) DeepCopyInto(out *ChiCluster) {
in.Layout.DeepCopyInto(&out.Layout)
out.Templates = in.Templates
out.Address = in.Address
+ if in.Chi != nil {
+ in, out := &in.Chi, &out.Chi
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ }
return
}
@@ -62,6 +71,7 @@ func (in *ChiClusterAddress) DeepCopy() *ChiClusterAddress {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiConfiguration) DeepCopyInto(out *ChiConfiguration) {
*out = *in
+ out.Templates = in.Templates
in.Zookeeper.DeepCopyInto(&out.Zookeeper)
if in.Users != nil {
in, out := &in.Users, &out.Users
@@ -187,6 +197,7 @@ func (in *ChiLayout) DeepCopy() *ChiLayout {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiPodTemplate) DeepCopyInto(out *ChiPodTemplate) {
*out = *in
+ in.Zone.DeepCopyInto(&out.Zone)
in.Spec.DeepCopyInto(&out.Spec)
return
}
@@ -201,12 +212,42 @@ func (in *ChiPodTemplate) DeepCopy() *ChiPodTemplate {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiPodTemplateZone) DeepCopyInto(out *ChiPodTemplateZone) {
+ *out = *in
+ if in.Values != nil {
+ in, out := &in.Values, &out.Values
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiPodTemplateZone.
+func (in *ChiPodTemplateZone) DeepCopy() *ChiPodTemplateZone {
+ if in == nil {
+ return nil
+ }
+ out := new(ChiPodTemplateZone)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiReplica) DeepCopyInto(out *ChiReplica) {
*out = *in
out.Templates = in.Templates
out.Address = in.Address
out.Config = in.Config
+ if in.Chi != nil {
+ in, out := &in.Chi, &out.Chi
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ }
return
}
@@ -252,6 +293,23 @@ func (in *ChiReplicaConfig) DeepCopy() *ChiReplicaConfig {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiServiceTemplate) DeepCopyInto(out *ChiServiceTemplate) {
+ *out = *in
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiServiceTemplate.
+func (in *ChiServiceTemplate) DeepCopy() *ChiServiceTemplate {
+ if in == nil {
+ return nil
+ }
+ out := new(ChiServiceTemplate)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiShard) DeepCopyInto(out *ChiShard) {
*out = *in
@@ -259,9 +317,20 @@ func (in *ChiShard) DeepCopyInto(out *ChiShard) {
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = make([]ChiReplica, len(*in))
- copy(*out, *in)
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
}
out.Address = in.Address
+ if in.Chi != nil {
+ in, out := &in.Chi, &out.Chi
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ }
return
}
@@ -364,6 +433,49 @@ func (in *ChiTemplates) DeepCopyInto(out *ChiTemplates) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.ServiceTemplates != nil {
+ in, out := &in.ServiceTemplates, &out.ServiceTemplates
+ *out = make([]ChiServiceTemplate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.PodTemplatesIndex != nil {
+ in, out := &in.PodTemplatesIndex, &out.PodTemplatesIndex
+ *out = make(map[string]*ChiPodTemplate, len(*in))
+ for key, val := range *in {
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ (*out)[key] = new(ChiPodTemplate)
+ val.DeepCopyInto((*out)[key])
+ }
+ }
+ }
+ if in.VolumeClaimTemplatesIndex != nil {
+ in, out := &in.VolumeClaimTemplatesIndex, &out.VolumeClaimTemplatesIndex
+ *out = make(map[string]*ChiVolumeClaimTemplate, len(*in))
+ for key, val := range *in {
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ (*out)[key] = new(ChiVolumeClaimTemplate)
+ val.DeepCopyInto((*out)[key])
+ }
+ }
+ }
+ if in.ServiceTemplatesIndex != nil {
+ in, out := &in.ServiceTemplatesIndex, &out.ServiceTemplatesIndex
+ *out = make(map[string]*ChiServiceTemplate, len(*in))
+ for key, val := range *in {
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ (*out)[key] = new(ChiServiceTemplate)
+ val.DeepCopyInto((*out)[key])
+ }
+ }
+ }
return
}
diff --git a/pkg/apis/metrics/fetcher.go b/pkg/apis/metrics/fetcher.go
index 8102846ba..8c463c640 100644
--- a/pkg/apis/metrics/fetcher.go
+++ b/pkg/apis/metrics/fetcher.go
@@ -22,26 +22,54 @@ import (
const (
queryMetricsSQL = `
- SELECT
- concat('metric.', metric) AS metric,
- toString(value) AS value,
- '' AS description,
- 'gauge' AS type
- FROM system.asynchronous_metrics
- UNION ALL
- SELECT
- concat('metric.', metric) AS metric,
- toString(value) AS value,
- description AS description,
- 'gauge' AS type
- FROM system.metrics
- UNION ALL
- SELECT
- concat('event.', event) AS metric,
- toString(value) AS value,
- description AS description,
- 'counter' AS type
- FROM system.events`
+ SELECT
+ concat('metric.', metric) AS metric,
+ toString(value) AS value,
+ '' AS description,
+ 'gauge' AS type
+ FROM system.asynchronous_metrics
+ UNION ALL
+ SELECT
+ concat('metric.', metric) AS metric,
+ toString(value) AS value,
+ description AS description,
+ 'gauge' AS type
+ FROM system.metrics
+ UNION ALL
+ SELECT
+ concat('event.', event) AS metric,
+ toString(value) AS value,
+ description AS description,
+ 'counter' AS type
+ FROM system.events
+ UNION ALL
+ SELECT
+ 'metric.DiskDataBytes' AS metric,
+ toString(sum(bytes_on_disk)) AS value,
+ 'Total data size for all ClickHouse tables' AS description,
+ 'gauge' AS type
+ FROM system.parts
+ UNION ALL
+ SELECT
+ 'metric.MemoryPrimaryKeyBytesAllocated' AS metric,
+ toString(sum(primary_key_bytes_in_memory_allocated)) AS value,
+ 'Memory size allocated for primary keys' AS description,
+ 'gauge' AS type
+ FROM system.parts
+ UNION ALL
+ SELECT
+ 'metric.MemoryDictionaryBytesAllocated' AS metric,
+ toString(sum(bytes_allocated)) AS value,
+ 'Memory size allocated for dictionaries' AS description,
+ 'gauge' AS type
+ FROM system.dictionaries
+ UNION ALL
+ SELECT
+ 'metric.DiskFreeBytes' AS metric,
+ toString(filesystemFree()) AS value,
+ 'Free disk space available at file system' AS description,
+ 'gauge' AS type
+ `
queryTableSizesSQL = `
SELECT
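
The extended metrics query adds disk and memory gauges computed from system.parts, system.dictionaries and filesystemFree(). The snippet below is a standalone way to eyeball one of the new gauges; it assumes a database/sql ClickHouse driver (the mailru/go-clickhouse import and the DSN are illustrative, not taken from this repo):

package main

import (
	"database/sql"
	"fmt"
	"log"

	// Assumed driver, registered under the name "clickhouse"; any database/sql
	// ClickHouse driver would do.
	_ "github.com/mailru/go-clickhouse"
)

func main() {
	// Illustrative DSN - point it at a real ClickHouse HTTP endpoint
	db, err := sql.Open("clickhouse", "http://127.0.0.1:8123/default")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same aggregate as the new 'metric.DiskDataBytes' branch of queryMetricsSQL
	var diskDataBytes string
	row := db.QueryRow("SELECT toString(sum(bytes_on_disk)) FROM system.parts")
	if err := row.Scan(&diskDataBytes); err != nil {
		log.Fatal(err)
	}
	fmt.Println("metric.DiskDataBytes =", diskDataBytes)
}
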
diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go
index 2782bde56..890e4171c 100644
--- a/pkg/controller/chi/controller.go
+++ b/pkg/controller/chi/controller.go
@@ -28,7 +28,6 @@ import (
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1"
@@ -452,7 +451,7 @@ func (c *Controller) onAddChi(chi *chop.ClickHouseInstallation) error {
}
c.eventChi(chi, eventTypeNormal, eventActionCreate, eventReasonCreateInProgress, fmt.Sprintf("onAddChi(%s/%s) create objects", chi.Namespace, chi.Name))
- err = c.reconcileChi(chi)
+ err = c.reconcile(chi)
if err != nil {
glog.V(1).Infof("ClickHouseInstallation (%q): unable to create controlled resources: %q", chi.Name, err)
c.eventChi(chi, eventTypeWarning, eventActionCreate, eventReasonCreateFailed, fmt.Sprintf("ClickHouseInstallation (%s): unable to create", chi.Name))
@@ -493,11 +492,11 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error {
return nil
}
- if !old.IsFilled() {
+ if !old.IsNormalized() {
old, _ = c.normalizer.CreateTemplatedChi(old)
}
- if !new.IsFilled() {
+ if !new.IsNormalized() {
new, _ = c.normalizer.CreateTemplatedChi(new)
}
@@ -512,6 +511,7 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error {
c.eventChi(old, eventTypeNormal, eventActionUpdate, eventReasonUpdateStarted, fmt.Sprintf("onUpdateChi(%s/%s):", old.Namespace, old.Name))
// Deal with removed items
+ // TODO refactor to map[string]object handling, instead of slice
for path := range diff.Removed {
switch diff.Removed[path].(type) {
case chop.ChiCluster:
@@ -532,7 +532,7 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error {
// Deal with added/updated items
// c.listStatefulSetResources(chi)
c.eventChi(old, eventTypeNormal, eventActionUpdate, eventReasonUpdateInProgress, fmt.Sprintf("onUpdateChi(%s/%s) update resources", old.Namespace, old.Name))
- if err := c.reconcileChi(new); err != nil {
+ if err := c.reconcile(new); err != nil {
glog.V(1).Infof("reconcileChi() FAILED: %v", err)
c.eventChi(old, eventTypeWarning, eventActionUpdate, eventReasonUpdateFailed, fmt.Sprintf("onUpdateChi(%s/%s) update resources failed", old.Namespace, old.Name))
} else {
@@ -559,6 +559,12 @@ func (c *Controller) onUpdateChi(old, new *chop.ClickHouseInstallation) error {
}
func (c *Controller) onDeleteChi(chi *chop.ClickHouseInstallation) error {
+ chi, err := c.normalizer.CreateTemplatedChi(chi)
+ if err != nil {
+ glog.V(1).Infof("ClickHouseInstallation (%q): unable to normalize: %q", chi.Name, err)
+ return err
+ }
+
c.eventChi(chi, eventTypeNormal, eventActionDelete, eventReasonDeleteStarted, fmt.Sprintf("onDeleteChi(%s/%s) started", chi.Namespace, chi.Name))
c.deleteChi(chi)
c.eventChi(chi, eventTypeNormal, eventActionDelete, eventReasonDeleteCompleted, fmt.Sprintf("onDeleteChi(%s/%s) completed", chi.Namespace, chi.Name))
@@ -632,21 +638,3 @@ func waitForCacheSync(name string, stopCh <-chan struct{}, cacheSyncs ...cache.I
glog.V(1).Infof("Caches are synced for %s controller", name)
return true
}
-
-// clusterWideSelector returns labels.Selector object
-func clusterWideSelector(name string) labels.Selector {
- return labels.SelectorFromSet(labels.Set{
- chopmodels.LabelChop: name,
- })
- /*
- glog.V(2).Infof("ClickHouseInstallation (%q) listing controlled resources", chi.Name)
- ssList, err := c.statefulSetLister.StatefulSets(chi.Namespace).List(clusterWideSelector(chi.Name))
- if err != nil {
- return err
- }
- // Listing controlled resources
- for i := range ssList {
- glog.V(2).Infof("ClickHouseInstallation (%q) controlls StatefulSet: %q", chi.Name, ssList[i].Name)
- }
- */
-}
diff --git a/pkg/controller/chi/creators.go b/pkg/controller/chi/creators.go
index 1bda5bab9..434f2e9f7 100644
--- a/pkg/controller/chi/creators.go
+++ b/pkg/controller/chi/creators.go
@@ -29,38 +29,21 @@ import (
)
// reconcileChi reconciles ClickHouseInstallation
-func (c *Controller) reconcileChi(chi *chop.ClickHouseInstallation) error {
- creator := chopmodel.NewCreator(chi, c.chopConfig, c.version)
- listOfObjectsLists := creator.CreateObjects()
-
- for i := range listOfObjectsLists {
- switch listOfObjectsLists[i].(type) {
- case chopmodel.ServiceList:
- for j := range listOfObjectsLists[i].(chopmodel.ServiceList) {
- if err := c.reconcileService(listOfObjectsLists[i].(chopmodel.ServiceList)[j]); err != nil {
- return err
- }
- }
- case chopmodel.ConfigMapList:
- for j := range listOfObjectsLists[i].(chopmodel.ConfigMapList) {
- if err := c.reconcileConfigMap(listOfObjectsLists[i].(chopmodel.ConfigMapList)[j]); err != nil {
- return err
- }
- }
- case chopmodel.StatefulSetList:
- for j := range listOfObjectsLists[i].(chopmodel.StatefulSetList) {
- if err := c.reconcileStatefulSet(listOfObjectsLists[i].(chopmodel.StatefulSetList)[j]); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
+func (c *Controller) reconcile(chi *chop.ClickHouseInstallation) error {
+ reconciler := chopmodel.NewReconciler(
+ chi,
+ c.chopConfig,
+ c.version,
+ &chopmodel.ReconcileFuncs{
+ ReconcileConfigMap: c.ReconcileConfigMap,
+ ReconcileService: c.ReconcileService,
+ ReconcileStatefulSet: c.ReconcileStatefulSet,
+ })
+ return reconciler.Reconcile()
}
// reconcileConfigMap reconciles core.ConfigMap
-func (c *Controller) reconcileConfigMap(configMap *core.ConfigMap) error {
+func (c *Controller) ReconcileConfigMap(configMap *core.ConfigMap) error {
// Check whether object with such name already exists in k8s
curConfigMap, err := c.getConfigMap(&configMap.ObjectMeta)
@@ -89,7 +72,7 @@ func (c *Controller) reconcileConfigMap(configMap *core.ConfigMap) error {
}
// reconcileService reconciles core.Service
-func (c *Controller) reconcileService(service *core.Service) error {
+func (c *Controller) ReconcileService(service *core.Service) error {
// Check whether object with such name already exists in k8s
curService, err := c.getService(&service.ObjectMeta)
@@ -113,7 +96,7 @@ func (c *Controller) reconcileService(service *core.Service) error {
}
// reconcileStatefulSet reconciles apps.StatefulSet
-func (c *Controller) reconcileStatefulSet(newStatefulSet *apps.StatefulSet) error {
+func (c *Controller) ReconcileStatefulSet(newStatefulSet *apps.StatefulSet, replica *chop.ChiReplica) error {
// Check whether object with such name already exists in k8s
curStatefulSet, err := c.getStatefulSet(&newStatefulSet.ObjectMeta)
@@ -124,25 +107,24 @@ func (c *Controller) reconcileStatefulSet(newStatefulSet *apps.StatefulSet) erro
if apierrors.IsNotFound(err) {
// StatefulSet with such name not found - create StatefulSet
- return c.createStatefulSet(newStatefulSet)
+ return c.createStatefulSet(newStatefulSet, replica)
}
// Error has happened with .Get()
return err
}
-func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet) error {
+func (c *Controller) createStatefulSet(statefulSet *apps.StatefulSet, replica *chop.ChiReplica) error {
if statefulSet, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(statefulSet); err != nil {
+		// Create() call failed
return err
} else if err := c.waitStatefulSetGeneration(statefulSet.Namespace, statefulSet.Name, statefulSet.Generation); err == nil {
// Target generation reached, StatefulSet created successfully
return nil
} else {
// Unable to reach target generation, StatefulSet create failed, time to rollback?
- return c.onStatefulSetCreateFailed(statefulSet)
+ return c.onStatefulSetCreateFailed(statefulSet, replica)
}
-
- return errors.New("createStatefulSet() - unknown position")
}
func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStatefulSet *apps.StatefulSet) error {
@@ -177,8 +159,6 @@ func (c *Controller) updateStatefulSet(oldStatefulSet *apps.StatefulSet, newStat
// Unable to reach target generation, StatefulSet update failed, time to rollback?
return c.onStatefulSetUpdateFailed(oldStatefulSet)
}
-
- return errors.New("updateStatefulSet() - unknown position")
}
// waitStatefulSetGeneration polls StatefulSet for reaching target generation
@@ -216,13 +196,11 @@ func (c *Controller) waitStatefulSetGeneration(namespace, name string, targetGen
return errors.New(fmt.Sprintf("waitStatefulSetGeneration(%s/%s) - wait timeout", namespace, name))
}
}
-
- return errors.New(fmt.Sprintf("waitStatefulSetGeneration(%s/%s) - unknown position", namespace, name))
}
// onStatefulSetCreateFailed handles situation when StatefulSet create failed
// It can just delete failed StatefulSet or do nothing
-func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulSet) error {
+func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulSet, replica *chop.ChiReplica) error {
// Convenience shortcuts
namespace := failedStatefulSet.Namespace
name := failedStatefulSet.Name
@@ -237,14 +215,12 @@ func (c *Controller) onStatefulSetCreateFailed(failedStatefulSet *apps.StatefulS
case config.OnStatefulSetCreateFailureActionDelete:
// Delete gracefully problematic failed StatefulSet
glog.V(1).Infof("onStatefulSetCreateFailed(%s/%s) - going to DELETE FAILED StatefulSet", namespace, name)
- c.statefulSetDelete(namespace, name)
+ _ = c.deleteReplica(replica)
return c.shouldContinueOnCreateFailed()
default:
glog.V(1).Infof("Unknown c.chopConfig.OnStatefulSetCreateFailureAction=%s", c.chopConfig.OnStatefulSetCreateFailureAction)
return nil
}
-
- return errors.New(fmt.Sprintf("onStatefulSetCreateFailed(%s/%s) - unknown position", namespace, name))
}
// onStatefulSetUpdateFailed handles situation when StatefulSet update failed
@@ -283,8 +259,6 @@ func (c *Controller) onStatefulSetUpdateFailed(rollbackStatefulSet *apps.Statefu
glog.V(1).Infof("Unknown c.chopConfig.OnStatefulSetUpdateFailureAction=%s", c.chopConfig.OnStatefulSetUpdateFailureAction)
return nil
}
-
- return errors.New(fmt.Sprintf("onStatefulSetUpdateFailed(%s/%s) - unknown position", namespace, name))
}
// shouldContinueOnCreateFailed return nil in case 'continue' or error in case 'do not continue'
@@ -298,7 +272,7 @@ func (c *Controller) shouldContinueOnCreateFailed() error {
}
// Do not continue update
- return errors.New(fmt.Sprintf("Create stopped due to previous errors"))
+ return fmt.Errorf("create stopped due to previous errors")
}
// shouldContinueOnUpdateFailed return nil in case 'continue' or error in case 'do not continue'
@@ -312,7 +286,7 @@ func (c *Controller) shouldContinueOnUpdateFailed() error {
}
// Do not continue update
- return errors.New(fmt.Sprintf("Update stopped due to previous errors"))
+ return fmt.Errorf("update stopped due to previous errors")
}
// hasStatefulSetReachedGeneration returns whether has StatefulSet reached the expected generation after upgrade or not
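
The controller no longer receives object lists back from the model; instead it hands its reconcile methods to chopmodel.NewReconciler as callbacks. The Reconciler itself is not part of this hunk, so the sketch below only mirrors the callback bundle; field types are inferred from the controller methods above and may differ from the real declaration:

package example

import (
	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	apps "k8s.io/api/apps/v1"
	core "k8s.io/api/core/v1"
)

// ReconcileFuncs mirrors the callback bundle passed to chopmodel.NewReconciler.
type ReconcileFuncs struct {
	ReconcileConfigMap   func(*core.ConfigMap) error
	ReconcileService     func(*core.Service) error
	ReconcileStatefulSet func(*apps.StatefulSet, *chiv1.ChiReplica) error
}

// The Reconciler presumably walks the CHI and invokes the callbacks per object,
// roughly like:
//
//	chi.WalkReplicas(func(replica *chiv1.ChiReplica) error {
//		return funcs.ReconcileStatefulSet(newStatefulSetFor(replica), replica)
//	})
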
diff --git a/pkg/controller/chi/deleters.go b/pkg/controller/chi/deleters.go
index 0136bc69e..b41b4d935 100644
--- a/pkg/controller/chi/deleters.go
+++ b/pkg/controller/chi/deleters.go
@@ -15,10 +15,9 @@
package chi
import (
- "github.com/golang/glog"
-
chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
chopmodel "github.com/altinity/clickhouse-operator/pkg/model"
+ "github.com/golang/glog"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -34,11 +33,13 @@ func newDeleteOptions() *metav1.DeleteOptions {
}
// deleteTablesOnReplica deletes ClickHouse tables on replica before replica is deleted
-func (c *Controller) deleteTablesOnReplica(replica *chop.ChiReplica) {
+func (c *Controller) deleteTablesOnReplica(replica *chop.ChiReplica) error {
// Delete tables on replica
tableNames, dropTableSQLs, _ := c.schemer.ReplicaGetDropTables(replica)
glog.V(1).Infof("Drop tables: %v as %v", tableNames, dropTableSQLs)
_ = c.schemer.ReplicaApplySQLs(replica, dropTableSQLs, true)
+
+ return nil
}
// deleteReplica deletes all kubernetes resources related to replica *chop.ChiReplica
@@ -46,33 +47,17 @@ func (c *Controller) deleteReplica(replica *chop.ChiReplica) error {
// Each replica consists of
// 1. Tables on replica - we need to delete tables on replica in order to clean Zookeeper data
// 2. StatefulSet
- // 3. ConfigMap
- // 4. Service
+ // 3. PersistentVolumeClaim
+ // 4. ConfigMap
+ // 5. Service
// Need to delete all these item
+ glog.V(1).Infof("Start delete replica %s/%s", replica.Address.ClusterName, replica.Name)
- c.deleteTablesOnReplica(replica)
-
- namespace := replica.Address.Namespace
-
- // Delete StatefulSet
- statefulSetName := chopmodel.CreateStatefulSetName(replica)
- c.statefulSetDelete(namespace, statefulSetName)
-
- // Delete ConfigMap
- configMapName := chopmodel.CreateConfigMapPodName(replica)
- if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(configMapName, newDeleteOptions()); err == nil {
- glog.V(1).Infof("ConfigMap %s/%s deleted", namespace, configMapName)
- } else {
- glog.V(1).Infof("ConfigMap %s/%s delete FAILED %v", namespace, configMapName, err)
- }
-
- // Delete Service
- statefulSetServiceName := chopmodel.CreateStatefulSetServiceName(replica)
- if err := c.kubeClient.CoreV1().Services(namespace).Delete(statefulSetServiceName, newDeleteOptions()); err == nil {
- glog.V(1).Infof("Service %s/%s deleted", namespace, statefulSetServiceName)
- } else {
- glog.V(1).Infof("Service %s/%s delete FAILED %v", namespace, statefulSetServiceName, err)
- }
+ _ = c.deleteTablesOnReplica(replica)
+ _ = c.statefulSetDelete(replica)
+ _ = c.persistentVolumeClaimDelete(replica)
+ _ = c.configMapDelete(replica)
+ _ = c.serviceDelete(replica)
return nil
}
@@ -84,11 +69,13 @@ func (c *Controller) deleteShard(shard *chop.ChiShard) {
// deleteCluster deletes all kubernetes resources related to cluster *chop.ChiCluster
func (c *Controller) deleteCluster(cluster *chop.ChiCluster) {
+ glog.V(1).Infof("Start delete cluster %s", cluster.Name)
cluster.WalkReplicas(c.deleteReplica)
}
// deleteChi deletes all kubernetes resources related to chi *chop.ClickHouseInstallation
func (c *Controller) deleteChi(chi *chop.ClickHouseInstallation) {
+ // Delete all clusters
chi.WalkClusters(func(cluster *chop.ChiCluster) error {
c.deleteCluster(cluster)
return nil
@@ -137,12 +124,18 @@ func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet) error {
}
// statefulSetDelete gracefully deletes StatefulSet through zeroing Pod's count
-func (c *Controller) statefulSetDelete(namespace, name string) error {
+func (c *Controller) statefulSetDelete(replica *chop.ChiReplica) error {
// IMPORTANT
// StatefulSets do not provide any guarantees on the termination of pods when a StatefulSet is deleted.
// To achieve ordered and graceful termination of the pods in the StatefulSet,
// it is possible to scale the StatefulSet down to 0 prior to deletion.
+ // Namespaced name
+ name := chopmodel.CreateStatefulSetName(replica)
+ namespace := replica.Address.Namespace
+
+ glog.V(1).Infof("statefulSetDelete(%s/%s)", namespace, name)
+
statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name)
if err != nil {
glog.V(1).Infof("error get StatefulSet %s/%s", namespace, name)
@@ -164,3 +157,62 @@ func (c *Controller) statefulSetDelete(namespace, name string) error {
return nil
}
+
+// persistentVolumeClaimDelete deletes PersistentVolumeClaim
+func (c *Controller) persistentVolumeClaimDelete(replica *chop.ChiReplica) error {
+
+ if !chopmodel.ReplicaCanDeletePVC(replica) {
+		glog.V(1).Info("PVC should not be deleted, leaving them intact")
+ return nil
+ }
+
+ namespace := replica.Address.Namespace
+ labeler := chopmodel.NewLabeler(c.version, replica.Chi)
+ listOptions := newListOptions(labeler.GetSelectorReplicaScope(replica))
+ if list, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(listOptions); err == nil {
+ glog.V(1).Infof("OK get list of PVC for replica %s/%s", namespace, replica.Name)
+ for i := range list.Items {
+ pvc := &list.Items[i]
+ if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(pvc.Name, newDeleteOptions()); err == nil {
+ glog.V(1).Infof("OK delete PVC %s/%s", namespace, pvc.Name)
+ } else {
+ glog.V(1).Infof("FAIL delete PVC %s/%s %v", namespace, pvc.Name, err)
+ }
+ }
+ } else {
+ glog.V(1).Infof("FAIL get list of PVC for replica %s/%s %v", namespace, replica.Name, err)
+ }
+
+ return nil
+}
+
+// configMapDelete deletes ConfigMap
+func (c *Controller) configMapDelete(replica *chop.ChiReplica) error {
+ name := chopmodel.CreateConfigMapPodName(replica)
+ namespace := replica.Address.Namespace
+
+ glog.V(1).Infof("configMapDelete(%s/%s)", namespace, name)
+
+ if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(name, newDeleteOptions()); err == nil {
+ glog.V(1).Infof("ConfigMap %s/%s deleted", namespace, name)
+ } else {
+ glog.V(1).Infof("ConfigMap %s/%s delete FAILED %v", namespace, name, err)
+ }
+
+ return nil
+}
+
+// serviceDelete deletes Service
+func (c *Controller) serviceDelete(replica *chop.ChiReplica) error {
+ name := chopmodel.CreateStatefulSetServiceName(replica)
+ namespace := replica.Address.Namespace
+
+ glog.V(1).Infof("serviceDelete(%s/%s)", namespace, name)
+
+ if err := c.kubeClient.CoreV1().Services(namespace).Delete(name, newDeleteOptions()); err == nil {
+ glog.V(1).Infof("Service %s/%s deleted", namespace, name)
+ } else {
+ glog.V(1).Infof("Service %s/%s delete FAILED %v", namespace, name, err)
+ }
+ return nil
+}
diff --git a/pkg/controller/chi/getters.go b/pkg/controller/chi/getters.go
index 7def88cb5..631a3d2bc 100644
--- a/pkg/controller/chi/getters.go
+++ b/pkg/controller/chi/getters.go
@@ -23,7 +23,7 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
- chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
chopmodel "github.com/altinity/clickhouse-operator/pkg/model"
)
@@ -101,16 +101,20 @@ func (c *Controller) getStatefulSet(obj *meta.ObjectMeta) (*apps.StatefulSet, er
if apierrors.IsNotFound(err) {
// Object with such name not found
// Try to find by labels
- if set, err := chopmodel.GetSelectorReplicaFromObjectMeta(obj); err == nil {
+ if set, err := chopmodel.GetSelectorReplicaFromObjectMeta(obj); err != nil {
+ return nil, err
+ } else {
selector := labels.SelectorFromSet(set)
-
- objects, err := c.statefulSetLister.StatefulSets(obj.Namespace).List(selector)
- if err != nil {
+ if objects, err := c.statefulSetLister.StatefulSets(obj.Namespace).List(selector); err != nil {
return nil, err
- }
- if len(objects) == 1 {
+ } else if len(objects) == 1 {
// Object found by labels
return objects[0], nil
+ } else if len(objects) > 1 {
+				// Multiple objects found by labels - ambiguous result
+				return nil, fmt.Errorf("too many objects returned by selector")
+ } else {
+ // Zero? Fall through and return IsNotFound() error
}
}
}
@@ -119,26 +123,10 @@ func (c *Controller) getStatefulSet(obj *meta.ObjectMeta) (*apps.StatefulSet, er
return nil, err
}
-// TODO move labels into models modules
-func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chi.ClickHouseInstallation, error) {
- // Parse Labels
- // Labels: map[string]string{
- // labelChop: AppVersion,
- // LabelChi: replica.Address.ChiName,
- // LabelCluster: replica.Address.ClusterName,
- // LabelClusterIndex: strconv.Itoa(replica.Address.ClusterIndex),
- // LabelReplicaIndex: strconv.Itoa(replica.Address.ReplicaIndex),
- // },
-
- // ObjectMeta must have some labels
- if len(objectMeta.Labels) == 0 {
- return nil, fmt.Errorf("ObjectMeta %s does not have labels", objectMeta.Name)
- }
-
- // ObjectMeta must have LabelChi: chi.Name label
- chiName, ok := objectMeta.Labels[chopmodel.LabelChi]
- if !ok {
- return nil, fmt.Errorf("ObjectMeta %s does not generated by CHI", objectMeta.Name)
+func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ClickHouseInstallation, error) {
+ chiName, err := chopmodel.GetChiNameFromObjectMeta(objectMeta)
+ if err != nil {
+		return nil, fmt.Errorf("ObjectMeta %s is not generated by CHI: %v", objectMeta.Name, err)
}
chi, err := c.chiLister.ClickHouseInstallations(objectMeta.Namespace).Get(chiName)
@@ -154,26 +142,10 @@ func (c *Controller) createChiFromObjectMeta(objectMeta *meta.ObjectMeta) (*chi.
return chi, nil
}
-// TODO move labels into models modules
-func (c *Controller) createClusterFromObjectMeta(objectMeta *meta.ObjectMeta) (*chi.ChiCluster, error) {
- // Parse Labels
- // Labels: map[string]string{
- // labelChop: AppVersion,
- // LabelChi: replica.Address.ChiName,
- // LabelCluster: replica.Address.ClusterName,
- // LabelClusterIndex: strconv.Itoa(replica.Address.ClusterIndex),
- // LabelReplicaIndex: strconv.Itoa(replica.Address.ReplicaIndex),
- // },
-
- // ObjectMeta must have some labels
- if len(objectMeta.Labels) == 0 {
- return nil, fmt.Errorf("ObjectMeta %s does not have labels", objectMeta.Name)
- }
-
- // ObjectMeta must have LabelCluster
- clusterName, ok := objectMeta.Labels[chopmodel.LabelCluster]
- if !ok {
- return nil, fmt.Errorf("ObjectMeta %s does not generated by CHI", objectMeta.Name)
+func (c *Controller) createClusterFromObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ChiCluster, error) {
+ clusterName, err := chopmodel.GetClusterNameFromObjectMeta(objectMeta)
+ if err != nil {
+		return nil, fmt.Errorf("ObjectMeta %s is not generated by CHI: %v", objectMeta.Name, err)
}
chi, err := c.createChiFromObjectMeta(objectMeta)
@@ -183,7 +155,7 @@ func (c *Controller) createClusterFromObjectMeta(objectMeta *meta.ObjectMeta) (*
cluster := chi.FindCluster(clusterName)
if cluster == nil {
- return nil, fmt.Errorf("Can't find cluster %s in CHI %s", clusterName, chi.Name)
+ return nil, fmt.Errorf("can't find cluster %s in CHI %s", clusterName, chi.Name)
}
return cluster, nil
diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go
index 86bffaaee..44d5f8f2f 100644
--- a/pkg/controller/chi/labeler.go
+++ b/pkg/controller/chi/labeler.go
@@ -51,21 +51,21 @@ func (c *Controller) labelMyObjectsTree() {
namespace, ok2 := c.runtimeParams["OPERATOR_POD_NAMESPACE"]
if !ok1 || !ok2 {
- glog.V(1).Info("ERROR fetch Pod name out of %s/%s", namespace, podName)
+ glog.V(1).Infof("ERROR fetch Pod name out of %s/%s", namespace, podName)
return
}
// Pod namespaced name found, fetch it
pod, err := c.podLister.Pods(namespace).Get(podName)
if err != nil {
- glog.V(1).Info("ERROR get Pod %s/%s", namespace, podName)
+ glog.V(1).Infof("ERROR get Pod %s/%s", namespace, podName)
return
}
// Put label on the Pod
pod.Labels["version"] = c.version
if _, err := c.kubeClient.CoreV1().Pods(namespace).Update(pod); err != nil {
- glog.V(1).Info("ERROR put label on Pod %s/%s", namespace, podName)
+ glog.V(1).Infof("ERROR put label on Pod %s/%s %v", namespace, podName, err)
}
// Find parent ReplicaSet
@@ -81,21 +81,21 @@ func (c *Controller) labelMyObjectsTree() {
if replicaSetName == "" {
// ReplicaSet not found
- glog.V(1).Info("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName)
+ glog.V(1).Infof("ERROR ReplicaSet for Pod %s/%s not found", namespace, podName)
return
}
// ReplicaSet namespaced name found, fetch it
replicaSet, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Get(replicaSetName, v1.GetOptions{})
if err != nil {
- glog.V(1).Info("ERROR get ReplicaSet %s/%s", namespace, replicaSetName)
+ glog.V(1).Infof("ERROR get ReplicaSet %s/%s %v", namespace, replicaSetName, err)
return
}
// Put label on the ReplicaSet
replicaSet.Labels["version"] = c.version
if _, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Update(replicaSet); err != nil {
- glog.V(1).Info("ERROR put label on ReplicaSet %s/%s", namespace, replicaSetName)
+ glog.V(1).Infof("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err)
}
// Find parent Deployment
@@ -111,20 +111,20 @@ func (c *Controller) labelMyObjectsTree() {
if deploymentName == "" {
// Deployment not found
- glog.V(1).Info("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName)
+ glog.V(1).Infof("ERROR Deployment for %s Pod %s ReplicaSet %s not found", namespace, podName, replicaSetName)
return
}
// Deployment namespaced name found, fetch it
deployment, err := c.kubeClient.AppsV1().Deployments(namespace).Get(deploymentName, v1.GetOptions{})
if err != nil {
- glog.V(1).Info("ERROR get Deployment %s/%s", namespace, deploymentName)
+ glog.V(1).Infof("ERROR get Deployment %s/%s", namespace, deploymentName)
return
}
// Put label on the Deployment
deployment.Labels["version"] = c.version
if _, err := c.kubeClient.AppsV1().Deployments(namespace).Update(deployment); err != nil {
- glog.V(1).Info("ERROR put label on Deployment %s/%s", namespace, deploymentName)
+ glog.V(1).Infof("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err)
}
}
diff --git a/pkg/controller/chi/lister.go b/pkg/controller/chi/lister.go
new file mode 100644
index 000000000..556575b47
--- /dev/null
+++ b/pkg/controller/chi/lister.go
@@ -0,0 +1,28 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "k8s.io/apimachinery/pkg/labels"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func newListOptions(labelsMap map[string]string) metav1.ListOptions {
+ labelSelector := labels.SelectorFromSet(labelsMap)
+ return metav1.ListOptions{
+ LabelSelector: labelSelector.String(),
+ }
+}
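
newListOptions is what persistentVolumeClaimDelete uses to turn a replica-scope label map into client-go ListOptions. A minimal usage sketch; listReplicaPVCNames is hypothetical and the label map would normally come from Labeler.GetSelectorReplicaScope(replica):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

// listReplicaPVCNames lists PVCs that match a replica-scope label selector.
func listReplicaPVCNames(kubeClient kubernetes.Interface, namespace string, selector map[string]string) ([]string, error) {
	opts := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(selector).String(),
	}
	list, err := kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(opts)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for i := range list.Items {
		names = append(names, list.Items[i].Name)
	}
	return names, nil
}
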
diff --git a/pkg/model/ch_config.go b/pkg/model/ch_config.go
index 78db502c1..9d4751d63 100644
--- a/pkg/model/ch_config.go
+++ b/pkg/model/ch_config.go
@@ -23,6 +23,8 @@ import (
)
const (
+ distributedDDLPathPattern = "/clickhouse/%s/task_queue/ddl"
+
// Special auto-generated clusters. Each of these clusters lay over all replicas in CHI
// 1. Cluster with one shard and all replicas. Used to duplicate data over all replicas.
// 2. Cluster with all shards (1 replica). Used to gather/scatter data over all replicas.
@@ -284,6 +286,11 @@ func (c *ClickHouseConfigGenerator) GetHostMacros(replica *chiv1.ChiReplica) str
// 0-based shard index within all-shards-one-replica-cluster would always be GlobalReplicaIndex
cline(b, 8, "<%s-shard>%d%[1]s-shard>", allShardsOneReplicaClusterName, replica.Address.GlobalReplicaIndex)
+ // and macros are applicable to main cluster only. All aux clusters do not have ambiguous macros
+ // macro
+ cline(b, 8, "%s", replica.Address.ClusterName)
+ // macro
+ cline(b, 8, "%s", replica.Address.ShardName)
// replica id = full deployment id
// full deployment id is unique to identify replica within the cluster
cline(b, 8, "%s", CreatePodHostname(replica))
@@ -329,7 +336,7 @@ func (c *ClickHouseConfigGenerator) getRemoteServersReplicaHostname(replica *chi
// In case .Spec.Defaults.ReplicasUseFQDN is set replicas would use FQDN pod hostname,
// otherwise hostname+service name (unique within namespace) would be used
// .my-dev-namespace.svc.cluster.local
- return CreatePodHostname(replica) + "." + CreateNamespaceDomainName(replica.Address.Namespace)
+ return CreatePodFQDN(replica)
} else {
return CreatePodHostname(replica)
}
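
With the two extra macros each replica's macros.xml now names its cluster and shard in addition to the replica id. The constant below is only an illustrative rendering of the relevant fragment, with made-up cluster/shard/replica names; the surrounding wrapper elements and the auto-generated cluster macros are omitted:

package example

// exampleMacrosXML roughly shows the fragment GetHostMacros() renders for a
// replica of cluster "c1", shard "c1-0"; values are illustrative.
const exampleMacrosXML = `
    <macros>
        <cluster>c1</cluster>
        <shard>c1-0</shard>
        <replica>chi-example-c1-0-0</replica>
    </macros>
`
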
diff --git a/pkg/model/ch_config_sections.go b/pkg/model/ch_config_sections.go
new file mode 100644
index 000000000..2507afe95
--- /dev/null
+++ b/pkg/model/ch_config_sections.go
@@ -0,0 +1,82 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/config"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type configSections struct {
+ // commonConfigSections maps section name to section XML chopConfig
+ commonConfigSections map[string]string
+ // commonUsersConfigSections maps section name to section XML chopConfig
+ commonUsersConfigSections map[string]string
+
+ // ClickHouse config generator
+ chConfigGenerator *ClickHouseConfigGenerator
+ // clickhouse-operator configuration
+ chopConfig *config.Config
+}
+
+func NewConfigSections(chConfigGenerator *ClickHouseConfigGenerator, chopConfig *config.Config) *configSections {
+ return &configSections{
+ commonConfigSections: make(map[string]string),
+ commonUsersConfigSections: make(map[string]string),
+ chConfigGenerator: chConfigGenerator,
+ chopConfig: chopConfig,
+ }
+}
+
+func (c *configSections) CreateConfigsCommon() {
+ // commonConfigSections maps section name to section XML chopConfig of the following sections:
+ // 1. remote servers
+ // 2. zookeeper
+ // 3. settings
+ util.IncludeNonEmpty(c.commonConfigSections, filenameRemoteServersXML, c.chConfigGenerator.GetRemoteServers())
+ util.IncludeNonEmpty(c.commonConfigSections, filenameZookeeperXML, c.chConfigGenerator.GetZookeeper())
+ util.IncludeNonEmpty(c.commonConfigSections, filenameSettingsXML, c.chConfigGenerator.GetSettings())
+ // Extra user-specified configs
+ for filename, content := range c.chopConfig.ChCommonConfigs {
+ util.IncludeNonEmpty(c.commonConfigSections, filename, content)
+ }
+}
+
+func (c *configSections) CreateConfigsUsers() {
+ // commonConfigSections maps section name to section XML chopConfig of the following sections:
+ // 1. users
+ // 2. quotas
+ // 3. profiles
+ util.IncludeNonEmpty(c.commonUsersConfigSections, filenameUsersXML, c.chConfigGenerator.GetUsers())
+ util.IncludeNonEmpty(c.commonUsersConfigSections, filenameQuotasXML, c.chConfigGenerator.GetQuotas())
+ util.IncludeNonEmpty(c.commonUsersConfigSections, filenameProfilesXML, c.chConfigGenerator.GetProfiles())
+ // Extra user-specified configs
+ for filename, content := range c.chopConfig.ChUsersConfigs {
+ util.IncludeNonEmpty(c.commonUsersConfigSections, filename, content)
+ }
+}
+
+func (c *configSections) CreateConfigsPod(replica *v1.ChiReplica) map[string]string {
+ // Prepare for this replica deployment chopConfig files map as filename->content
+ podConfigSections := make(map[string]string)
+ util.IncludeNonEmpty(podConfigSections, filenameMacrosXML, c.chConfigGenerator.GetHostMacros(replica))
+ // Extra user-specified configs
+ for filename, content := range c.chopConfig.ChPodConfigs {
+ util.IncludeNonEmpty(podConfigSections, filename, content)
+ }
+
+ return podConfigSections
+}
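
configSections pulls the ConfigMap payload generation out of the old Creator into one place. An intra-package usage sketch (buildReplicaConfigs is hypothetical): common and users sections are built once per CHI, pod sections once per replica:

package model

import (
	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	"github.com/altinity/clickhouse-operator/pkg/config"
)

// buildReplicaConfigs is a hypothetical caller illustrating the intended flow.
func buildReplicaConfigs(
	chi *chiv1.ClickHouseInstallation,
	chopConfig *config.Config,
	replica *chiv1.ChiReplica,
) (common, users, pod map[string]string) {
	c := NewConfigSections(NewClickHouseConfigGenerator(chi), chopConfig)
	c.CreateConfigsCommon() // remote servers, zookeeper, settings
	c.CreateConfigsUsers()  // users, quotas, profiles
	return c.commonConfigSections, c.commonUsersConfigSections, c.CreateConfigsPod(replica)
}
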
diff --git a/pkg/model/const.go b/pkg/model/const.go
index 68ce40ae6..264f6334f 100644
--- a/pkg/model/const.go
+++ b/pkg/model/const.go
@@ -86,61 +86,6 @@ const (
dirPathClickHouseData = "/var/lib/clickhouse"
)
-const (
- // NAME READY AGE CONTAINERS IMAGES
- // statefulset.apps/ss-1eb454-1 0/1 2s ss-1eb454-1 yandex/clickhouse-server:latest
- statefulSetNamePattern = "chi-%s-%s-%s-%s"
-
- // NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
- // service/svc-1eb454-1 ClusterIP None 9000/TCP,9009/TCP,8123/TCP 2s clickhouse.altinity.com/app=ss-1eb454-1
- // service/svc-1eb454-2 ClusterIP None 9000/TCP,9009/TCP,8123/TCP 2s clickhouse.altinity.com/app=ss-1eb454-2
- // In this pattern "%s" is substituted with fullDeploymentIDPattern-generated value
- // Ex.: svc-1eb454-2
- statefulSetServiceNamePattern = "chi-%s-%s-%s-%s"
-
- // namespaceDomainPattern presents Domain Name pattern of a namespace
- // In this pattern "%s" is substituted namespace name's value
- // Ex.: my-dev-namespace.svc.cluster.local
- namespaceDomainPattern = "%s.svc.cluster.local"
-
- // NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
- // service/clickhouse-replcluster ClusterIP None 9000/TCP,9009/TCP,8123/TCP 1h
- // In this pattern "%s" is substituted with clickhouse installation name - 'replcluster' in this case
- // Ex.: test
- chiServiceNamePattern = "clickhouse-%s"
-
- // ServiceName.domain.name
- chiServiceFQDNPattern = "%s" + "." + namespaceDomainPattern
-
- // podFQDNPattern consists of 3 parts:
- // 1. nameless service of of stateful set
- // 2. namespace name
- // Hostname.domain.name
- podFQDNPattern = "%s" + "." + namespaceDomainPattern
-
- // podNamePattern is a name of a Pod as ServiceName-0
- podNamePattern = "%s-0"
-
- // NAME DATA AGE
- // chi-example-01-common-configd 2 2s
- // chi-example-01-common-usersd 0 2s
- // chi-example-01-deploy-confd-4a8ff63336-0 1 1s
-
- // configMapCommonNamePattern is a template of common settings for the CHI ConfigMap
- // Ex.: chi-example02-common-configd for chi named as 'example02'
- configMapCommonNamePattern = "chi-%s-common-configd"
-
- // configMapCommonUsersNamePattern is a template of common users settings for the CHI ConfigMap
- // Ex.: chi-example02-common-usersd for chi named as 'example02'
- configMapCommonUsersNamePattern = "chi-%s-common-usersd"
-
- // configMapDeploymentNamePattern is a template of macros ConfigMap
- // Ex.: chi-example02-deploy-confd-33260f1800-2 for chi named as 'example02'
- configMapDeploymentNamePattern = "chi-%s-deploy-confd-%s-%s-%s"
-
- distributedDDLPathPattern = "/clickhouse/%s/task_queue/ddl"
-)
-
const (
// Default docker image to be used
defaultClickHouseDockerImage = "yandex/clickhouse-server:latest"
@@ -163,3 +108,8 @@ const (
// Default value for ClusterIP service
templateDefaultsServiceClusterIP = "None"
)
+
+const (
+ podDistributionOnePerHost = "OnePerHost"
+ podDistributionUnspecified = "Unspecified"
+)
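
The Zone and Distribution fields added to ChiPodTemplate, together with podDistributionOnePerHost, suggest pod templates can now request zone pinning and one-pod-per-host spreading. The actual translation into scheduling constraints is not shown in this section; the sketch below is one plausible mapping onto nodeAffinity/podAntiAffinity, not the patch's implementation (label keys and the helper name are assumptions):

package example

import (
	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// applyZoneAndDistribution is a hypothetical helper turning the new template
// fields into scheduling constraints.
func applyZoneAndDistribution(template *chiv1.ChiPodTemplate, spec *corev1.PodSpec, selector map[string]string) {
	affinity := &corev1.Affinity{}

	if len(template.Zone.Values) > 0 {
		// Pin pods to nodes whose Zone.Key label carries one of Zone.Values
		affinity.NodeAffinity = &corev1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      template.Zone.Key,
						Operator: corev1.NodeSelectorOpIn,
						Values:   template.Zone.Values,
					}},
				}},
			},
		}
	}

	if template.Distribution == "OnePerHost" { // podDistributionOnePerHost
		// Forbid two matching pods on the same node
		affinity.PodAntiAffinity = &corev1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
				LabelSelector: &metav1.LabelSelector{MatchLabels: selector},
				TopologyKey:   "kubernetes.io/hostname",
			}},
		}
	}

	spec.Affinity = affinity
}
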
diff --git a/pkg/model/creator.go b/pkg/model/creator.go
index 8c1640032..38a4e80f3 100644
--- a/pkg/model/creator.go
+++ b/pkg/model/creator.go
@@ -16,292 +16,188 @@ package model
import (
chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/config"
"github.com/altinity/clickhouse-operator/pkg/util"
- "github.com/golang/glog"
+ "k8s.io/apimachinery/pkg/util/intstr"
+
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/intstr"
-)
-
-// Creator is the base struct to create k8s objects
-type Creator struct {
- appVersion string
- chi *chiv1.ClickHouseInstallation
- chopConfig *config.Config
- chConfigGenerator *ClickHouseConfigGenerator
-
- podTemplatesIndex podTemplatesIndex
- volumeClaimTemplatesIndex volumeClaimTemplatesIndex
-}
-
-// NewCreator creates new creator
-func NewCreator(chi *chiv1.ClickHouseInstallation, chopConfig *config.Config, appVersion string) *Creator {
- creator := &Creator{
- chi: chi,
- chopConfig: chopConfig,
- appVersion: appVersion,
- chConfigGenerator: NewClickHouseConfigGenerator(chi),
- }
- creator.createPodTemplatesIndex()
- creator.createVolumeClaimTemplatesIndex()
-
- return creator
-}
-
-// ChiCreateObjects returns a map of the k8s objects created based on ClickHouseInstallation Object properties
-func (c *Creator) CreateObjects() []interface{} {
- list := make([]interface{}, 0)
- list = append(list, c.createServiceObjects())
- list = append(list, c.createConfigMapObjects())
- list = append(list, c.createStatefulSetObjects())
-
- return list
-}
-
-// createConfigMapObjects returns a list of corev1.ConfigMap objects
-func (c *Creator) createConfigMapObjects() ConfigMapList {
- configMapList := make(ConfigMapList, 0)
- configMapList = append(
- configMapList,
- c.createConfigMapObjectsCommon()...,
- )
- configMapList = append(
- configMapList,
- c.createConfigMapObjectsPod()...,
- )
-
- return configMapList
-}
-
-// createConfigMapObjectsCommon returns a list of corev1.ConfigMap objects
-func (c *Creator) createConfigMapObjectsCommon() ConfigMapList {
- var configs configSections
-
- // commonConfigSections maps section name to section XML chopConfig of the following sections:
- // 1. remote servers
- // 2. zookeeper
- // 3. settings
- configs.commonConfigSections = make(map[string]string)
- util.IncludeNonEmpty(configs.commonConfigSections, filenameRemoteServersXML, c.chConfigGenerator.GetRemoteServers())
- util.IncludeNonEmpty(configs.commonConfigSections, filenameZookeeperXML, c.chConfigGenerator.GetZookeeper())
- util.IncludeNonEmpty(configs.commonConfigSections, filenameSettingsXML, c.chConfigGenerator.GetSettings())
- // Extra user-specified configs
- for filename, content := range c.chopConfig.ChCommonConfigs {
- util.IncludeNonEmpty(configs.commonConfigSections, filename, content)
- }
-
- // commonConfigSections maps section name to section XML chopConfig of the following sections:
- // 1. users
- // 2. quotas
- // 3. profiles
- configs.commonUsersConfigSections = make(map[string]string)
- util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameUsersXML, c.chConfigGenerator.GetUsers())
- util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameQuotasXML, c.chConfigGenerator.GetQuotas())
- util.IncludeNonEmpty(configs.commonUsersConfigSections, filenameProfilesXML, c.chConfigGenerator.GetProfiles())
- // Extra user-specified configs
- for filename, content := range c.chopConfig.ChUsersConfigs {
- util.IncludeNonEmpty(configs.commonUsersConfigSections, filename, content)
- }
- // There are two types of configs, kept in ConfigMaps:
- // 1. Common configs - for all resources in the CHI (remote servers, zookeeper setup, etc)
- // consists of common configs and common users configs
- // 2. Personal configs - macros chopConfig
- // configMapList contains all configs so we need deploymentsNum+2 ConfigMap objects
- // personal chopConfig for each deployment and +2 for common chopConfig + common user chopConfig
- configMapList := make(ConfigMapList, 0)
-
- // ConfigMap common for all resources in CHI
- // contains several sections, mapped as separated chopConfig files,
- // such as remote servers, zookeeper setup, etc
- configMapList = append(
- configMapList,
- &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: CreateConfigMapCommonName(c.chi),
- Namespace: c.chi.Namespace,
- Labels: c.getLabelsCommonObject(),
- },
- // Data contains several sections which are to be several xml chopConfig files
- Data: configs.commonConfigSections,
- },
- )
+ "github.com/golang/glog"
+)
- // ConfigMap common for all users resources in CHI
- configMapList = append(
- configMapList,
- &corev1.ConfigMap{
+// createServiceChi creates new corev1.Service
+func (r *Reconciler) createServiceChi(chi *chiv1.ClickHouseInstallation) *corev1.Service {
+ serviceName := CreateChiServiceName(chi)
+
+ glog.V(1).Infof("createServiceChi(%s/%s)", chi.Namespace, serviceName)
+ if template, ok := r.chi.GetOwnServiceTemplate(); ok {
+ // .templates.ServiceTemplate specified
+ return r.createServiceFromTemplate(
+ template,
+ r.chi.Namespace,
+ serviceName,
+ r.labeler.getLabelsChiScope(),
+ r.labeler.getSelectorChiScope(),
+ )
+ } else {
+ // Incorrect/unknown .templates.ServiceTemplate specified
+ // Create default Service
+ return &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
- Name: CreateConfigMapCommonUsersName(c.chi),
- Namespace: c.chi.Namespace,
- Labels: c.getLabelsCommonObject(),
+ Name: serviceName,
+ Namespace: r.chi.Namespace,
+ Labels: r.labeler.getLabelsChiScope(),
},
- // Data contains several sections which are to be several xml chopConfig files
- Data: configs.commonUsersConfigSections,
- },
- )
-
- return configMapList
-}
-
-// createConfigMapObjectsPod returns a list of corev1.ConfigMap objects
-func (c *Creator) createConfigMapObjectsPod() ConfigMapList {
- configMapList := make(ConfigMapList, 0)
- replicaProcessor := func(replica *chiv1.ChiReplica) error {
- // Prepare for this replica deployment chopConfig files map as filename->content
- podConfigSections := make(map[string]string)
- util.IncludeNonEmpty(podConfigSections, filenameMacrosXML, c.chConfigGenerator.GetHostMacros(replica))
- // Extra user-specified configs
- for filename, content := range c.chopConfig.ChPodConfigs {
- util.IncludeNonEmpty(podConfigSections, filename, content)
- }
-
- // Add corev1.ConfigMap object to the list
- configMapList = append(
- configMapList,
- &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: CreateConfigMapPodName(replica),
- Namespace: replica.Address.Namespace,
- Labels: c.getLabelsReplica(replica, false),
+ Spec: corev1.ServiceSpec{
+ // ClusterIP: templateDefaultsServiceClusterIP,
+ Ports: []corev1.ServicePort{
+ {
+ Name: chDefaultHTTPPortName,
+ Port: chDefaultHTTPPortNumber,
+ },
+ {
+ Name: chDefaultClientPortName,
+ Port: chDefaultClientPortNumber,
+ },
},
- Data: podConfigSections,
+ Selector: r.labeler.getSelectorChiScope(),
+ Type: "LoadBalancer",
},
- )
-
- return nil
+ }
}
- c.chi.WalkReplicas(replicaProcessor)
-
- return configMapList
}
-// createServiceObjects returns a list of corev1.Service objects
-func (c *Creator) createServiceObjects() ServiceList {
- // We'd like to create "number of deployments" + 1 kubernetes services in order to provide access
- // to each deployment separately and one common predictably-named access point - common service
- serviceList := make(ServiceList, 0)
- serviceList = append(
- serviceList,
- c.createServiceObjectsCommon()...,
- )
- serviceList = append(
- serviceList,
- c.createServiceObjectsPod()...,
- )
-
- return serviceList
-}
-
-func (c *Creator) createServiceObjectsCommon() ServiceList {
- // Create one predictably-named service to access the whole installation
- // NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- // service/clickhouse-replcluster ClusterIP None 9000/TCP,9009/TCP,8123/TCP 1h
- return ServiceList{
- c.createServiceObjectChi(CreateChiServiceName(c.chi)),
+// createServiceCluster creates new corev1.Service for a cluster
+func (r *Reconciler) createServiceCluster(cluster *chiv1.ChiCluster) *corev1.Service {
+ serviceName := CreateClusterServiceName(cluster)
+
+ glog.V(1).Infof("createServiceCluster(%s/%s)", cluster.Address.Namespace, serviceName)
+ if template, ok := cluster.GetServiceTemplate(); ok {
+ // .templates.ServiceTemplate specified
+ return r.createServiceFromTemplate(
+ template,
+ cluster.Address.Namespace,
+ serviceName,
+ r.labeler.getLabelsClusterScope(cluster),
+ r.labeler.getSelectorClusterScope(cluster),
+ )
+ } else {
+ return nil
}
}
-func (c *Creator) createServiceObjectsPod() ServiceList {
- // Create "number of pods" service - one service for each stateful set
- // Each replica has its stateful set and each stateful set has it service
- // NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- // service/chi-01a1ce7dce-2 ClusterIP None 9000/TCP,9009/TCP,8123/TCP 1h
- serviceList := make(ServiceList, 0)
-
- replicaProcessor := func(replica *chiv1.ChiReplica) error {
- // Add corev1.Service object to the list
- serviceList = append(
- serviceList,
- c.createServiceObjectForStatefulSet(replica),
+// createServiceShard creates new corev1.Service for a shard
+func (r *Reconciler) createServiceShard(shard *chiv1.ChiShard) *corev1.Service {
+ serviceName := CreateShardServiceName(shard)
+
+ glog.V(1).Infof("createServiceShard(%s/%s)", shard.Address.Namespace, serviceName)
+ if template, ok := shard.GetServiceTemplate(); ok {
+ // .templates.ServiceTemplate specified
+ return r.createServiceFromTemplate(
+ template,
+ shard.Address.Namespace,
+ serviceName,
+ r.labeler.getLabelsShardScope(shard),
+ r.labeler.getSelectorShardScope(shard),
)
+ } else {
return nil
}
- c.chi.WalkReplicas(replicaProcessor)
-
- return serviceList
}
-func (c *Creator) createServiceObjectChi(serviceName string) *corev1.Service {
- glog.V(1).Infof("createServiceObjectChi() for service %s", serviceName)
- return &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Namespace: c.chi.Namespace,
- Labels: c.getLabelsCommonObject(),
- },
- Spec: corev1.ServiceSpec{
- // ClusterIP: templateDefaultsServiceClusterIP,
- Ports: []corev1.ServicePort{
- {
- Name: chDefaultHTTPPortName,
- Port: chDefaultHTTPPortNumber,
- },
- {
- Name: chDefaultClientPortName,
- Port: chDefaultClientPortNumber,
+// createServiceReplica creates new corev1.Service
+func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Service {
+ serviceName := CreateStatefulSetServiceName(replica)
+ statefulSetName := CreateStatefulSetName(replica)
+
+ glog.V(1).Infof("createServiceReplica(%s/%s) for Set %s", replica.Address.Namespace, serviceName, statefulSetName)
+ if template, ok := replica.GetServiceTemplate(); ok {
+ // .templates.ServiceTemplate specified
+ return r.createServiceFromTemplate(
+ template,
+ replica.Address.Namespace,
+ serviceName,
+ r.labeler.getLabelsReplicaScope(replica, false),
+ r.labeler.GetSelectorReplicaScope(replica),
+ )
+ } else {
+ // Incorrect/unknown .templates.ServiceTemplate specified
+ // Create default Service
+ return &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: serviceName,
+ Namespace: replica.Address.Namespace,
+ Labels: r.labeler.getLabelsReplicaScope(replica, false),
+ },
+ Spec: corev1.ServiceSpec{
+ Ports: []corev1.ServicePort{
+ {
+ Name: chDefaultHTTPPortName,
+ Port: chDefaultHTTPPortNumber,
+ },
+ {
+ Name: chDefaultClientPortName,
+ Port: chDefaultClientPortNumber,
+ },
+ {
+ Name: chDefaultInterServerPortName,
+ Port: chDefaultInterServerPortNumber,
+ },
},
+ Selector: r.labeler.GetSelectorReplicaScope(replica),
+ ClusterIP: templateDefaultsServiceClusterIP,
+ Type: "ClusterIP",
},
- Selector: c.getSelectorCommonObject(),
- Type: "LoadBalancer",
- },
+ }
}
}
-func (c *Creator) createServiceObjectForStatefulSet(replica *chiv1.ChiReplica) *corev1.Service {
- serviceName := CreateStatefulSetServiceName(replica)
- statefulSetName := CreateStatefulSetName(replica)
+// createServiceFromTemplate creates a Service from ChiServiceTemplate and additional info
+func (r *Reconciler) createServiceFromTemplate(
+ template *chiv1.ChiServiceTemplate,
+ namespace string,
+ name string,
+ labels map[string]string,
+ selector map[string]string,
+) *corev1.Service {
+ // Verify Ports
+ for i := range template.Spec.Ports {
+ servicePort := &template.Spec.Ports[i]
+ if (servicePort.Port < 1) || (servicePort.Port > 65535) {
+ glog.V(1).Infof("createServiceFromTemplate(%s/%s) INCORRECT PORT: %d ", namespace, name, servicePort.Port)
+ return nil
+ }
+ }
- glog.V(1).Infof("createServiceObjectForStatefulSet() for service %s %s", serviceName, statefulSetName)
- return &corev1.Service{
+ service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Namespace: replica.Address.Namespace,
- Labels: c.getLabelsReplica(replica, false),
- },
- Spec: corev1.ServiceSpec{
- Ports: []corev1.ServicePort{
- {
- Name: chDefaultHTTPPortName,
- Port: chDefaultHTTPPortNumber,
- },
- {
- Name: chDefaultClientPortName,
- Port: chDefaultClientPortNumber,
- },
- {
- Name: chDefaultInterServerPortName,
- Port: chDefaultInterServerPortNumber,
- },
- },
- Selector: c.getSelectorReplica(replica),
- ClusterIP: templateDefaultsServiceClusterIP,
- Type: "ClusterIP",
+ Name: name,
+ Namespace: namespace,
+ Labels: labels,
},
+ Spec: *template.Spec.DeepCopy(),
}
-}
+ // Append provided Selector to already specified Selector in template
+ service.Spec.Selector = util.MergeStringMaps(service.Spec.Selector, selector)
-// createStatefulSetObjects returns a list of apps.StatefulSet objects
-func (c *Creator) createStatefulSetObjects() StatefulSetList {
- statefulSetList := make(StatefulSetList, 0)
-
- // Create list of apps.StatefulSet objects
- // StatefulSet is created for each replica.Deployment
+ return service
+}
- replicaProcessor := func(replica *chiv1.ChiReplica) error {
- glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s", CreateStatefulSetName(replica))
- // Append new StatefulSet to the list of stateful sets
- statefulSetList = append(statefulSetList, c.createStatefulSetObject(replica))
- return nil
+// createConfigMapReplica creates new corev1.ConfigMap
+func (r *Reconciler) createConfigMapReplica(replica *chiv1.ChiReplica) *corev1.ConfigMap {
+ return &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: CreateConfigMapPodName(replica),
+ Namespace: replica.Address.Namespace,
+ Labels: r.labeler.getLabelsReplicaScope(replica, false),
+ },
+ Data: r.chConfigSectionsGenerator.CreateConfigsPod(replica),
}
- c.chi.WalkReplicas(replicaProcessor)
-
- return statefulSetList
}
-func (c *Creator) createStatefulSetObject(replica *chiv1.ChiReplica) *apps.StatefulSet {
+// createStatefulSet creates new apps.StatefulSet
+func (r *Reconciler) createStatefulSet(replica *chiv1.ChiReplica) *apps.StatefulSet {
statefulSetName := CreateStatefulSetName(replica)
serviceName := CreateStatefulSetServiceName(replica)
@@ -312,13 +208,13 @@ func (c *Creator) createStatefulSetObject(replica *chiv1.ChiReplica) *apps.State
ObjectMeta: metav1.ObjectMeta{
Name: statefulSetName,
Namespace: replica.Address.Namespace,
- Labels: c.getLabelsReplica(replica, true),
+ Labels: r.labeler.getLabelsReplicaScope(replica, true),
},
Spec: apps.StatefulSetSpec{
Replicas: &replicasNum,
ServiceName: serviceName,
Selector: &metav1.LabelSelector{
- MatchLabels: c.getSelectorReplica(replica),
+ MatchLabels: r.labeler.GetSelectorReplicaScope(replica),
},
// IMPORTANT
// VolumeClaimTemplates are to be setup later
@@ -330,56 +226,50 @@ func (c *Creator) createStatefulSetObject(replica *chiv1.ChiReplica) *apps.State
},
}
- c.setupStatefulSetPodTemplate(statefulSet, replica)
- c.setupStatefulSetVolumeClaimTemplates(statefulSet, replica)
+ r.setupStatefulSetPodTemplate(statefulSet, replica)
+ r.setupStatefulSetVolumeClaimTemplates(statefulSet, replica)
return statefulSet
}
-func (c *Creator) setupStatefulSetPodTemplate(
- statefulSetObject *apps.StatefulSet,
- replica *chiv1.ChiReplica,
-) {
+// setupStatefulSetPodTemplate performs PodTemplate setup of StatefulSet
+func (r *Reconciler) setupStatefulSetPodTemplate(statefulSetObject *apps.StatefulSet, replica *chiv1.ChiReplica) {
statefulSetName := CreateStatefulSetName(replica)
- podTemplateName := replica.Templates.PodTemplate
// Initial PodTemplateSpec value
// All the rest fields would be filled later
statefulSetObject.Spec.Template = corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: c.getLabelsReplica(replica, true),
+ Labels: r.labeler.getLabelsReplicaScope(replica, true),
},
}
// Specify pod templates - either explicitly defined or default
- if podTemplate, ok := c.getPodTemplate(podTemplateName); ok {
+ if podTemplate, ok := replica.GetPodTemplate(); ok {
// Replica references known PodTemplate
copyPodTemplateFrom(statefulSetObject, podTemplate)
- glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - template: %s", statefulSetName, podTemplateName)
+ glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - template used", statefulSetName)
} else {
// Replica references UNKNOWN PodTemplate
copyPodTemplateFrom(statefulSetObject, createDefaultPodTemplate(statefulSetName))
glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - default template", statefulSetName)
}
- c.setupConfigMapVolumes(statefulSetObject, replica)
+ r.setupConfigMapVolumes(statefulSetObject, replica)
}
// setupConfigMapVolumes adds to each container in the Pod VolumeMount objects with
-func (c *Creator) setupConfigMapVolumes(
- statefulSetObject *apps.StatefulSet,
- replica *chiv1.ChiReplica,
-) {
+func (r *Reconciler) setupConfigMapVolumes(statefulSetObject *apps.StatefulSet, replica *chiv1.ChiReplica) {
configMapMacrosName := CreateConfigMapPodName(replica)
- configMapCommonName := CreateConfigMapCommonName(c.chi)
- configMapCommonUsersName := CreateConfigMapCommonUsersName(c.chi)
+ configMapCommonName := CreateConfigMapCommonName(r.chi)
+ configMapCommonUsersName := CreateConfigMapCommonUsersName(r.chi)
// Add all ConfigMap objects as Volume objects of type ConfigMap
statefulSetObject.Spec.Template.Spec.Volumes = append(
statefulSetObject.Spec.Template.Spec.Volumes,
- createVolumeObjectConfigMap(configMapCommonName),
- createVolumeObjectConfigMap(configMapCommonUsersName),
- createVolumeObjectConfigMap(configMapMacrosName),
+ createVolumeForConfigMap(configMapCommonName),
+ createVolumeForConfigMap(configMapCommonUsersName),
+ createVolumeForConfigMap(configMapMacrosName),
)
// And reference these Volumes in each Container via VolumeMount
@@ -390,29 +280,30 @@ func (c *Creator) setupConfigMapVolumes(
// Append to each Container current VolumeMount's to VolumeMount's declared in template
container.VolumeMounts = append(
container.VolumeMounts,
- createVolumeMountObject(configMapCommonName, dirPathConfigd),
- createVolumeMountObject(configMapCommonUsersName, dirPathUsersd),
- createVolumeMountObject(configMapMacrosName, dirPathConfd),
+ createVolumeMount(configMapCommonName, dirPathConfigd),
+ createVolumeMount(configMapCommonUsersName, dirPathUsersd),
+ createVolumeMount(configMapMacrosName, dirPathConfd),
)
}
}
-func (c *Creator) setupStatefulSetVolumeClaimTemplates(
- statefulSetObject *apps.StatefulSet,
+// setupStatefulSetVolumeClaimTemplates performs VolumeClaimTemplate setup for Containers in PodTemplate of a StatefulSet
+func (r *Reconciler) setupStatefulSetVolumeClaimTemplates(
+ statefulSet *apps.StatefulSet,
replica *chiv1.ChiReplica,
) {
// Append VolumeClaimTemplates, that are referenced in Containers' VolumeMount object(s)
// to StatefulSet's Spec.VolumeClaimTemplates slice, so these
statefulSetName := CreateStatefulSetName(replica)
- for i := range statefulSetObject.Spec.Template.Spec.Containers {
+ for i := range statefulSet.Spec.Template.Spec.Containers {
// Convenience wrapper
- container := &statefulSetObject.Spec.Template.Spec.Containers[i]
+ container := &statefulSet.Spec.Template.Spec.Containers[i]
for j := range container.VolumeMounts {
// Convenience wrapper
volumeMount := &container.VolumeMounts[j]
- if volumeClaimTemplate, ok := c.getVolumeClaimTemplate(volumeMount.Name); ok {
+ if volumeClaimTemplate, ok := r.chi.GetVolumeClaimTemplate(volumeMount.Name); ok {
// Found VolumeClaimTemplate to mount by VolumeMount
- appendVolumeClaimTemplateFrom(statefulSetObject, volumeClaimTemplate)
+ appendVolumeClaimTemplateFrom(statefulSet, volumeClaimTemplate)
}
}
}
@@ -421,7 +312,7 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates(
//
// We want to mount this default VolumeClaimTemplate into /var/lib/clickhouse in case:
// 1. This default VolumeClaimTemplate is not already mounted with any VolumeMount
- // 2. And /var/lib/clickhouse is not already mounted with any VolumeMount
+ // 2. And /var/lib/clickhouse is not already mounted with any VolumeMount
defaultVolumeClaimTemplateName := replica.Templates.VolumeClaimTemplate
@@ -430,15 +321,16 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates(
return
}
- if _, ok := c.getVolumeClaimTemplate(defaultVolumeClaimTemplateName); !ok {
- // Incorrect .templates.VolumeClaimTemplate specified
+ if _, ok := r.chi.GetVolumeClaimTemplate(defaultVolumeClaimTemplateName); !ok {
+ // Incorrect/unknown .templates.VolumeClaimTemplate specified
return
}
// 1. Check explicit usage - whether this default VolumeClaimTemplate is already listed in VolumeMount
- for i := range statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts {
+ clickHouseContainer := getClickHouseContainer(statefulSet)
+ for i := range clickHouseContainer.VolumeMounts {
// Convenience wrapper
- volumeMount := &statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts[i]
+ volumeMount := &clickHouseContainer.VolumeMounts[i]
if volumeMount.Name == defaultVolumeClaimTemplateName {
// This .templates.VolumeClaimTemplate is already used in VolumeMount
glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - VC template 1: %s", statefulSetName, volumeMount.Name)
@@ -451,9 +343,9 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates(
// However, mount point /var/lib/clickhouse may be used already explicitly. Need to check
// 2. Check whether /var/lib/clickhouse is already mounted
- for i := range statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts {
+ for i := range clickHouseContainer.VolumeMounts {
// Convenience wrapper
- volumeMount := &statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts[i]
+ volumeMount := &clickHouseContainer.VolumeMounts[i]
if volumeMount.MountPath == dirPathClickHouseData {
// /var/lib/clickhouse is already mounted
glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - VC template 2: /var/lib/clickhouse already mounted", statefulSetName)
@@ -463,13 +355,13 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates(
// This default volumeClaimTemplate is not used explicitly by name and /var/lib/clickhouse is not mounted also.
// Let's mount this default VolumeClaimTemplate into /var/lib/clickhouse
- if template, ok := c.getVolumeClaimTemplate(defaultVolumeClaimTemplateName); ok {
+ if template, ok := r.chi.GetVolumeClaimTemplate(defaultVolumeClaimTemplateName); ok {
// Add VolumeClaimTemplate to StatefulSet
- appendVolumeClaimTemplateFrom(statefulSetObject, template)
+ appendVolumeClaimTemplateFrom(statefulSet, template)
// Add VolumeMount to ClickHouse container to /var/lib/clickhouse point
- statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts = append(
- statefulSetObject.Spec.Template.Spec.Containers[ClickHouseContainerIndex].VolumeMounts,
- createVolumeMountObject(replica.Templates.VolumeClaimTemplate, dirPathClickHouseData),
+ clickHouseContainer.VolumeMounts = append(
+ clickHouseContainer.VolumeMounts,
+ createVolumeMount(replica.Templates.VolumeClaimTemplate, dirPathClickHouseData),
)
}
@@ -538,8 +430,8 @@ func createDefaultPodTemplate(name string) *chiv1.ChiPodTemplate {
}
}
-// createVolumeObjectConfigMap returns corev1.Volume object with defined name
-func createVolumeObjectConfigMap(name string) corev1.Volume {
+// createVolumeForConfigMap returns corev1.Volume object with defined name
+func createVolumeForConfigMap(name string) corev1.Volume {
return corev1.Volume{
Name: name,
VolumeSource: corev1.VolumeSource{
@@ -552,42 +444,15 @@ func createVolumeObjectConfigMap(name string) corev1.Volume {
}
}
-// createVolumeMountObject returns corev1.VolumeMount object with name and mount path
-func createVolumeMountObject(name, mountPath string) corev1.VolumeMount {
+// createVolumeMount returns corev1.VolumeMount object with name and mount path
+func createVolumeMount(name, mountPath string) corev1.VolumeMount {
return corev1.VolumeMount{
Name: name,
MountPath: mountPath,
}
}
-// createVolumeClaimTemplatesIndex creates a map of volumeClaimTemplatesIndexData used as a reference storage for VolumeClaimTemplates
-func (c *Creator) createVolumeClaimTemplatesIndex() {
- c.volumeClaimTemplatesIndex = make(volumeClaimTemplatesIndex)
- for i := range c.chi.Spec.Templates.VolumeClaimTemplates {
- // Convenience wrapper
- volumeClaimTemplate := &c.chi.Spec.Templates.VolumeClaimTemplates[i]
- c.volumeClaimTemplatesIndex[volumeClaimTemplate.Name] = volumeClaimTemplate
- }
-}
-
-// getVolumeClaimTemplate gets VolumeClaimTemplate by name
-func (c *Creator) getVolumeClaimTemplate(name string) (*chiv1.ChiVolumeClaimTemplate, bool) {
- volumeClaimTemplate, ok := c.volumeClaimTemplatesIndex[name]
- return volumeClaimTemplate, ok
-}
-
-// createPodTemplatesIndex creates a map of podTemplatesIndexData used as a reference storage for PodTemplates
-func (c *Creator) createPodTemplatesIndex() {
- c.podTemplatesIndex = make(podTemplatesIndex)
- for i := range c.chi.Spec.Templates.PodTemplates {
- // Convenience wrapper
- podTemplate := &c.chi.Spec.Templates.PodTemplates[i]
- c.podTemplatesIndex[podTemplate.Name] = podTemplate
- }
-}
-
-// getPodTemplate gets PodTemplate by name
-func (c *Creator) getPodTemplate(name string) (*chiv1.ChiPodTemplate, bool) {
- podTemplate, ok := c.podTemplatesIndex[name]
- return podTemplate, ok
+// getClickHouseContainer finds the ClickHouse Container among all containers of the Pod specified in StatefulSet
+func getClickHouseContainer(statefulSet *apps.StatefulSet) *corev1.Container {
+ return &statefulSet.Spec.Template.Spec.Containers[ClickHouseContainerIndex]
}
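
For reference, a minimal sketch (not part of the patch) of what the new createServiceFromTemplate flow yields: the template's Spec is deep-copied into the Service and the scope selector is merged on top of whatever selector the template carried. The Service name, namespace and label key below are illustrative stand-ins.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/altinity/clickhouse-operator/pkg/util"
)

func main() {
	// Spec taken from a user-provided ChiServiceTemplate
	templateSpec := corev1.ServiceSpec{
		Type:     corev1.ServiceTypeLoadBalancer,
		Selector: map[string]string{"custom": "selector"},
		Ports:    []corev1.ServicePort{{Name: "http", Port: 8123}},
	}
	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "clickhouse-demo", Namespace: "dev"},
		Spec:       *templateSpec.DeepCopy(),
	}
	// Scope selector wins on conflicting keys; other template keys survive
	service.Spec.Selector = util.MergeStringMaps(service.Spec.Selector, map[string]string{
		"clickhouse.altinity.com/chi": "demo", // stand-in for labeler.getSelectorChiScope()
	})
	fmt.Println(service.Spec.Selector)
}
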
diff --git a/pkg/model/deleter.go b/pkg/model/deleter.go
new file mode 100644
index 000000000..d163fdc76
--- /dev/null
+++ b/pkg/model/deleter.go
@@ -0,0 +1,39 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+func ReplicaCanDeletePVC(replica *chiv1.ChiReplica) bool {
+ templateName := replica.Templates.VolumeClaimTemplate
+ template, ok := replica.Chi.GetVolumeClaimTemplate(templateName)
+ if !ok {
+		// Unknown template name, which is strange - allow PVC deletion by default
+ return true
+ }
+
+ switch template.PVCReclaimPolicy {
+ case chiv1.PVCReclaimPolicyRetain:
+ return false
+ case chiv1.PVCReclaimPolicyDelete:
+ return true
+ default:
+ // Unknown PVCReclaimPolicy
+ return true
+ }
+
+}
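
A small usage sketch (outside the patch) for the new ReplicaCanDeletePVC helper; the deletePVC callback is a hypothetical placeholder for the controller's real client call, and the PVC name simply follows the standard StatefulSet claim naming.

package controller

import (
	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	"github.com/altinity/clickhouse-operator/pkg/model"
)

// deleteReplicaPVC checks the VolumeClaimTemplate's reclaim policy before
// dropping the replica's PersistentVolumeClaim.
func deleteReplicaPVC(replica *chiv1.ChiReplica, deletePVC func(namespace, name string) error) error {
	if !model.ReplicaCanDeletePVC(replica) {
		// PVCReclaimPolicyRetain - keep the volume around
		return nil
	}
	// PVC name below is illustrative: <claimTemplate>-<statefulSet>-<ordinal>
	pvcName := replica.Templates.VolumeClaimTemplate + "-" + model.CreateStatefulSetName(replica) + "-0"
	return deletePVC(replica.Address.Namespace, pvcName)
}
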
diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go
index 9d2454f17..059907ef5 100644
--- a/pkg/model/labeler.go
+++ b/pkg/model/labeler.go
@@ -21,29 +21,79 @@ import (
"k8s.io/apimachinery/pkg/labels"
)
-func (c *Creator) getLabelsCommonObject() map[string]string {
+type Labeler struct {
+ version string
+ chi *chi.ClickHouseInstallation
+}
+
+func NewLabeler(version string, chi *chi.ClickHouseInstallation) *Labeler {
+ return &Labeler{
+ version: version,
+ chi: chi,
+ }
+}
+
+func (l *Labeler) getLabelsChiScope() map[string]string {
return map[string]string{
LabelApp: LabelAppValue,
- LabelChop: c.appVersion,
- LabelChi: nameSectionChi(c.chi),
+ LabelChop: l.version,
+ LabelChi: getNamePartChiName(l.chi),
+ }
+}
+
+func (l *Labeler) getSelectorChiScope() map[string]string {
+ return map[string]string{
+ LabelApp: LabelAppValue,
+ LabelChi: getNamePartChiName(l.chi),
+ }
+}
+
+func (l *Labeler) getLabelsClusterScope(cluster *chi.ChiCluster) map[string]string {
+ return map[string]string{
+ LabelApp: LabelAppValue,
+ LabelChop: l.version,
+ LabelChi: getNamePartChiName(cluster),
+ LabelCluster: getNamePartClusterName(cluster),
}
}
-func (c *Creator) getSelectorCommonObject() map[string]string {
+func (l *Labeler) getSelectorClusterScope(cluster *chi.ChiCluster) map[string]string {
return map[string]string{
LabelApp: LabelAppValue,
- LabelChi: nameSectionChi(c.chi),
+ // skip chop
+ LabelChi: getNamePartChiName(cluster),
+ LabelCluster: getNamePartClusterName(cluster),
}
}
-func (c *Creator) getLabelsReplica(replica *chi.ChiReplica, zk bool) map[string]string {
+func (l *Labeler) getLabelsShardScope(shard *chi.ChiShard) map[string]string {
+ return map[string]string{
+ LabelApp: LabelAppValue,
+ LabelChop: l.version,
+ LabelChi: getNamePartChiName(shard),
+ LabelCluster: getNamePartClusterName(shard),
+ LabelShard: getNamePartShardName(shard),
+ }
+}
+
+func (l *Labeler) getSelectorShardScope(shard *chi.ChiShard) map[string]string {
+ return map[string]string{
+ LabelApp: LabelAppValue,
+ // skip chop
+ LabelChi: getNamePartChiName(shard),
+ LabelCluster: getNamePartClusterName(shard),
+ LabelShard: getNamePartShardName(shard),
+ }
+}
+
+func (l *Labeler) getLabelsReplicaScope(replica *chi.ChiReplica, zk bool) map[string]string {
labels := map[string]string{
LabelApp: LabelAppValue,
- LabelChop: c.appVersion,
- LabelChi: nameSectionChi(replica),
- LabelCluster: nameSectionCluster(replica),
- LabelShard: nameSectionShard(replica),
- LabelReplica: nameSectionReplica(replica),
+ LabelChop: l.version,
+ LabelChi: getNamePartChiName(replica),
+ LabelCluster: getNamePartClusterName(replica),
+ LabelShard: getNamePartShardName(replica),
+ LabelReplica: getNamePartReplicaName(replica),
LabelStatefulSet: CreateStatefulSetName(replica),
}
if zk {
@@ -52,14 +102,14 @@ func (c *Creator) getLabelsReplica(replica *chi.ChiReplica, zk bool) map[string]
return labels
}
-func (c *Creator) getSelectorReplica(replica *chi.ChiReplica) map[string]string {
+func (l *Labeler) GetSelectorReplicaScope(replica *chi.ChiReplica) map[string]string {
return map[string]string{
LabelApp: LabelAppValue,
// skip chop
- LabelChi: nameSectionChi(replica),
- LabelCluster: nameSectionCluster(replica),
- LabelShard: nameSectionShard(replica),
- LabelReplica: nameSectionReplica(replica),
+ LabelChi: getNamePartChiName(replica),
+ LabelCluster: getNamePartClusterName(replica),
+ LabelShard: getNamePartShardName(replica),
+ LabelReplica: getNamePartReplicaName(replica),
// skip StatefulSet
// skip Zookeeper
}
@@ -106,3 +156,23 @@ func IsChopGeneratedObject(objectMeta *meta.ObjectMeta) bool {
return ok
}
+
+func GetChiNameFromObjectMeta(meta *meta.ObjectMeta) (string, error) {
+ // ObjectMeta must have LabelChi: chi.Name label
+ name, ok := meta.Labels[LabelChi]
+ if ok {
+ return name, nil
+ } else {
+ return "", fmt.Errorf("can not find %s label in meta", LabelChi)
+ }
+}
+
+func GetClusterNameFromObjectMeta(meta *meta.ObjectMeta) (string, error) {
+ // ObjectMeta must have LabelCluster
+ name, ok := meta.Labels[LabelCluster]
+ if ok {
+ return name, nil
+ } else {
+		return "", fmt.Errorf("can not find %s label in meta", LabelCluster)
+ }
+}
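
As context (not part of the patch), a sketch of how the scope selectors produced by the new Labeler behave when listing objects: a selector built from the chi-scope map still matches objects that carry more specific labels (cluster/shard/replica, chop version). The label keys and values below are illustrative; the real ones come from the LabelApp/LabelChi/... constants.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Chi-scope selector: app + chi name, chop version intentionally skipped
	selector := labels.SelectorFromSet(labels.Set{
		"clickhouse.altinity.com/app": "chop",
		"clickhouse.altinity.com/chi": "demo",
	})
	// Labels of a replica-scope object carry additional keys
	podLabels := labels.Set{
		"clickhouse.altinity.com/app":     "chop",
		"clickhouse.altinity.com/chi":     "demo",
		"clickhouse.altinity.com/cluster": "main",
		"clickhouse.altinity.com/shard":   "0",
		"clickhouse.altinity.com/replica": "0",
	}
	fmt.Println(selector.Matches(podLabels)) // true - extra labels do not break the match
}
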
diff --git a/pkg/model/namer.go b/pkg/model/namer.go
index f26461b48..3eb79ac9b 100644
--- a/pkg/model/namer.go
+++ b/pkg/model/namer.go
@@ -19,115 +19,273 @@ import (
chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/util"
apps "k8s.io/api/apps/v1"
+ "strconv"
+ "strings"
)
-func createChiNameID(name string) string {
- //return util.CreateStringID(name, 6)
- return util.StringHead(name, 15)
+const (
+ namePartChiMaxLen = 15
+ namePartClusterMaxLen = 15
+ namePartShardMaxLen = 15
+ namePartReplicaMaxLen = 15
+)
+
+const (
+ // chiServiceNamePattern is a template of CHI Service name
+ chiServiceNamePattern = "clickhouse-{chi}"
+
+ // clusterServiceNamePattern is a template of cluster Service name
+ clusterServiceNamePattern = "cluster-{chi}-{cluster}"
+
+ // shardServiceNamePattern is a template of shard Service name
+ shardServiceNamePattern = "shard-{chi}-{cluster}-{shard}"
+
+ // statefulSetNamePattern is a template of replica's StatefulSet's name
+ statefulSetNamePattern = "chi-{chi}-{cluster}-{shard}-{replica}"
+
+ // statefulSetServiceNamePattern is a template of replica's StatefulSet's Service name
+ statefulSetServiceNamePattern = "chi-{chi}-{cluster}-{shard}-{replica}"
+
+ // configMapCommonNamePattern is a template of common settings for the CHI ConfigMap
+ configMapCommonNamePattern = "chi-{chi}-common-configd"
+
+ // configMapCommonUsersNamePattern is a template of common users settings for the CHI ConfigMap
+ configMapCommonUsersNamePattern = "chi-{chi}-common-usersd"
+
+ // configMapDeploymentNamePattern is a template of macros ConfigMap
+ configMapDeploymentNamePattern = "chi-{chi}-deploy-confd-{cluster}-{shard}-{replica}"
+
+	// namespaceDomainPattern is the domain name pattern of a namespace
+	// In this pattern "%s" is substituted with the namespace name
+ // Ex.: my-dev-namespace.svc.cluster.local
+ namespaceDomainPattern = "%s.svc.cluster.local"
+
+ // ServiceName.domain.name
+ serviceFQDNPattern = "%s" + "." + namespaceDomainPattern
+
+	// podFQDNPattern consists of 2 parts:
+	// 1. nameless Service of the StatefulSet
+	// 2. namespace domain name
+ // Hostname.domain.name
+ podFQDNPattern = "%s" + "." + namespaceDomainPattern
+
+ // podNamePattern is a name of a Pod as ServiceName-0
+ podNamePattern = "%s-0"
+)
+
+func namePartChiName(name string) string {
+ return util.StringHead(name, namePartChiMaxLen)
}
-func createClusterNameID(name string) string {
- //return util.CreateStringID(name, 4)
- return util.StringHead(name, 15)
+func namePartChiNameID(name string) string {
+ return util.CreateStringID(name, namePartChiMaxLen)
}
-func createShardNameID(name string) string {
- return util.StringHead(name, 8)
+func namePartClusterName(name string) string {
+ return util.StringHead(name, namePartClusterMaxLen)
}
-func createReplicaNameID(name string) string {
- return util.StringHead(name, 8)
+func namePartClusterNameID(name string) string {
+ return util.CreateStringID(name, namePartClusterMaxLen)
}
-func nameSectionChi(obj interface{}) string {
+func namePartShardName(name string) string {
+ return util.StringHead(name, namePartShardMaxLen)
+}
+
+func namePartShardNameID(name string) string {
+ return util.CreateStringID(name, namePartShardMaxLen)
+}
+
+func namePartReplicaName(name string) string {
+ return util.StringHead(name, namePartReplicaMaxLen)
+}
+
+func namePartReplicaNameID(name string) string {
+ return util.CreateStringID(name, namePartReplicaMaxLen)
+}
+
+func getNamePartChiName(obj interface{}) string {
switch obj.(type) {
- case *chop.ChiReplica:
- replica := obj.(*chop.ChiReplica)
- return createChiNameID(replica.Address.ChiName)
case *chop.ClickHouseInstallation:
chi := obj.(*chop.ClickHouseInstallation)
- return createChiNameID(chi.Name)
+ return namePartChiName(chi.Name)
+ case *chop.ChiCluster:
+ cluster := obj.(*chop.ChiCluster)
+ return namePartChiName(cluster.Address.ChiName)
+ case *chop.ChiShard:
+ shard := obj.(*chop.ChiShard)
+ return namePartChiName(shard.Address.ChiName)
+ case *chop.ChiReplica:
+ replica := obj.(*chop.ChiReplica)
+ return namePartChiName(replica.Address.ChiName)
}
return "ERROR"
}
-func nameSectionCluster(replica *chop.ChiReplica) string {
- return createClusterNameID(replica.Address.ClusterName)
+func getNamePartClusterName(obj interface{}) string {
+ switch obj.(type) {
+ case *chop.ChiCluster:
+ cluster := obj.(*chop.ChiCluster)
+ return namePartClusterName(cluster.Address.ClusterName)
+ case *chop.ChiShard:
+ shard := obj.(*chop.ChiShard)
+ return namePartClusterName(shard.Address.ClusterName)
+ case *chop.ChiReplica:
+ replica := obj.(*chop.ChiReplica)
+ return namePartClusterName(replica.Address.ClusterName)
+ }
+
+ return "ERROR"
}
-func nameSectionShard(replica *chop.ChiReplica) string {
- return createShardNameID(replica.Address.ShardName)
+func getNamePartShardName(obj interface{}) string {
+ switch obj.(type) {
+ case *chop.ChiShard:
+ shard := obj.(*chop.ChiShard)
+ return namePartShardName(shard.Address.ShardName)
+ case *chop.ChiReplica:
+ replica := obj.(*chop.ChiReplica)
+ return namePartShardName(replica.Address.ShardName)
+ }
+
+ return "ERROR"
}
-func nameSectionReplica(replica *chop.ChiReplica) string {
- return createReplicaNameID(replica.Address.ReplicaName)
+func getNamePartReplicaName(replica *chop.ChiReplica) string {
+ return namePartReplicaName(replica.Address.ReplicaName)
+}
+
+func newReplacerChi(chi *chop.ClickHouseInstallation) *strings.Replacer {
+ return strings.NewReplacer(
+ "{chi}", namePartChiName(chi.Name),
+ "{chiID}", namePartChiNameID(chi.Name),
+ )
+}
+
+func newReplacerCluster(cluster *chop.ChiCluster) *strings.Replacer {
+ return strings.NewReplacer(
+ "{chi}", namePartChiName(cluster.Address.ChiName),
+ "{chiID}", namePartChiNameID(cluster.Address.ChiName),
+ "{cluster}", namePartClusterName(cluster.Address.ClusterName),
+ "{clusterID}", namePartClusterNameID(cluster.Address.ClusterName),
+ "{clusterIndex}", strconv.Itoa(cluster.Address.ClusterIndex),
+ )
+}
+
+func newReplacerShard(shard *chop.ChiShard) *strings.Replacer {
+ return strings.NewReplacer(
+ "{chi}", namePartChiName(shard.Address.ChiName),
+ "{chiID}", namePartChiNameID(shard.Address.ChiName),
+ "{cluster}", namePartClusterName(shard.Address.ClusterName),
+ "{clusterID}", namePartClusterNameID(shard.Address.ClusterName),
+ "{clusterIndex}", strconv.Itoa(shard.Address.ClusterIndex),
+ "{shard}", namePartShardName(shard.Address.ShardName),
+ "{shardID}", namePartShardNameID(shard.Address.ShardName),
+ "{shardIndex}", strconv.Itoa(shard.Address.ShardIndex),
+ )
+}
+
+func newReplacerReplica(replica *chop.ChiReplica) *strings.Replacer {
+ return strings.NewReplacer(
+ "{chi}", namePartChiName(replica.Address.ChiName),
+ "{chiID}", namePartChiNameID(replica.Address.ChiName),
+ "{cluster}", namePartClusterName(replica.Address.ClusterName),
+ "{clusterID}", namePartClusterNameID(replica.Address.ClusterName),
+ "{clusterIndex}", strconv.Itoa(replica.Address.ClusterIndex),
+ "{shard}", namePartShardName(replica.Address.ShardName),
+ "{shardID}", namePartShardNameID(replica.Address.ShardName),
+ "{shardIndex}", strconv.Itoa(replica.Address.ShardIndex),
+ "{replica}", namePartReplicaName(replica.Address.ReplicaName),
+ "{replicaID}", namePartReplicaNameID(replica.Address.ReplicaName),
+ "{replicaIndex}", strconv.Itoa(replica.Address.ReplicaIndex),
+ )
}
// CreateConfigMapPodName returns a name for a ConfigMap for replica's pod
func CreateConfigMapPodName(replica *chop.ChiReplica) string {
- return fmt.Sprintf(
- configMapDeploymentNamePattern,
- nameSectionChi(replica),
- nameSectionCluster(replica),
- nameSectionShard(replica),
- nameSectionReplica(replica),
- )
+ return newReplacerReplica(replica).Replace(configMapDeploymentNamePattern)
}
// CreateConfigMapCommonName returns a name for a ConfigMap for replica's common chopConfig
func CreateConfigMapCommonName(chi *chop.ClickHouseInstallation) string {
- return fmt.Sprintf(
- configMapCommonNamePattern,
- nameSectionChi(chi),
- )
+ return newReplacerChi(chi).Replace(configMapCommonNamePattern)
}
// CreateConfigMapCommonUsersName returns a name for a ConfigMap for replica's common chopConfig
func CreateConfigMapCommonUsersName(chi *chop.ClickHouseInstallation) string {
- return fmt.Sprintf(
- configMapCommonUsersNamePattern,
- nameSectionChi(chi),
- )
+ return newReplacerChi(chi).Replace(configMapCommonUsersNamePattern)
}
// CreateChiServiceName creates a name of a Installation Service resource
func CreateChiServiceName(chi *chop.ClickHouseInstallation) string {
- return fmt.Sprintf(
- chiServiceNamePattern,
- chi.Name,
- )
+ if template, ok := chi.GetOwnServiceTemplate(); ok {
+ // Service template available
+ if template.GenerateName != "" {
+ // Service template has explicitly specified service name template
+ return newReplacerChi(chi).Replace(template.GenerateName)
+ }
+ }
+
+ // Create Service name based on default Service Name template
+ return newReplacerChi(chi).Replace(chiServiceNamePattern)
}
// CreateChiServiceName creates a name of a Installation Service resource
func CreateChiServiceFQDN(chi *chop.ClickHouseInstallation) string {
return fmt.Sprintf(
- chiServiceFQDNPattern,
+ serviceFQDNPattern,
CreateChiServiceName(chi),
chi.Namespace,
)
}
+// CreateClusterServiceName returns a name of a cluster's Service
+func CreateClusterServiceName(cluster *chop.ChiCluster) string {
+ if template, ok := cluster.GetServiceTemplate(); ok {
+ // Service template available
+ if template.GenerateName != "" {
+ // Service template has explicitly specified service name template
+ return newReplacerCluster(cluster).Replace(template.GenerateName)
+ }
+ }
+
+ // Create Service name based on default Service Name template
+ return newReplacerCluster(cluster).Replace(clusterServiceNamePattern)
+}
+
+// CreateShardServiceName returns a name of a shard's Service
+func CreateShardServiceName(shard *chop.ChiShard) string {
+ if template, ok := shard.GetServiceTemplate(); ok {
+ // Service template available
+ if template.GenerateName != "" {
+ // Service template has explicitly specified service name template
+ return newReplacerShard(shard).Replace(template.GenerateName)
+ }
+ }
+
+ // Create Service name based on default Service Name template
+ return newReplacerShard(shard).Replace(shardServiceNamePattern)
+}
+
// CreateStatefulSetName creates a name of a StatefulSet for replica
func CreateStatefulSetName(replica *chop.ChiReplica) string {
- return fmt.Sprintf(
- statefulSetNamePattern,
- nameSectionChi(replica),
- nameSectionCluster(replica),
- nameSectionShard(replica),
- nameSectionReplica(replica),
- )
+ return newReplacerReplica(replica).Replace(statefulSetNamePattern)
}
// CreateStatefulSetServiceName returns a name of a StatefulSet-related Service for replica
func CreateStatefulSetServiceName(replica *chop.ChiReplica) string {
- return fmt.Sprintf(
- statefulSetServiceNamePattern,
- nameSectionChi(replica),
- nameSectionCluster(replica),
- nameSectionShard(replica),
- nameSectionReplica(replica),
- )
+ if template, ok := replica.GetServiceTemplate(); ok {
+ // Service template available
+ if template.GenerateName != "" {
+ // Service template has explicitly specified service name template
+ return newReplacerReplica(replica).Replace(template.GenerateName)
+ }
+ }
+
+ // Create Service name based on default Service Name template
+ return newReplacerReplica(replica).Replace(statefulSetServiceNamePattern)
}
// CreatePodHostname returns a name of a Pod resource for a replica
@@ -136,12 +294,6 @@ func CreatePodHostname(replica *chop.ChiReplica) string {
return CreateStatefulSetServiceName(replica)
}
-// CreateNamespaceDomainName creates domain name of a namespace
-// .my-dev-namespace.svc.cluster.local
-func CreateNamespaceDomainName(chiNamespace string) string {
- return fmt.Sprintf(namespaceDomainPattern, chiNamespace)
-}
-
// CreatePodFQDN creates a fully qualified domain name of a pod
// ss-1eb454-2-0.my-dev-domain.svc.cluster.local
func CreatePodFQDN(replica *chop.ChiReplica) string {
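
For reference, a minimal sketch (outside the patch) of how the new {placeholder}-based name patterns expand via strings.Replacer; the part values are illustrative:

package main

import (
	"fmt"
	"strings"
)

func main() {
	r := strings.NewReplacer(
		"{chi}", "demo",
		"{cluster}", "main",
		"{shard}", "0",
		"{replica}", "0",
	)
	fmt.Println(r.Replace("chi-{chi}-{cluster}-{shard}-{replica}")) // chi-demo-main-0-0
	fmt.Println(r.Replace("shard-{chi}-{cluster}-{shard}"))         // shard-demo-main-0
	fmt.Println(r.Replace("clickhouse-{chi}"))                      // clickhouse-demo
}
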
diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go
index 9f22dda01..d9c9c7a52 100644
--- a/pkg/model/normalizer.go
+++ b/pkg/model/normalizer.go
@@ -18,6 +18,9 @@ import (
chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
chopconfig "github.com/altinity/clickhouse-operator/pkg/config"
"github.com/altinity/clickhouse-operator/pkg/util"
+ "k8s.io/api/core/v1"
+ v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
"regexp"
"strconv"
"strings"
@@ -55,17 +58,22 @@ func (n *Normalizer) DoChi(chi *chiv1.ClickHouseInstallation) (*chiv1.ClickHouse
// Walk over ChiSpec datatype fields
n.doDefaults(&n.chi.Spec.Defaults)
n.doConfiguration(&n.chi.Spec.Configuration)
- // ChiSpec.Templates
+ n.doTemplates(&n.chi.Spec.Templates)
+
+ n.doStatus()
+
+ return n.chi, nil
+}
- endpoint := CreateChiServiceFQDN(chi)
+// doStatus prepares .status section
+func (n *Normalizer) doStatus() {
+ endpoint := CreateChiServiceFQDN(n.chi)
pods := make([]string, 0)
n.chi.WalkReplicas(func(replica *chiv1.ChiReplica) error {
pods = append(pods, CreatePodName(replica))
return nil
})
n.chi.StatusFill(endpoint, pods)
-
- return n.chi, nil
}
// doDefaults normalizes .spec.defaults
@@ -86,6 +94,258 @@ func (n *Normalizer) doConfiguration(conf *chiv1.ChiConfiguration) {
n.doClusters()
}
+// doTemplates normalizes .spec.templates
+func (n *Normalizer) doTemplates(templates *chiv1.ChiTemplates) {
+ for i := range templates.PodTemplates {
+ podTemplate := &templates.PodTemplates[i]
+ n.doPodTemplate(podTemplate)
+ }
+
+ for i := range templates.VolumeClaimTemplates {
+ vcTemplate := &templates.VolumeClaimTemplates[i]
+ n.doVolumeClaimTemplate(vcTemplate)
+ }
+
+ for i := range templates.ServiceTemplates {
+ serviceTemplate := &templates.ServiceTemplates[i]
+ n.doServiceTemplate(serviceTemplate)
+ }
+}
+
+// doPodTemplate normalizes .spec.templates.podTemplates
+func (n *Normalizer) doPodTemplate(template *chiv1.ChiPodTemplate) {
+ // Name
+
+ // Zone
+ if len(template.Zone.Values) == 0 {
+ // In case no values specified - no key is reasonable
+ template.Zone.Key = ""
+ } else if template.Zone.Key == "" {
+ // We have values specified, but no key
+ // Use default zone key in this case
+ template.Zone.Key = "failure-domain.beta.kubernetes.io/zone"
+ } else {
+ // We have both key and value(s) specified explicitly
+ }
+
+ // Distribution
+ if template.Distribution == podDistributionOnePerHost {
+ // Known distribution, all is fine
+ } else {
+ template.Distribution = podDistributionUnspecified
+ }
+
+ // Spec
+ template.Spec.Affinity = n.mergeAffinity(template.Spec.Affinity, n.buildAffinity(template))
+
+ // Introduce PodTemplate into Index
+ // Ensure map is in place
+ if n.chi.Spec.Templates.PodTemplatesIndex == nil {
+ n.chi.Spec.Templates.PodTemplatesIndex = make(map[string]*chiv1.ChiPodTemplate)
+ }
+
+ n.chi.Spec.Templates.PodTemplatesIndex[template.Name] = template
+}
+
+func (n *Normalizer) buildAffinity(template *chiv1.ChiPodTemplate) *v1.Affinity {
+ nodeAffinity := n.buildNodeAffinity(template)
+ podAntiAffinity := n.buildPodAntiAffinity(template)
+
+ if nodeAffinity == nil && podAntiAffinity == nil {
+ return nil
+ } else {
+ return &v1.Affinity{
+ NodeAffinity: nodeAffinity,
+ PodAffinity: nil,
+ PodAntiAffinity: podAntiAffinity,
+ }
+ }
+}
+
+func (n *Normalizer) mergeAffinity(dst *v1.Affinity, src *v1.Affinity) *v1.Affinity {
+ if src == nil {
+ // Nothing to merge from
+ return dst
+ }
+
+ if dst == nil {
+ // No receiver, allocate new one
+ dst = &v1.Affinity{
+ NodeAffinity: n.mergeNodeAffinity(nil, src.NodeAffinity),
+ PodAffinity: src.PodAffinity,
+ PodAntiAffinity: n.mergePodAntiAffinity(nil, src.PodAntiAffinity),
+ }
+ }
+
+ return dst
+}
+
+func (n *Normalizer) buildNodeAffinity(template *chiv1.ChiPodTemplate) *v1.NodeAffinity {
+ if template.Zone.Key == "" {
+ return nil
+ } else {
+ return &v1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+ NodeSelectorTerms: []v1.NodeSelectorTerm{
+ {
+ // A list of node selector requirements by node's labels.
+ MatchExpressions: []v1.NodeSelectorRequirement{
+ {
+ Key: template.Zone.Key,
+ Operator: v1.NodeSelectorOpIn,
+ Values: template.Zone.Values,
+ },
+ },
+ // A list of node selector requirements by node's fields.
+ //MatchFields: []v1.NodeSelectorRequirement{
+ // v1.NodeSelectorRequirement{},
+ //},
+ },
+ },
+ },
+
+ PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{},
+ }
+ }
+}
+
+func (n *Normalizer) mergeNodeAffinity(dst *v1.NodeAffinity, src *v1.NodeAffinity) *v1.NodeAffinity {
+ if src == nil {
+ // Nothing to merge from
+ return dst
+ }
+
+ // Check NodeSelectors are available
+ if src.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ return dst
+ }
+ if len(src.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 {
+ return dst
+ }
+
+ if dst == nil {
+ // No receiver, allocate new one
+ dst = &v1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+ NodeSelectorTerms: []v1.NodeSelectorTerm{},
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{},
+ }
+ }
+
+ // Copy NodeSelectors
+ for i := range src.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
+ dst.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(
+ dst.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
+ src.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[i],
+ )
+ }
+
+ // Copy PreferredSchedulingTerm
+ for i := range src.PreferredDuringSchedulingIgnoredDuringExecution {
+ dst.PreferredDuringSchedulingIgnoredDuringExecution = append(
+ dst.PreferredDuringSchedulingIgnoredDuringExecution,
+ src.PreferredDuringSchedulingIgnoredDuringExecution[i],
+ )
+ }
+
+ return dst
+}
+
+func (n *Normalizer) buildPodAntiAffinity(template *chiv1.ChiPodTemplate) *v1.PodAntiAffinity {
+ if template.Distribution == podDistributionOnePerHost {
+ return &v1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
+ {
+ LabelSelector: &v12.LabelSelector{
+ // A list of node selector requirements by node's labels.
+ MatchExpressions: []v12.LabelSelectorRequirement{
+ {
+ Key: LabelApp,
+ Operator: v12.LabelSelectorOpIn,
+ Values: []string{
+ LabelAppValue,
+ },
+ },
+ },
+ },
+ TopologyKey: "kubernetes.io/hostname",
+ },
+ },
+
+ PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{},
+ }
+ } else {
+ return nil
+ }
+}
+
+func (n *Normalizer) mergePodAntiAffinity(dst *v1.PodAntiAffinity, src *v1.PodAntiAffinity) *v1.PodAntiAffinity {
+ if src == nil {
+ // Nothing to merge from
+ return dst
+ }
+
+ if len(src.RequiredDuringSchedulingIgnoredDuringExecution) == 0 {
+ return dst
+ }
+
+ if dst == nil {
+ // No receiver, allocate new one
+ dst = &v1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{},
+ PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{},
+ }
+ }
+
+ // Copy PodAffinityTerm
+ for i := range src.RequiredDuringSchedulingIgnoredDuringExecution {
+ dst.RequiredDuringSchedulingIgnoredDuringExecution = append(
+ dst.RequiredDuringSchedulingIgnoredDuringExecution,
+ src.RequiredDuringSchedulingIgnoredDuringExecution[i],
+ )
+ }
+
+ // Copy WeightedPodAffinityTerm
+ for i := range src.PreferredDuringSchedulingIgnoredDuringExecution {
+ dst.PreferredDuringSchedulingIgnoredDuringExecution = append(
+ dst.PreferredDuringSchedulingIgnoredDuringExecution,
+ src.PreferredDuringSchedulingIgnoredDuringExecution[i],
+ )
+ }
+
+ return dst
+}
+
+// doVolumeClaimTemplate normalizes .spec.templates.volumeClaimTemplates
+func (n *Normalizer) doVolumeClaimTemplate(template *chiv1.ChiVolumeClaimTemplate) {
+ // Check name
+ // Check PVCReclaimPolicy
+ if !template.PVCReclaimPolicy.IsValid() {
+ template.PVCReclaimPolicy = chiv1.PVCReclaimPolicyDelete
+ }
+ // Check Spec
+
+ // Ensure map is in place
+ if n.chi.Spec.Templates.VolumeClaimTemplatesIndex == nil {
+ n.chi.Spec.Templates.VolumeClaimTemplatesIndex = make(map[string]*chiv1.ChiVolumeClaimTemplate)
+ }
+ n.chi.Spec.Templates.VolumeClaimTemplatesIndex[template.Name] = template
+}
+
+// doServiceTemplate normalizes .spec.templates.serviceTemplates
+func (n *Normalizer) doServiceTemplate(template *chiv1.ChiServiceTemplate) {
+ // Check name
+ // Check GenerateName
+ // Check Spec
+
+ // Ensure map is in place
+ if n.chi.Spec.Templates.ServiceTemplatesIndex == nil {
+ n.chi.Spec.Templates.ServiceTemplatesIndex = make(map[string]*chiv1.ChiServiceTemplate)
+ }
+ n.chi.Spec.Templates.ServiceTemplatesIndex[template.Name] = template
+}
+
// doClusters normalizes clusters
func (n *Normalizer) doClusters() {
@@ -103,6 +363,7 @@ func (n *Normalizer) doClusters() {
return n.doCluster(cluster)
})
n.chi.FillAddressInfo()
+ n.chi.FillChiPointer()
n.chi.WalkReplicas(func(replica *chiv1.ChiReplica) error {
replica.Config.ZkFingerprint = fingerprint(n.chi.Spec.Configuration.Zookeeper)
return nil
@@ -129,6 +390,13 @@ func (n *Normalizer) doConfigurationUsers(users *map[string]interface{}) {
for path := range *users {
// Split 'admin/password'
tags := strings.Split(path, "/")
+
+ // Basic sanity check - need to have at least "username/something" pair
+ if len(tags) < 2 {
+ // Skip incorrect entry
+ continue
+ }
+
username := tags[0]
usernameMap[username] = true
}
@@ -177,7 +445,7 @@ func (n *Normalizer) doConfigurationSettings(settings *map[string]interface{}) {
// doCluster normalizes cluster and returns deployments usage counters for this cluster
func (n *Normalizer) doCluster(cluster *chiv1.ChiCluster) error {
- // Inherit PodTemplate from .spec.defaults
+ // Use PodTemplate from .spec.defaults
cluster.InheritTemplates(n.chi)
// Convenience wrapper
@@ -238,7 +506,7 @@ func (n *Normalizer) doShardReplicasCount(shard *chiv1.ChiShard, layoutReplicasC
// We have Replicas specified as slice - ok, this means exact ReplicasCount is known
shard.ReplicasCount = len(shard.Replicas)
} else {
- // Inherit ReplicasCount from layout
+		// Use ReplicasCount from layout
shard.ReplicasCount = layoutReplicasCount
}
}
@@ -310,7 +578,7 @@ func (n *Normalizer) doShardReplicas(shard *chiv1.ChiShard) {
// Normalize a replica
n.doReplicaName(replica, replicaIndex)
n.doReplicaPort(replica)
- // Inherit PodTemplate from shard
+ // Use PodTemplate from shard
replica.InheritTemplates(shard)
}
}
diff --git a/pkg/model/reconciler.go b/pkg/model/reconciler.go
new file mode 100644
index 000000000..8c9440e55
--- /dev/null
+++ b/pkg/model/reconciler.go
@@ -0,0 +1,170 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/config"
+ apps "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Reconciler is the base struct to create k8s objects
+type Reconciler struct {
+ appVersion string
+ chi *chiv1.ClickHouseInstallation
+ chopConfig *config.Config
+ chConfigGenerator *ClickHouseConfigGenerator
+ chConfigSectionsGenerator *configSections
+ labeler *Labeler
+ funcs *ReconcileFuncs
+}
+
+type ReconcileFuncs struct {
+ ReconcileConfigMap func(configMap *corev1.ConfigMap) error
+ ReconcileService func(service *corev1.Service) error
+ ReconcileStatefulSet func(newStatefulSet *apps.StatefulSet, replica *chiv1.ChiReplica) error
+}
+
+// NewReconciler creates new creator
+func NewReconciler(
+ chi *chiv1.ClickHouseInstallation,
+ chopConfig *config.Config,
+ appVersion string,
+ funcs *ReconcileFuncs,
+) *Reconciler {
+ reconciler := &Reconciler{
+ chi: chi,
+ chopConfig: chopConfig,
+ appVersion: appVersion,
+ chConfigGenerator: NewClickHouseConfigGenerator(chi),
+ labeler: NewLabeler(appVersion, chi),
+ funcs: funcs,
+ }
+ reconciler.chConfigSectionsGenerator = NewConfigSections(reconciler.chConfigGenerator, reconciler.chopConfig)
+
+ return reconciler
+}
+
+// Reconcile runs reconcile process
+func (r *Reconciler) Reconcile() error {
+ return r.chi.WalkClusterTillError(
+ r.reconcileChi,
+ r.reconcileCluster,
+ r.reconcileShard,
+ r.reconcileReplica,
+ )
+}
+
+// reconcileChi reconciles CHI global objects
+func (r *Reconciler) reconcileChi(chi *chiv1.ClickHouseInstallation) error {
+ if err := r.reconcileChiService(r.chi); err != nil {
+ return err
+ }
+
+ if err := r.reconcileChiConfigMaps(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// reconcileCluster reconciles Cluster, excluding nested shards
+func (r *Reconciler) reconcileCluster(cluster *chiv1.ChiCluster) error {
+ // Add Cluster's Service
+ if service := r.createServiceCluster(cluster); service != nil {
+ return r.funcs.ReconcileService(service)
+ } else {
+ return nil
+ }
+}
+
+// reconcileShard reconciles Shard, excluding nested replicas
+func (r *Reconciler) reconcileShard(shard *chiv1.ChiShard) error {
+ // Add Shard's Service
+ if service := r.createServiceShard(shard); service != nil {
+ return r.funcs.ReconcileService(service)
+ } else {
+ return nil
+ }
+}
+
+// reconcileReplica reconciles Replica
+func (r *Reconciler) reconcileReplica(replica *chiv1.ChiReplica) error {
+ // Add replica's Service
+ service := r.createServiceReplica(replica)
+ if err := r.funcs.ReconcileService(service); err != nil {
+ return err
+ }
+
+ // Add replica's ConfigMap
+ configMap := r.createConfigMapReplica(replica)
+ if err := r.funcs.ReconcileConfigMap(configMap); err != nil {
+ return err
+ }
+
+ // Add replica's StatefulSet
+ statefulSet := r.createStatefulSet(replica)
+ if err := r.funcs.ReconcileStatefulSet(statefulSet, replica); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// reconcileChiService reconciles global Services belonging to CHI
+func (r *Reconciler) reconcileChiService(chi *chiv1.ClickHouseInstallation) error {
+ service := r.createServiceChi(chi)
+ return r.funcs.ReconcileService(service)
+}
+
+// reconcileChiConfigMaps reconciles global ConfigMaps belonging to CHI
+func (r *Reconciler) reconcileChiConfigMaps() error {
+ r.chConfigSectionsGenerator.CreateConfigsUsers()
+ r.chConfigSectionsGenerator.CreateConfigsCommon()
+
+ // ConfigMap common for all resources in CHI
+ // contains several sections, mapped as separated chopConfig files,
+ // such as remote servers, zookeeper setup, etc
+ configMapCommon := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: CreateConfigMapCommonName(r.chi),
+ Namespace: r.chi.Namespace,
+ Labels: r.labeler.getLabelsChiScope(),
+ },
+ // Data contains several sections which are to be several xml chopConfig files
+ Data: r.chConfigSectionsGenerator.commonConfigSections,
+ }
+ if err := r.funcs.ReconcileConfigMap(configMapCommon); err != nil {
+ return err
+ }
+
+ // ConfigMap common for all users resources in CHI
+ configMapUsers := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: CreateConfigMapCommonUsersName(r.chi),
+ Namespace: r.chi.Namespace,
+ Labels: r.labeler.getLabelsChiScope(),
+ },
+ // Data contains several sections which are to be several xml chopConfig files
+ Data: r.chConfigSectionsGenerator.commonUsersConfigSections,
+ }
+ if err := r.funcs.ReconcileConfigMap(configMapUsers); err != nil {
+ return err
+ }
+
+ return nil
+}
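
A minimal sketch (not part of the patch) of how a controller could wire the new ReconcileFuncs callbacks and drive a reconcile pass; the callback bodies are placeholders for the real create-or-update client calls.

package controller

import (
	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	"github.com/altinity/clickhouse-operator/pkg/config"
	"github.com/altinity/clickhouse-operator/pkg/model"
	apps "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func reconcileChi(chi *chiv1.ClickHouseInstallation, chopConfig *config.Config, version string) error {
	funcs := &model.ReconcileFuncs{
		ReconcileConfigMap: func(configMap *corev1.ConfigMap) error {
			// create-or-update the ConfigMap here
			return nil
		},
		ReconcileService: func(service *corev1.Service) error {
			// create-or-update the Service here
			return nil
		},
		ReconcileStatefulSet: func(statefulSet *apps.StatefulSet, replica *chiv1.ChiReplica) error {
			// create-or-update the StatefulSet and wait for the replica here
			return nil
		},
	}
	return model.NewReconciler(chi, chopConfig, version, funcs).Reconcile()
}
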
diff --git a/pkg/model/types.go b/pkg/model/types.go
deleted file mode 100644
index 7c4543d62..000000000
--- a/pkg/model/types.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- apps "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
-)
-
-// ConfigMapList defines a list of the ConfigMap objects
-type ConfigMapList []*corev1.ConfigMap
-
-// StatefulSetList defines a list of the StatefulSet objects
-type StatefulSetList []*apps.StatefulSet
-
-// ServiceList defines a list of the Service objects
-type ServiceList []*corev1.Service
-
-type configSections struct {
- // commonConfigSections maps section name to section XML chopConfig
- commonConfigSections map[string]string
-
- // commonUsersConfigSections maps section name to section XML chopConfig
- commonUsersConfigSections map[string]string
-}
-
-// volumeClaimTemplatesIndex maps volume claim template name - which
-// is .spec.templates.volumeClaimTemplates.name to VolumeClaimTemplate itself
-// Used to provide dictionary/index for templates
-type volumeClaimTemplatesIndex map[string]*v1.ChiVolumeClaimTemplate
-
-// podTemplatesIndex maps pod template name - which
-// is .spec.templates.podTemplates.name to PodTemplate itself
-// Used to provide dictionary/index for templates
-type podTemplatesIndex map[string]*v1.ChiPodTemplate
diff --git a/pkg/util/map.go b/pkg/util/map.go
index ea52d87d4..74e83a8ec 100644
--- a/pkg/util/map.go
+++ b/pkg/util/map.go
@@ -26,3 +26,14 @@ func IncludeNonEmpty(dst map[string]string, key, src string) {
return
}
+
+// MergeStringMaps inserts (and overwrites) data into dst map object from src
+func MergeStringMaps(dst, src map[string]string) map[string]string {
+ if dst == nil {
+ dst = make(map[string]string)
+ }
+ for key := range src {
+ dst[key] = src[key]
+ }
+ return dst
+}
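
For clarity, the merge semantics of the new MergeStringMaps helper in one short example (not part of the patch): src values overwrite matching keys in dst, other dst keys survive, and a nil dst is allocated.

package main

import (
	"fmt"

	"github.com/altinity/clickhouse-operator/pkg/util"
)

func main() {
	dst := map[string]string{"custom": "selector", "app": "old"}
	src := map[string]string{"app": "new"}
	fmt.Println(util.MergeStringMaps(dst, src)) // map[app:new custom:selector]
	fmt.Println(util.MergeStringMaps(nil, src)) // map[app:new] - nil dst is allocated
}
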
diff --git a/release b/release
index 0d91a54c7..1d0ba9ea1 100644
--- a/release
+++ b/release
@@ -1 +1 @@
-0.3.0
+0.4.0