Skip to content

Commit

Permalink
Merge pull request #141 from Altinity/0.4.3
Browse files Browse the repository at this point in the history
0.4.3
  • Loading branch information
alex-zaitsev committed Jul 19, 2019
2 parents 3277cef + cdb0f3c commit fc04cc3
Show file tree
Hide file tree
Showing 22 changed files with 285 additions and 94 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# === Builder ===

FROM golang:1.12 AS builder
FROM golang:1.12.7 AS builder

RUN apt-get update && apt-get install -y -q apt-utils && apt-get install -y -q gettext-base
WORKDIR $GOPATH/src/github.com/altinity/clickhouse-operator
Expand Down
12 changes: 8 additions & 4 deletions docs/custom_resource_explained.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,14 +46,18 @@ clickhouse-installation-max 23h
```yaml
zookeeper:
nodes:
- host: zk-statefulset-0.zk-service.default.svc.cluster.local
- host: zookeeper-0.zookeepers.zoo3ns.svc.cluster.local
port: 2181
- host: zk-statefulset-1.zk-service.default.svc.cluster.local
- host: zookeeper-1.zookeepers.zoo3ns.svc.cluster.local
port: 2181
- host: zk-statefulset-2.zk-service.default.svc.cluster.local
- host: zookeeper-2.zookeepers.zoo3ns.svc.cluster.local
port: 2181
session_timeout_ms: 30000
operation_timeout_ms: 10000
root: /path/to/zookeeper/node
identity: user:password
```
`.spec.configuration.zookeeper` refers to [<yandex><zookeeper></zookeeper></yandex>](https://clickhouse.yandex/docs/en/operations/table_engines/replication/) config section
`.spec.configuration.zookeeper` refers to [<yandex><zookeeper></zookeeper></yandex>](https://clickhouse.yandex/docs/en/single/index.html?#server-settings_zookeeper) config section

## .spec.configuration.users
```yaml
Expand Down
9 changes: 9 additions & 0 deletions docs/examples/03-settings-01.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,15 @@ spec:
test_quota/interval/duration: "3600"
settings:
compression/case/method: zstd
disable_internal_dns_cache: 1
files:
dict1.xml: |
<yandex>
<!-- ref to file /etc/clickhouse-data/config.d/source1.csv -->
</yandex>
source1.csv: |
a1,b1,c1,d1
a2,b2,c2,d2
clusters:
- name: "standard"
layout:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,10 @@ spec:
port: 2181
- host: zookeeper-2.zookeepers.zoo3ns
port: 2181
session_timeout_ms: 30000
operation_timeout_ms: 10000
root: /path/to/zookeeper/node
identity: user:password
clusters:
- name: replcluster
layout:
Expand Down
82 changes: 82 additions & 0 deletions docs/examples/1.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
name: "verityprod"
namespace: dev
spec:
defaults:
replicasUseFQDN: "0"
distributedDDL:
profile: default
templates:
podTemplate: clickhouse-template
volumeClaimTemplate: aws-ebs-volume-claim
serviceTemplate: chi-service-template

configuration:
settings:
compression/case/method: zstd
users:
verity_prod/password: "verity"
verity_prod/networks/ip: "::/0"
verity_prod/profile: "default"
verity_prod/allow_databases/database:
- "veritytest"
- "verityprod"
clusters:
- name: replicas-only
templates:
podTemplate: clickhouse-template
volumeClaimTemplate: aws-ebs-volume-claim
layout:
replicasCount: 1

templates:
serviceTemplates:
- name: chi-service-template
generateName: "service-{chi}"
metadata:
annotations:
external-dns.alpha.kubernetes.io/hostname: XXXXXXXXXXXXXXXXXXXX.
service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: ELBSecurityPolicy-TLS-1-2-2017-01
spec:
ports:
- name: http
port: 8123
- name: client
port: 9000
type: LoadBalancer

volumeClaimTemplates:
- name: aws-ebs-volume-claim
spec:
# storageClassName: ebs-volume-class
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi

podTemplates:
- name: clickhouse-template
metadata:
labels:
app: clickhouse
spec:
containers:
- name: clickhouse
image: yandex/clickhouse-server
ports:
- name: http
containerPort: 8123
- name: client
containerPort: 9000
volumeMounts:
- name: aws-ebs-volume-claim
mountPath: /var/lib/clickhouse
# nodeSelector:
# beta.kubernetes.io/instance-type: r5a.xlarge

16 changes: 15 additions & 1 deletion docs/examples/99-clickhouseinstallation-max.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,18 @@ spec:
serviceTemplate: chi-service-template

configuration:
s zookeeper:
zookeeper:
nodes:
- host: zookeeper-0.zookeepers.zoo3ns.svc.cluster.local
port: 2181
- host: zookeeper-1.zookeepers.zoo3ns.svc.cluster.local
port: 2181
- host: zookeeper-2.zookeepers.zoo3ns.svc.cluster.local
port: 2181
session_timeout_ms: 30000
operation_timeout_ms: 10000
root: /path/to/zookeeper/node
identity: user:password
users:
readonly/profile: readonly
# <users>
Expand Down Expand Up @@ -67,6 +71,16 @@ s zookeeper:
# <method>zstd</method>
# </case>
# </compression>
disable_internal_dns_cache: 1
# <disable_internal_dns_cache>1</disable_internal_dns_cache>
files:
dict1.xml: |
<yandex>
<!-- ref to file /etc/clickhouse-data/config.d/source1.csv -->
</yandex>
source1.csv: |
a1,b1,c1,d1
a2,b2,c2,d2
clusters:

Expand Down
19 changes: 12 additions & 7 deletions docs/examples/label_nodes.sh
Original file line number Diff line number Diff line change
@@ -1,13 +1,18 @@
#!/bin/bash

# Get all nodes with output header
# Skip master node
# Skip output header
# Extract node names
# Label each node
kubectl get nodes | \
grep -v master | \
awk '{print $1}' | \
tail -n +2 | \
while read -r line; do
NODE=$line
#kubectl label nodes <node-name> <label-key>=<label-value>
#kubectl label nodes --overwrite=true $NODE clickhouse=allow
kubectl label nodes $NODE clickhouse=allow
awk '{print $1}' | \
while read -r LINE; do
# Label each node
NODE="${LINE}"
#kubectl label nodes <node-name> <label-key>=<label-value>
#kubectl label nodes --overwrite=true "${NODE}" clickhouse=allow
kubectl label nodes "${NODE}" clickhouse=allow
done

10 changes: 10 additions & 0 deletions manifests/dev/clickhouse-operator-template-01-section-crd.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,14 @@ spec:
port:
type: integer
minimum: 1
session_timeout_ms:
type: integer
operation_timeout_ms:
type: integer
root:
type: string
identity:
type: string
users:
type: object
profiles:
Expand All @@ -94,6 +102,8 @@ spec:
type: object
settings:
type: object
files:
type: object
clusters:
type: array
items:
Expand Down
10 changes: 10 additions & 0 deletions manifests/operator/clickhouse-operator-install.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,14 @@ spec:
port:
type: integer
minimum: 1
session_timeout_ms:
type: integer
operation_timeout_ms:
type: integer
root:
type: string
identity:
type: string
users:
type: object
profiles:
Expand All @@ -94,6 +102,8 @@ spec:
type: object
settings:
type: object
files:
type: object
clusters:
type: array
items:
Expand Down
13 changes: 12 additions & 1 deletion pkg/apis/clickhouse.altinity.com/v1/type_chi.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,8 @@ func (chi *ClickHouseInstallation) StatusFill(endpoint string, pods []string) {
chi.Status.IsKnown = 1
chi.Status.Version = version.Version
chi.Status.ClustersCount = chi.ClustersCount()
chi.Status.ReplicasCount = chi.ReplicasCount()
chi.Status.ShardsCount = chi.ShardsCount()
chi.Status.ReplicasCount = chi.ReplicasCount()/chi.ShardsCount()
chi.Status.Pods = pods
chi.Status.Endpoint = endpoint
}
Expand Down Expand Up @@ -321,6 +322,16 @@ func (chi *ClickHouseInstallation) ReplicasCount() int {
return count
}

// ShardsCount returns the total number of shards across all clusters
// of this ClickHouseInstallation.
func (chi *ClickHouseInstallation) ShardsCount() int {
	count := 0
	// The walk callback receives a shard, not a replica — name it accordingly
	// (the original `replica` name was a copy-paste from ReplicasCount).
	chi.WalkShards(func(shard *ChiShard) error {
		count++
		return nil
	})
	return count
}


// GetPodTemplate gets ChiPodTemplate by name
func (chi *ClickHouseInstallation) GetPodTemplate(name string) (*ChiPodTemplate, bool) {
if chi.Spec.Templates.PodTemplatesIndex == nil {
Expand Down
10 changes: 9 additions & 1 deletion pkg/apis/clickhouse.altinity.com/v1/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ type ChiStatus struct {
Version string `json:"version"`
ClustersCount int `json:"clusters"`
ReplicasCount int `json:"replicas"`
ShardsCount int `json:"shards"`
Pods []string `json:"pods"`
Endpoint string `json:"endpoint"`
}
Expand Down Expand Up @@ -93,6 +94,7 @@ type ChiConfiguration struct {
Profiles map[string]interface{} `json:"profiles,omitempty" yaml:"profiles"`
Quotas map[string]interface{} `json:"quotas,omitempty" yaml:"quotas"`
Settings map[string]interface{} `json:"settings,omitempty" yaml:"settings"`
Files map[string]string `json:"files,omitempty" yaml:"files"`
// TODO refactor into map[string]ChiCluster
Clusters []ChiCluster `json:"clusters,omitempty"`
}
Expand Down Expand Up @@ -247,8 +249,14 @@ type ChiDistributedDDL struct {
}

// ChiZookeeperConfig defines zookeeper section of .spec.configuration
// Refers to
// https://clickhouse.yandex/docs/en/single/index.html?#server-settings_zookeeper
type ChiZookeeperConfig struct {
Nodes []ChiZookeeperNode `json:"nodes,omitempty" yaml:"nodes"`
Nodes []ChiZookeeperNode `json:"nodes,omitempty" yaml:"nodes"`
SessionTimeoutMs int `json:"session_timeout_ms,omitempty" yaml:"session_timeout_ms"`
OperationTimeoutMs int `json:"operation_timeout_ms,omitempty" yaml:"operation_timeout_ms"`
Root string `json:"root,omitempty" yaml:"root"`
Identity string `json:"identity,omitempty" yaml:"identity"`
}

// ChiZookeeperNode defines item of nodes section of .spec.configuration.zookeeper
Expand Down
7 changes: 7 additions & 0 deletions pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,13 @@ func (in *ChiConfiguration) DeepCopyInto(out *ChiConfiguration) {
}
}
}
if in.Files != nil {
in, out := &in.Files, &out.Files
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Clusters != nil {
in, out := &in.Clusters, &out.Clusters
*out = make([]ChiCluster, len(*in))
Expand Down
Loading

0 comments on commit fc04cc3

Please sign in to comment.