diff --git a/Dockerfile b/Dockerfile
index dcf17bfc0..ab6c27431 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,10 +2,10 @@
 
 FROM golang:1.12.7 AS builder
 
-RUN apt-get update && apt-get install -y -q apt-utils && apt-get install -y -q gettext-base
-WORKDIR $GOPATH/src/github.com/altinity/clickhouse-operator
+RUN apt-get update && apt-get install -y apt-utils gettext-base
 
 # Reconstruct source tree inside docker
+WORKDIR $GOPATH/src/github.com/altinity/clickhouse-operator
 ADD . .
 # ./vendor is excluded in .dockerignore, reconstruct it with 'dep' tool
 RUN go get -u github.com/golang/dep/cmd/dep
@@ -16,9 +16,11 @@ RUN OPERATOR_BIN=/tmp/clickhouse-operator ./dev/binary_build.sh
 
 # === Runner ===
 
-FROM alpine:3.8 AS runner
-RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
+FROM alpine:3.10 AS runner
+
+RUN apk add --no-cache ca-certificates
 WORKDIR /
 COPY --from=builder /tmp/clickhouse-operator .
-ENTRYPOINT ["./clickhouse-operator"]
+
+ENTRYPOINT ["/clickhouse-operator"]
 CMD ["-alsologtostderr=true", "-v=1"]
diff --git a/docs/examples/04-zookeeper-replication-05-simple-PV.yaml b/docs/examples/04-zookeeper-replication-05-simple-PV.yaml
new file mode 100644
index 000000000..6670dcbdf
--- /dev/null
+++ b/docs/examples/04-zookeeper-replication-05-simple-PV.yaml
@@ -0,0 +1,38 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+
+metadata:
+  name: "repl-05"
+
+spec:
+  defaults:
+    templates:
+      volumeClaimTemplate: default
+      podTemplate: clickhouse:19.6
+
+  configuration:
+    zookeeper:
+      nodes:
+        - host: zookeeper.zoo1ns
+    clusters:
+      - name: replicated
+        layout:
+          shardsCount: 2
+          replicasCount: 2
+
+  templates:
+    volumeClaimTemplates:
+      - name: default
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 500Mi
+    podTemplates:
+      - name: clickhouse:19.6
+        spec:
+          containers:
+            - name: clickhouse-pod
+              image: yandex/clickhouse-server:19.6.2.11
+
diff --git a/docs/examples/07-rolling-update-stateless-01-initial-position.yaml b/docs/examples/07-rolling-update-stateless-01-initial-position.yaml
index fc90576a9..08be368e2 100644
--- a/docs/examples/07-rolling-update-stateless-01-initial-position.yaml
+++ b/docs/examples/07-rolling-update-stateless-01-initial-position.yaml
@@ -4,11 +4,7 @@ metadata:
   name: "update-01"
 spec:
   configuration:
-    zookeeper: # Add Zookeeper
-      nodes:
-        - host: zookeeper.zoo1ns
     clusters:
       - name: "sharded"
         layout:
           shardsCount: 4
-#          replicasCount: 2 # Enable replication
diff --git a/docs/examples/09-rolling-update-emptydir-01-initial-position.yaml b/docs/examples/09-rolling-update-emptydir-01-initial-position.yaml
index 4b84e05fa..a31d83968 100644
--- a/docs/examples/09-rolling-update-emptydir-01-initial-position.yaml
+++ b/docs/examples/09-rolling-update-emptydir-01-initial-position.yaml
@@ -30,5 +30,5 @@ spec:
           volumes:
             - name: clickhouse-storage
               emptyDir:
-                medium: "" #accepted values: empty str (means node's default medium) or Memory
+                medium: "" # accepted values: empty str (means node's default medium) or "Memory"
                 sizeLimit: 1Gi
diff --git a/docs/examples/09-rolling-update-emptydir-02-apply-update.yaml b/docs/examples/09-rolling-update-emptydir-02-apply-update.yaml
index f6e4c9548..fad41b78f 100644
--- a/docs/examples/09-rolling-update-emptydir-02-apply-update.yaml
+++ b/docs/examples/09-rolling-update-emptydir-02-apply-update.yaml
@@ -30,5 +30,5 @@ spec:
           volumes:
             - name: clickhouse-storage
               emptyDir:
-                medium: "" #accepted values: empty str (means node's default medium) or Memory
+                medium: "" # accepted values: empty str (means node's default medium) or "Memory"
                 sizeLimit: 1Gi
diff --git a/docs/examples/98-quota-01.yaml b/docs/examples/98-quota-01.yaml
new file mode 100644
index 000000000..557901078
--- /dev/null
+++ b/docs/examples/98-quota-01.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: test-quotas
+  namespace: dev
+spec:
+  hard:
+    pods: 10
+    services: 10
+    replicationcontrollers: 10
+
diff --git a/docs/replication_setup.md b/docs/replication_setup.md
index 768416b02..0b8625b87 100644
--- a/docs/replication_setup.md
+++ b/docs/replication_setup.md
@@ -4,19 +4,19 @@
 
 ## Prerequisites
 
 1. ClickHouse operator [installed](operator_installation_details.md)
-1. Zookeeper installed as described in [Zookeeper Setup](zookeeper_setup.md)
+1. Zookeeper [installed](zookeeper_setup.md)
 
 ## Manifest
 
-The example below creates a cluster with 2 shards and 2 replicas and persistent storage.
+Let's take a look at the [example](./examples/04-zookeeper-replication-05-simple-PV.yaml), which creates a cluster with 2 shards, 2 replicas and persistent storage.
 
 ```yaml
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 
 metadata:
-  name: test
+  name: "repl-05"
 
 spec:
   defaults:
@@ -25,10 +25,9 @@ spec:
       podTemplate: clickhouse:19.6
 
   configuration:
-    zookeeper: # Add Zookeeper
+    zookeeper:
       nodes:
         - host: zookeeper.zoo1ns
-          port: 2181
     clusters:
       - name: replicated
         layout:
@@ -62,7 +61,7 @@ Operator provides set of [macros](https://clickhouse.yandex/docs/en/operations/s
 1. `{replica}` -- replica name in the cluster, maps to pod service name
 1. `{shard}` -- shard id
 
-ClickHouse also supports internal macros {database} and {table} that maps to current database.table.
+ClickHouse also supports internal macros `{database}` and `{table}` that map to the current **database** and **table**, respectively.
 
 ### Create replicated table
 
@@ -84,7 +83,7 @@ ENGINE = Distributed('{cluster}', default, events_local, rand());
 We can generate some data:
 
 ```sql
-insert into events select today(), rand()%3, number, 'my title' from numbers(100);
+INSERT INTO events SELECT today(), rand()%3, number, 'my title' FROM numbers(100);
 ```
 
 And check how these data are distributed over the cluster
diff --git a/pkg/controller/chi/creators.go b/pkg/controller/chi/creators.go
index fcca88151..0bb06d7f7 100644
--- a/pkg/controller/chi/creators.go
+++ b/pkg/controller/chi/creators.go
@@ -78,6 +78,19 @@ func (c *Controller) ReconcileService(service *core.Service) error {
 
 	if curService != nil {
 		// Object with such name already exists, this is not an error
+		glog.V(1).Infof("Update Service %s/%s", service.Namespace, service.Name)
+		// spec.resourceVersion is required in order to update Service
+		service.ResourceVersion = curService.ResourceVersion
+		// spec.clusterIP field is immutable, need to use already assigned value
+		// From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+		// Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies
+		// See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+		// You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP
+		service.Spec.ClusterIP = curService.Spec.ClusterIP
+		_, err := c.kubeClient.CoreV1().Services(service.Namespace).Update(service)
+		if err != nil {
+			return err
+		}
 		return nil
 	}
 
diff --git a/pkg/model/creator.go b/pkg/model/creator.go
index 340751cc7..35456ace5 100644
--- a/pkg/model/creator.go
+++ b/pkg/model/creator.go
@@ -134,21 +134,28 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser
 		Spec: corev1.ServiceSpec{
 			Ports: []corev1.ServicePort{
 				{
-					Name: chDefaultHTTPPortName,
-					Port: chDefaultHTTPPortNumber,
+					Name:       chDefaultHTTPPortName,
+					Protocol:   corev1.ProtocolTCP,
+					Port:       chDefaultHTTPPortNumber,
+					TargetPort: intstr.FromInt(chDefaultHTTPPortNumber),
 				},
 				{
-					Name: chDefaultClientPortName,
-					Port: chDefaultClientPortNumber,
+					Name:       chDefaultClientPortName,
+					Protocol:   corev1.ProtocolTCP,
+					Port:       chDefaultClientPortNumber,
+					TargetPort: intstr.FromInt(chDefaultClientPortNumber),
 				},
 				{
-					Name: chDefaultInterServerPortName,
-					Port: chDefaultInterServerPortNumber,
+					Name:       chDefaultInterServerPortName,
+					Protocol:   corev1.ProtocolTCP,
+					Port:       chDefaultInterServerPortNumber,
+					TargetPort: intstr.FromInt(chDefaultInterServerPortNumber),
 				},
 			},
-			Selector:  r.labeler.GetSelectorReplicaScope(replica),
-			ClusterIP: templateDefaultsServiceClusterIP,
-			Type:      "ClusterIP",
+			Selector:                 r.labeler.GetSelectorReplicaScope(replica),
+			ClusterIP:                templateDefaultsServiceClusterIP,
+			Type:                     "ClusterIP",
+			PublishNotReadyAddresses: true,
 		},
 	}
 }
@@ -272,6 +279,14 @@ func (r *Reconciler) setupStatefulSetPodTemplate(statefulSetObject *apps.Statefu
 		glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - default template", statefulSetName)
 	}
 
+	// Pod created by this StatefulSet has to have alias
+	statefulSetObject.Spec.Template.Spec.HostAliases = []corev1.HostAlias{
+		{
+			IP:        "127.0.0.1",
+			Hostnames: []string{CreatePodHostname(replica)},
+		},
+	}
+
 	r.setupConfigMapVolumes(statefulSetObject, replica)
 }
 
diff --git a/release b/release
index 17b2ccd9b..6f2743d65 100644
--- a/release
+++ b/release
@@ -1 +1 @@
-0.4.3
+0.4.4