Commit

Merge pull request #147 from Altinity/0.4.4
Service handling modifications and replication docs enhancements
alex-zaitsev committed Jul 29, 2019
2 parents 11c7400 + 4502b8e commit a9976e8
Showing 10 changed files with 102 additions and 28 deletions.
12 changes: 7 additions & 5 deletions Dockerfile
@@ -2,10 +2,10 @@

FROM golang:1.12.7 AS builder

RUN apt-get update && apt-get install -y -q apt-utils && apt-get install -y -q gettext-base
WORKDIR $GOPATH/src/github.com/altinity/clickhouse-operator
RUN apt-get update && apt-get install -y apt-utils gettext-base

# Reconstruct source tree inside docker
WORKDIR $GOPATH/src/github.com/altinity/clickhouse-operator
ADD . .
# ./vendor is excluded in .dockerignore, reconstruct it with 'dep' tool
RUN go get -u github.com/golang/dep/cmd/dep
@@ -16,9 +16,11 @@ RUN OPERATOR_BIN=/tmp/clickhouse-operator ./dev/binary_build.sh

# === Runner ===

FROM alpine:3.8 AS runner
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
FROM alpine:3.10 AS runner

RUN apk add --no-cache ca-certificates
WORKDIR /
COPY --from=builder /tmp/clickhouse-operator .
ENTRYPOINT ["./clickhouse-operator"]

ENTRYPOINT ["/clickhouse-operator"]
CMD ["-alsologtostderr=true", "-v=1"]
38 changes: 38 additions & 0 deletions docs/examples/04-zookeeper-replication-05-simple-PV.yaml
@@ -0,0 +1,38 @@
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"

metadata:
name: "repl-05"

spec:
defaults:
templates:
volumeClaimTemplate: default
podTemplate: clickhouse:19.6

configuration:
zookeeper:
nodes:
- host: zookeeper.zoo1ns
clusters:
- name: replicated
layout:
shardsCount: 2
replicasCount: 2

templates:
volumeClaimTemplates:
- name: default
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Mi
podTemplates:
- name: clickhouse:19.6
spec:
containers:
- name: clickhouse-pod
image: yandex/clickhouse-server:19.6.2.11

@@ -4,11 +4,7 @@ metadata:
name: "update-01"
spec:
configuration:
zookeeper: # Add Zookeeper
nodes:
- host: zookeeper.zoo1ns
clusters:
- name: "sharded"
layout:
shardsCount: 4
# replicasCount: 2 # Enable replication
@@ -30,5 +30,5 @@ spec:
volumes:
- name: clickhouse-storage
emptyDir:
medium: "" #accepted values: empty str (means node's default medium) or Memory
medium: "" # accepted values: empty str (means node's default medium) or "Memory"
sizeLimit: 1Gi
@@ -30,5 +30,5 @@ spec:
volumes:
- name: clickhouse-storage
emptyDir:
medium: "" #accepted values: empty str (means node's default medium) or Memory
medium: "" # accepted values: empty str (means node's default medium) or "Memory"
sizeLimit: 1Gi
11 changes: 11 additions & 0 deletions docs/examples/98-quota-01.yaml
@@ -0,0 +1,11 @@
apiVersion: v1
kind: ResourceQuota
metadata:
name: test-quotas
namespace: dev
spec:
hard:
pods: 10
services: 10
replicationcontrollers: 10

13 changes: 6 additions & 7 deletions docs/replication_setup.md
@@ -4,19 +4,19 @@
## Prerequisites

1. ClickHouse operator [installed](operator_installation_details.md)
1. Zookeeper installed as described in [Zookeeper Setup](zookeeper_setup.md)
1. Zookeeper [installed](zookeeper_setup.md)


## Manifest

The example below creates a cluster with 2 shards and 2 replicas and persistent storage.
Let's take a look at the [example](./examples/04-zookeeper-replication-05-simple-PV.yaml), which creates a cluster with 2 shards, 2 replicas and persistent storage.

```yaml
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"

metadata:
name: test
name: "repl-05"

spec:
defaults:
@@ -25,10 +25,9 @@ spec:
podTemplate: clickhouse:19.6

configuration:
zookeeper: # Add Zookeeper
zookeeper:
nodes:
- host: zookeeper.zoo1ns
port: 2181
clusters:
- name: replicated
layout:
@@ -62,7 +61,7 @@ Operator provides set of [macros](https://clickhouse.yandex/docs/en/operations/s
1. `{replica}` -- replica name in the cluster, maps to pod service name
1. `{shard}` -- shard id

ClickHouse also supports internal macros {database} and {table} that maps to current database.table.
ClickHouse also supports internal macros `{database}` and `{table}` that map to the current **database** and **table** respectively.
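
These macros let a single table definition work unchanged on every pod of the cluster. As a sketch of how the per-shard replicated table (called `events_local` further down in the document) might be defined with them — the column set and the ZooKeeper path below are illustrative assumptions, not taken from this commit:

```sql
-- Illustrative sketch: a replicated table relying on the {cluster}, {shard},
-- {replica}, {database} and {table} macros described above.
CREATE TABLE events_local ON CLUSTER '{cluster}' (
    event_date Date,
    event_type Int32,
    article_id Int32,
    title      String
) ENGINE = ReplicatedMergeTree(
    '/clickhouse/tables/{shard}/{database}/{table}', -- per-shard ZooKeeper path
    '{replica}'                                      -- per-pod replica name
)
PARTITION BY toYYYYMM(event_date)
ORDER BY (event_type, article_id);
```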

### Create replicated table

@@ -84,7 +83,7 @@ ENGINE = Distributed('{cluster}', default, events_local, rand());

We can generate some data:
```sql
insert into events select today(), rand()%3, number, 'my title' from numbers(100);
INSERT INTO events SELECT today(), rand()%3, number, 'my title' FROM numbers(100);
```

And check how these data are distributed over the cluster
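
As a quick sanity check (illustrative queries, not part of this commit), the row count seen through the Distributed table can be compared with what is physically stored in the local replicated table:

```sql
-- Illustrative sketch: total rows visible through the Distributed table...
SELECT count() FROM events;

-- ...versus rows stored on the replica we are currently connected to.
SELECT count() FROM events_local;
```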
13 changes: 13 additions & 0 deletions pkg/controller/chi/creators.go
@@ -78,6 +78,19 @@ func (c *Controller) ReconcileService(service *core.Service) error {

if curService != nil {
// Object with such name already exists, this is not an error
glog.V(1).Infof("Update Service %s/%s", service.Namespace, service.Name)
// spec.resourceVersion is required in order to update Service
service.ResourceVersion = curService.ResourceVersion
// spec.clusterIP field is immutable, need to use already assigned value
// From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
// Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies
// See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP
service.Spec.ClusterIP = curService.Spec.ClusterIP
_, err := c.kubeClient.CoreV1().Services(service.Namespace).Update(service)
if err != nil {
return err
}
return nil
}

33 changes: 24 additions & 9 deletions pkg/model/creator.go
@@ -134,21 +134,28 @@ func (r *Reconciler) createServiceReplica(replica *chiv1.ChiReplica) *corev1.Ser
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: chDefaultHTTPPortName,
Port: chDefaultHTTPPortNumber,
Name: chDefaultHTTPPortName,
Protocol: corev1.ProtocolTCP,
Port: chDefaultHTTPPortNumber,
TargetPort: intstr.FromInt(chDefaultHTTPPortNumber),
},
{
Name: chDefaultClientPortName,
Port: chDefaultClientPortNumber,
Name: chDefaultClientPortName,
Protocol: corev1.ProtocolTCP,
Port: chDefaultClientPortNumber,
TargetPort: intstr.FromInt(chDefaultClientPortNumber),
},
{
Name: chDefaultInterServerPortName,
Port: chDefaultInterServerPortNumber,
Name: chDefaultInterServerPortName,
Protocol: corev1.ProtocolTCP,
Port: chDefaultInterServerPortNumber,
TargetPort: intstr.FromInt(chDefaultInterServerPortNumber),
},
},
Selector: r.labeler.GetSelectorReplicaScope(replica),
ClusterIP: templateDefaultsServiceClusterIP,
Type: "ClusterIP",
Selector: r.labeler.GetSelectorReplicaScope(replica),
ClusterIP: templateDefaultsServiceClusterIP,
Type: "ClusterIP",
PublishNotReadyAddresses: true,
},
}
}
@@ -272,6 +279,14 @@ func (r *Reconciler) setupStatefulSetPodTemplate(statefulSetObject *apps.Statefu
glog.V(1).Infof("createStatefulSetObjects() for statefulSet %s - default template", statefulSetName)
}

// Each pod created by this StatefulSet needs a host alias so that its own hostname resolves to 127.0.0.1
statefulSetObject.Spec.Template.Spec.HostAliases = []corev1.HostAlias{
{
IP: "127.0.0.1",
Hostnames: []string{CreatePodHostname(replica)},
},
}

r.setupConfigMapVolumes(statefulSetObject, replica)
}

2 changes: 1 addition & 1 deletion release
@@ -1 +1 @@
0.4.3
0.4.4
